1 /* bnx2x_sp.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2011-2013 Broadcom Corporation
4  *
5  * Unless you and Broadcom execute a separate written software license
6  * agreement governing use of this software, this software is licensed to you
7  * under the terms of the GNU General Public License version 2, available
8  * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
9  *
10  * Notwithstanding the above, under no circumstances may you combine this
11  * software in any way with any other Broadcom software provided under a
12  * license other than the GPL, without Broadcom's express prior written
13  * consent.
14  *
15  * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
16  * Written by: Vladislav Zolotarov
17  *
18  */
19
20 #include "akaros_compat.h"
21
22 #include "bnx2x.h"
23 #include "bnx2x_cmn.h"
24 #include "bnx2x_sp.h"
25
26 #define BNX2X_MAX_EMUL_MULTI            16
27
28 /**** Exe Queue interfaces ****/
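/* Commands are queued with bnx2x_exe_queue_add() (optionally optimized and
 * validated first), executed in chunks by bnx2x_exe_queue_step(), which moves
 * them onto the pending_comp list, and dropped from pending_comp once the
 * completion arrives (or on a RAMROD_DRV_CLR_ONLY reset).
 */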
29
30 /**
31  * bnx2x_exe_queue_init - init the Exe Queue object
32  *
33  * @o:          pointer to the object
34  * @exe_len:    maximum length of a single execution chunk
35  * @owner:      pointer to the owner
36  * @validate:   validate function pointer
 * @remove:     remove function pointer
37  * @optimize:   optimize function pointer
38  * @exec:       execute function pointer
39  * @get:        get function pointer
40  */
41 static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
42                                         struct bnx2x_exe_queue_obj *o,
43                                         int exe_len,
44                                         union bnx2x_qable_obj *owner,
45                                         exe_q_validate validate,
46                                         exe_q_remove remove,
47                                         exe_q_optimize optimize,
48                                         exe_q_execute exec,
49                                         exe_q_get get)
50 {
51         memset(o, 0, sizeof(*o));
52
53         INIT_LIST_HEAD(&o->exe_queue);
54         INIT_LIST_HEAD(&o->pending_comp);
55
56         spin_lock_init(&o->lock);
57
58         o->exe_chunk_len = exe_len;
59         o->owner         = owner;
60
61         /* Owner specific callbacks */
62         o->validate      = validate;
63         o->remove        = remove;
64         o->optimize      = optimize;
65         o->execute       = exec;
66         o->get           = get;
67
68         DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk length of %d\n",
69            exe_len);
70 }
71
72 static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp,
73                                              struct bnx2x_exeq_elem *elem)
74 {
75         DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n");
76         kfree(elem);
77 }
78
79 static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
80 {
81         struct bnx2x_exeq_elem *elem;
82         int cnt = 0;
83
84         spin_lock_bh(&o->lock);
85
86         list_for_each_entry(elem, &o->exe_queue, link)
87                 cnt++;
88
89         spin_unlock_bh(&o->lock);
90
91         return cnt;
92 }
93
94 /**
95  * bnx2x_exe_queue_add - add a new element to the execution queue
96  *
97  * @bp:         driver handle
98  * @o:          queue
99  * @elem:       new element to add
100  * @restore:    true - do not optimize the command
101  *
102  * If the element is optimized away or is illegal, it is freed.
103  */
104 static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
105                                       struct bnx2x_exe_queue_obj *o,
106                                       struct bnx2x_exeq_elem *elem,
107                                       bool restore)
108 {
109         int rc;
110
111         spin_lock_bh(&o->lock);
112
113         if (!restore) {
114                 /* Try to optimize (cancel) this element against the pending queue */
115                 rc = o->optimize(bp, o->owner, elem);
116                 if (rc)
117                         goto free_and_exit;
118
119                 /* Check if this request is ok */
120                 rc = o->validate(bp, o->owner, elem);
121                 if (rc) {
122                         DP(BNX2X_MSG_SP, "Preamble failed: %d\n", rc);
123                         goto free_and_exit;
124                 }
125         }
126
127         /* If so, add it to the execution queue */
128         list_add_tail(&elem->link, &o->exe_queue);
129
130         spin_unlock_bh(&o->lock);
131
132         return 0;
133
134 free_and_exit:
135         bnx2x_exe_queue_free_elem(bp, elem);
136
137         spin_unlock_bh(&o->lock);
138
139         return rc;
140 }
141
142 static inline void __bnx2x_exe_queue_reset_pending(
143         struct bnx2x *bp,
144         struct bnx2x_exe_queue_obj *o)
145 {
146         struct bnx2x_exeq_elem *elem;
147
148         while (!list_empty(&o->pending_comp)) {
149                 elem = list_first_entry(&o->pending_comp,
150                                         struct bnx2x_exeq_elem, link);
151
152                 list_del(&elem->link);
153                 bnx2x_exe_queue_free_elem(bp, elem);
154         }
155 }
156
157 /**
158  * bnx2x_exe_queue_step - execute one execution chunk atomically
159  *
160  * @bp:                 driver handle
161  * @o:                  queue
162  * @ramrod_flags:       flags
163  *
164  * (Should be called while holding the exe_queue->lock).
165  */
166 static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
167                                        struct bnx2x_exe_queue_obj *o,
168                                        unsigned long *ramrod_flags)
169 {
170         struct bnx2x_exeq_elem *elem, spacer;
171         int cur_len = 0, rc;
172
173         memset(&spacer, 0, sizeof(spacer));
174
175         /* Next step should not be performed until the current is finished,
176          * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
177          * properly clear object internals without sending any command to the FW
178          * which also implies there won't be any completion to clear the
179          * 'pending' list.
180          */
181         if (!list_empty(&o->pending_comp)) {
182                 if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
183                         DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
184                         __bnx2x_exe_queue_reset_pending(bp, o);
185                 } else {
186                         return 1;
187                 }
188         }
189
190         /* Run through the pending commands list and create a next
191          * execution chunk.
192          */
193         while (!list_empty(&o->exe_queue)) {
194                 elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem,
195                                         link);
196                 WARN_ON(!elem->cmd_len);
197
198                 if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
199                         cur_len += elem->cmd_len;
200                         /* Prevent from both lists being empty when moving an
201                          * element. This will allow the call of
202                          * bnx2x_exe_queue_empty() without locking.
203                          */
204                         list_add_tail(&spacer.link, &o->pending_comp);
205                         mb();
206                         list_move_tail(&elem->link, &o->pending_comp);
207                         list_del(&spacer.link);
208                 } else
209                         break;
210         }
211
212         /* Sanity check */
213         if (!cur_len)
214                 return 0;
215
216         rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
217         if (rc < 0)
218                 /* In case of an error return the commands back to the queue
219                  * and reset the pending_comp.
220                  */
221                 list_splice_init(&o->pending_comp, &o->exe_queue);
222         else if (!rc)
223                 /* If zero is returned, means there are no outstanding pending
224                  * completions and we may dismiss the pending list.
225                  */
226                 __bnx2x_exe_queue_reset_pending(bp, o);
227
228         return rc;
229 }
230
231 static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o)
232 {
233         bool empty = list_empty(&o->exe_queue);
234
235         /* Don't reorder!!! Pairs with the spacer + mb() in bnx2x_exe_queue_step() */
236         mb();
237
238         return empty && list_empty(&o->pending_comp);
239 }
240
241 static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem(
242         struct bnx2x *bp)
243 {
244         DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n");
245         return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC);
246 }
247
248 /************************ raw_obj functions ***********************************/
249 static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
250 {
251         return !!test_bit(o->state, o->pstate);
252 }
253
254 static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
255 {
256         smp_mb__before_atomic();
257         clear_bit(o->state, o->pstate);
258         smp_mb__after_atomic();
259 }
260
261 static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
262 {
263         smp_mb__before_atomic();
264         set_bit(o->state, o->pstate);
265         smp_mb__after_atomic();
266 }
267
268 /**
269  * bnx2x_state_wait - wait until the given bit(state) is cleared
270  *
271  * @bp:         device handle
272  * @state:      state which is to be cleared
273  * @pstate:     pointer to the state buffer
274  *
275  */
276 static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
277                                    unsigned long *pstate)
278 {
279         /* can take a while if any port is running */
280         int cnt = 5000;
281
282         if (CHIP_REV_IS_EMUL(bp))
283                 cnt *= 20;
284
285         DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state);
286
287         might_sleep();
288         while (cnt--) {
289                 if (!test_bit(state, pstate)) {
290 #ifdef BNX2X_STOP_ON_ERROR
291                         DP(BNX2X_MSG_SP, "exit  (cnt %d)\n", 5000 - cnt);
292 #endif
293                         return 0;
294                 }
295
296                 usleep_range(1000, 2000);
297
298                 if (bp->panic)
299                         return -EIO;
300         }
301
302         /* timeout! */
303         BNX2X_ERR("timeout waiting for state %d\n", state);
304 #ifdef BNX2X_STOP_ON_ERROR
305         bnx2x_panic();
306 #endif
307
308         return -EBUSY;
309 }
310
311 static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw)
312 {
313         return bnx2x_state_wait(bp, raw->state, raw->pstate);
314 }
315
316 /***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
317 /* credit handling callbacks */
318 static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset)
319 {
320         struct bnx2x_credit_pool_obj *mp = o->macs_pool;
321
322         WARN_ON(!mp);
323
324         return mp->get_entry(mp, offset);
325 }
326
327 static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o)
328 {
329         struct bnx2x_credit_pool_obj *mp = o->macs_pool;
330
331         WARN_ON(!mp);
332
333         return mp->get(mp, 1);
334 }
335
336 static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset)
337 {
338         struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
339
340         WARN_ON(!vp);
341
342         return vp->get_entry(vp, offset);
343 }
344
345 static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
346 {
347         struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
348
349         WARN_ON(!vp);
350
351         return vp->get(vp, 1);
352 }
353 static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
354 {
355         struct bnx2x_credit_pool_obj *mp = o->macs_pool;
356
357         return mp->put_entry(mp, offset);
358 }
359
360 static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o)
361 {
362         struct bnx2x_credit_pool_obj *mp = o->macs_pool;
363
364         return mp->put(mp, 1);
365 }
366
367 static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset)
368 {
369         struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
370
371         return vp->put_entry(vp, offset);
372 }
373
374 static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
375 {
376         struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
377
378         return vp->put(vp, 1);
379 }
380
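/* Locking scheme for the vlan_mac object's head list: readers take the
 * exe_queue lock and bump head_reader; an execution step (the "writer") may
 * only run while there are no readers.  If it cannot, the step is pended via
 * head_exe_request/saved_ramrod_flags and replayed when the last reader or
 * the writer releases the lock.
 */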
381 /**
382  * __bnx2x_vlan_mac_h_write_trylock - try getting the vlan mac writer lock
383  *
384  * @bp:         device handle
385  * @o:          vlan_mac object
386  *
387  * @details Non-blocking implementation; should be called under execution
388  *           queue lock.
389  */
390 static int __bnx2x_vlan_mac_h_write_trylock(struct bnx2x *bp,
391                                             struct bnx2x_vlan_mac_obj *o)
392 {
393         if (o->head_reader) {
394                 DP(BNX2X_MSG_SP, "vlan_mac_lock writer - There are readers; Busy\n");
395                 return -EBUSY;
396         }
397
398         DP(BNX2X_MSG_SP, "vlan_mac_lock writer - Taken\n");
399         return 0;
400 }
401
402 /**
403  * __bnx2x_vlan_mac_h_exec_pending - execute a previously pended step
404  *
405  * @bp:         device handle
406  * @o:          vlan_mac object
407  *
408  * @details Should be called under execution queue lock; notice it might release
409  *          and reclaim it during its run.
410  */
411 static void __bnx2x_vlan_mac_h_exec_pending(struct bnx2x *bp,
412                                             struct bnx2x_vlan_mac_obj *o)
413 {
414         int rc;
415         unsigned long ramrod_flags = o->saved_ramrod_flags;
416
417         DP(BNX2X_MSG_SP, "vlan_mac_lock execute pending command with ramrod flags %lu\n",
418            ramrod_flags);
419         o->head_exe_request = false;
420         o->saved_ramrod_flags = 0;
421         rc = bnx2x_exe_queue_step(bp, &o->exe_queue, &ramrod_flags);
422         if (rc != 0) {
423                 BNX2X_ERR("execution of pending commands failed with rc %d\n",
424                           rc);
425 #ifdef BNX2X_STOP_ON_ERROR
426                 bnx2x_panic();
427 #endif
428         }
429 }
430
431 /**
432  * __bnx2x_vlan_mac_h_pend - Pend an execution step which couldn't run
433  *
434  * @bp:                 device handle
435  * @o:                  vlan_mac object
436  * @ramrod_flags:       ramrod flags of missed execution
437  *
438  * @details Should be called under execution queue lock.
439  */
440 static void __bnx2x_vlan_mac_h_pend(struct bnx2x *bp,
441                                     struct bnx2x_vlan_mac_obj *o,
442                                     unsigned long ramrod_flags)
443 {
444         o->head_exe_request = true;
445         o->saved_ramrod_flags = ramrod_flags;
446         DP(BNX2X_MSG_SP, "Placing pending execution with ramrod flags %lu\n",
447            ramrod_flags);
448 }
449
450 /**
451  * __bnx2x_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
452  *
453  * @bp:                 device handle
454  * @o:                  vlan_mac object
455  *
456  * @details Should be called under execution queue lock. Notice if a pending
457  *          execution exists, it would perform it - possibly releasing and
458  *          reclaiming the execution queue lock.
459  */
460 static void __bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
461                                             struct bnx2x_vlan_mac_obj *o)
462 {
463         /* It's possible a new pending execution was added since this writer
464          * executed. If so, execute again. [Ad infinitum]
465          */
466         while (o->head_exe_request) {
467                 DP(BNX2X_MSG_SP, "vlan_mac_lock - writer release encountered a pending request\n");
468                 __bnx2x_vlan_mac_h_exec_pending(bp, o);
469         }
470 }
471
472
473 /**
474  * __bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
475  *
476  * @bp:                 device handle
477  * @o:                  vlan_mac object
478  *
479  * @details Should be called under the execution queue lock. May sleep. May
480  *          release and reclaim execution queue lock during its run.
481  */
482 static int __bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
483                                         struct bnx2x_vlan_mac_obj *o)
484 {
485         /* If we got here, we're holding lock --> no WRITER exists */
486         o->head_reader++;
487         DP(BNX2X_MSG_SP, "vlan_mac_lock - locked reader - number %d\n",
488            o->head_reader);
489
490         return 0;
491 }
492
493 /**
494  * bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
495  *
496  * @bp:                 device handle
497  * @o:                  vlan_mac object
498  *
499  * @details May sleep. Claims and releases execution queue lock during its run.
500  */
501 int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
502                                struct bnx2x_vlan_mac_obj *o)
503 {
504         int rc;
505
506         spin_lock_bh(&o->exe_queue.lock);
507         rc = __bnx2x_vlan_mac_h_read_lock(bp, o);
508         spin_unlock_bh(&o->exe_queue.lock);
509
510         return rc;
511 }
512
513 /**
514  * __bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
515  *
516  * @bp:                 device handle
517  * @o:                  vlan_mac object
518  *
519  * @details Should be called under execution queue lock. Notice if a pending
520  *          execution exists, it will be performed if this was the last
521  *          reader, possibly releasing and reclaiming the execution queue lock.
522  */
523 static void __bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
524                                           struct bnx2x_vlan_mac_obj *o)
525 {
526         if (!o->head_reader) {
527                 BNX2X_ERR("Need to release vlan mac reader lock, but lock isn't taken\n");
528 #ifdef BNX2X_STOP_ON_ERROR
529                 bnx2x_panic();
530 #endif
531         } else {
532                 o->head_reader--;
533                 DP(BNX2X_MSG_SP, "vlan_mac_lock - decreased readers to %d\n",
534                    o->head_reader);
535         }
536
537         /* It's possible a new pending execution was added, and that this reader
538          * was last - if so we need to execute the command.
539          */
540         if (!o->head_reader && o->head_exe_request) {
541                 DP(BNX2X_MSG_SP, "vlan_mac_lock - reader release encountered a pending request\n");
542
543                 /* Writer release will do the trick */
544                 __bnx2x_vlan_mac_h_write_unlock(bp, o);
545         }
546 }
547
548 /**
549  * bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
550  *
551  * @bp:                 device handle
552  * @o:                  vlan_mac object
553  *
554  * @details Notice if a pending execution exists, it would be performed if this
555  *          was the last reader. Claims and releases the execution queue lock
556  *          during its run.
557  */
558 void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
559                                   struct bnx2x_vlan_mac_obj *o)
560 {
561         spin_lock_bh(&o->exe_queue.lock);
562         __bnx2x_vlan_mac_h_read_unlock(bp, o);
563         spin_unlock_bh(&o->exe_queue.lock);
564 }
565
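/**
 * bnx2x_get_n_elements - copy registry entries into a flat buffer
 *
 * @bp:         device handle
 * @o:          vlan_mac object whose registry is traversed
 * @n:          maximum number of entries to copy
 * @base:       destination buffer
 * @stride:     gap in bytes left after each copied entry
 * @size:       number of bytes copied per entry
 *
 * Copies up to @n registry entries, @size bytes each and @size + @stride
 * bytes apart, into @base while holding the reader lock (when it can be
 * taken).  Returns the number of copied entries times ETH_ALEN.
 */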
566 static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
567                                 int n, u8 *base, u8 stride, u8 size)
568 {
569         struct bnx2x_vlan_mac_registry_elem *pos;
570         u8 *next = base;
571         int counter = 0;
572         int read_lock;
573
574         DP(BNX2X_MSG_SP, "get_n_elements - taking vlan_mac_lock (reader)\n");
575         read_lock = bnx2x_vlan_mac_h_read_lock(bp, o);
576         if (read_lock != 0)
577                 BNX2X_ERR("get_n_elements failed to get vlan mac reader lock; Access without lock\n");
578
579         /* traverse list */
580         list_for_each_entry(pos, &o->head, link) {
581                 if (counter < n) {
582                         memcpy(next, &pos->u, size);
583                         counter++;
584                         DP(BNX2X_MSG_SP, "copied element number %d to address %p element was:\n",
585                            counter, next);
586                         next += stride + size;
587                 }
588         }
589
590         if (read_lock == 0) {
591                 DP(BNX2X_MSG_SP, "get_n_elements - releasing vlan_mac_lock (reader)\n");
592                 bnx2x_vlan_mac_h_read_unlock(bp, o);
593         }
594
595         return counter * ETH_ALEN;
596 }
597
598 /* check_add() callbacks */
599 static int bnx2x_check_mac_add(struct bnx2x *bp,
600                                struct bnx2x_vlan_mac_obj *o,
601                                union bnx2x_classification_ramrod_data *data)
602 {
603         struct bnx2x_vlan_mac_registry_elem *pos;
604
605         DP(BNX2X_MSG_SP, "Checking MAC %pM for ADD command\n", data->mac.mac);
606
607         if (!is_valid_ether_addr(data->mac.mac))
608                 return -EINVAL;
609
610         /* Check if a requested MAC already exists */
611         list_for_each_entry(pos, &o->head, link)
612                 if (ether_addr_equal(data->mac.mac, pos->u.mac.mac) &&
613                     (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
614                         return -EEXIST;
615
616         return 0;
617 }
618
619 static int bnx2x_check_vlan_add(struct bnx2x *bp,
620                                 struct bnx2x_vlan_mac_obj *o,
621                                 union bnx2x_classification_ramrod_data *data)
622 {
623         struct bnx2x_vlan_mac_registry_elem *pos;
624
625         DP(BNX2X_MSG_SP, "Checking VLAN %d for ADD command\n", data->vlan.vlan);
626
627         list_for_each_entry(pos, &o->head, link)
628                 if (data->vlan.vlan == pos->u.vlan.vlan)
629                         return -EEXIST;
630
631         return 0;
632 }
633
634 /* check_del() callbacks */
635 static struct bnx2x_vlan_mac_registry_elem *
636         bnx2x_check_mac_del(struct bnx2x *bp,
637                             struct bnx2x_vlan_mac_obj *o,
638                             union bnx2x_classification_ramrod_data *data)
639 {
640         struct bnx2x_vlan_mac_registry_elem *pos;
641
642         DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac);
643
644         list_for_each_entry(pos, &o->head, link)
645                 if (ether_addr_equal(data->mac.mac, pos->u.mac.mac) &&
646                     (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
647                         return pos;
648
649         return NULL;
650 }
651
652 static struct bnx2x_vlan_mac_registry_elem *
653         bnx2x_check_vlan_del(struct bnx2x *bp,
654                              struct bnx2x_vlan_mac_obj *o,
655                              union bnx2x_classification_ramrod_data *data)
656 {
657         struct bnx2x_vlan_mac_registry_elem *pos;
658
659         DP(BNX2X_MSG_SP, "Checking VLAN %d for DEL command\n", data->vlan.vlan);
660
661         list_for_each_entry(pos, &o->head, link)
662                 if (data->vlan.vlan == pos->u.vlan.vlan)
663                         return pos;
664
665         return NULL;
666 }
667
668 /* check_move() callback */
669 static bool bnx2x_check_move(struct bnx2x *bp,
670                              struct bnx2x_vlan_mac_obj *src_o,
671                              struct bnx2x_vlan_mac_obj *dst_o,
672                              union bnx2x_classification_ramrod_data *data)
673 {
674         struct bnx2x_vlan_mac_registry_elem *pos;
675         int rc;
676
677         /* Check if we can delete the requested configuration from the first
678          * object.
679          */
680         pos = src_o->check_del(bp, src_o, data);
681
682         /* Check if the configuration can be added */
683         rc = dst_o->check_add(bp, dst_o, data);
684
685         /* If this classification can not be added (is already set)
686          * or can't be deleted - return an error.
687          */
688         if (rc || !pos)
689                 return false;
690
691         return true;
692 }
693
694 static bool bnx2x_check_move_always_err(
695         struct bnx2x *bp,
696         struct bnx2x_vlan_mac_obj *src_o,
697         struct bnx2x_vlan_mac_obj *dst_o,
698         union bnx2x_classification_ramrod_data *data)
699 {
700         return false;
701 }
702
703 static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
704 {
705         struct bnx2x_raw_obj *raw = &o->raw;
706         u8 rx_tx_flag = 0;
707
708         if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
709             (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
710                 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;
711
712         if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
713             (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
714                 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;
715
716         return rx_tx_flag;
717 }
718
719 static void bnx2x_set_mac_in_nig(struct bnx2x *bp,
720                                  bool add, unsigned char *dev_addr, int index)
721 {
722         u32 wb_data[2];
723         u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
724                          NIG_REG_LLH0_FUNC_MEM;
725
726         if (!IS_MF_SI(bp) && !IS_MF_AFEX(bp))
727                 return;
728
729         if (index > BNX2X_LLH_CAM_MAX_PF_LINE)
730                 return;
731
732         DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
733                          (add ? "ADD" : "DELETE"), index);
734
735         if (add) {
736                 /* LLH_FUNC_MEM is a u64 WB register */
737                 reg_offset += 8*index;
738
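                /* Pack the 6-byte MAC into the 64-bit wide-bus value:
                 * wb_data[0] holds MAC bytes 2..5, wb_data[1] holds bytes 0..1.
                 */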
739                 wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
740                               (dev_addr[4] <<  8) |  dev_addr[5]);
741                 wb_data[1] = ((dev_addr[0] <<  8) |  dev_addr[1]);
742
743                 REG_WR_DMAE(bp, reg_offset, wb_data, 2);
744         }
745
746         REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
747                                   NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
748 }
749
750 /**
751  * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
752  *
753  * @bp:         device handle
754  * @o:          queue for which we want to configure this rule
755  * @add:        if true the command is an ADD command, DEL otherwise
756  * @opcode:     CLASSIFY_RULE_OPCODE_XXX
757  * @hdr:        pointer to a header to setup
758  *
759  */
760 static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
761         struct bnx2x_vlan_mac_obj *o, bool add, int opcode,
762         struct eth_classify_cmd_header *hdr)
763 {
764         struct bnx2x_raw_obj *raw = &o->raw;
765
766         hdr->client_id = raw->cl_id;
767         hdr->func_id = raw->func_id;
768
769         /* Rx or/and Tx (internal switching) configuration ? */
770         hdr->cmd_general_data |=
771                 bnx2x_vlan_mac_get_rx_tx_flag(o);
772
773         if (add)
774                 hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;
775
776         hdr->cmd_general_data |=
777                 (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
778 }
779
780 /**
781  * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
782  *
783  * @cid:        connection id
784  * @type:       BNX2X_FILTER_XXX_PENDING
785  * @hdr:        pointer to header to setup
786  * @rule_cnt:   number of rules used in this ramrod data
787  *
788  * Currently we always configure one rule and set the echo field to contain
789  * a CID and an opcode type.
790  */
791 static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
792                                 struct eth_classify_header *hdr, int rule_cnt)
793 {
794         hdr->echo = cpu_to_le32((cid & BNX2X_SWCID_MASK) |
795                                 (type << BNX2X_SWCID_SHIFT));
796         hdr->rule_cnt = (u8)rule_cnt;
797 }
798
799 /* hw_config() callbacks */
800 static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
801                                  struct bnx2x_vlan_mac_obj *o,
802                                  struct bnx2x_exeq_elem *elem, int rule_idx,
803                                  int cam_offset)
804 {
805         struct bnx2x_raw_obj *raw = &o->raw;
806         struct eth_classify_rules_ramrod_data *data =
807                 (struct eth_classify_rules_ramrod_data *)(raw->rdata);
808         int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
809         union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
810         bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
811         unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
812         u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;
813
814         /* Set LLH CAM entry: currently only iSCSI and ETH macs are
815          * relevant. In addition, current implementation is tuned for a
816          * single ETH MAC.
817          *
818          * When a PF must be configured with multiple unicast ETH MACs in
819          * switch-independent mode (NetQ, multiple netdev MACs, etc.),
820          * consider making better use of the 8 per-function MAC entries
821          * in the LLH register. There are also the
822          * NIG_REG_P[01]_LLH_FUNC_MEM2 registers, which bring the total
823          * number of CAM entries to 16.
824          *
825          * Currently we won't configure NIG for MACs other than a primary ETH
826          * MAC and iSCSI L2 MAC.
827          *
828          * If this MAC is moving from one Queue to another, no need to change
829          * NIG configuration.
830          */
831         if (cmd != BNX2X_VLAN_MAC_MOVE) {
832                 if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags))
833                         bnx2x_set_mac_in_nig(bp, add, mac,
834                                              BNX2X_LLH_CAM_ISCSI_ETH_LINE);
835                 else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
836                         bnx2x_set_mac_in_nig(bp, add, mac,
837                                              BNX2X_LLH_CAM_ETH_LINE);
838         }
839
840         /* Reset the ramrod data buffer for the first rule */
841         if (rule_idx == 0)
842                 memset(data, 0, sizeof(*data));
843
844         /* Setup a command header */
845         bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC,
846                                       &rule_entry->mac.header);
847
848         DP(BNX2X_MSG_SP, "About to %s MAC %pM for Queue %d\n",
849            (add ? "add" : "delete"), mac, raw->cl_id);
850
851         /* Set a MAC itself */
852         bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
853                               &rule_entry->mac.mac_mid,
854                               &rule_entry->mac.mac_lsb, mac);
855         rule_entry->mac.inner_mac =
856                 cpu_to_le16(elem->cmd_data.vlan_mac.u.mac.is_inner_mac);
857
858         /* MOVE: Add a rule that will add this MAC to the target Queue */
859         if (cmd == BNX2X_VLAN_MAC_MOVE) {
860                 rule_entry++;
861                 rule_cnt++;
862
863                 /* Setup ramrod data */
864                 bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
865                                         elem->cmd_data.vlan_mac.target_obj,
866                                               true, CLASSIFY_RULE_OPCODE_MAC,
867                                               &rule_entry->mac.header);
868
869                 /* Set a MAC itself */
870                 bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
871                                       &rule_entry->mac.mac_mid,
872                                       &rule_entry->mac.mac_lsb, mac);
873                 rule_entry->mac.inner_mac =
874                         cpu_to_le16(elem->cmd_data.vlan_mac.
875                                                 u.mac.is_inner_mac);
876         }
877
878         /* Set the ramrod data header */
879         /* TODO: move this to a higher level in order to avoid writing the
880          * header multiple times */
881         bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
882                                         rule_cnt);
883 }
884
885 /**
886  * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single MAC configuration ramrod
887  *
888  * @bp:         device handle
889  * @o:          queue
890  * @type:       BNX2X_FILTER_XXX_PENDING
891  * @cam_offset: offset in cam memory
892  * @hdr:        pointer to a header to setup
893  *
894  * E1/E1H
895  */
896 static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp,
897         struct bnx2x_vlan_mac_obj *o, int type, int cam_offset,
898         struct mac_configuration_hdr *hdr)
899 {
900         struct bnx2x_raw_obj *r = &o->raw;
901
902         hdr->length = 1;
903         hdr->offset = (u8)cam_offset;
904         hdr->client_id = cpu_to_le16(0xff);
905         hdr->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
906                                 (type << BNX2X_SWCID_SHIFT));
907 }
908
909 static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp,
910         struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac,
911         u16 vlan_id, struct mac_configuration_entry *cfg_entry)
912 {
913         struct bnx2x_raw_obj *r = &o->raw;
914         u32 cl_bit_vec = (1 << r->cl_id);
915
916         cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec);
917         cfg_entry->pf_id = r->func_id;
918         cfg_entry->vlan_id = cpu_to_le16(vlan_id);
919
920         if (add) {
921                 SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
922                          T_ETH_MAC_COMMAND_SET);
923                 SET_FLAG(cfg_entry->flags,
924                          MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode);
925
926                 /* Set a MAC in a ramrod data */
927                 bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
928                                       &cfg_entry->middle_mac_addr,
929                                       &cfg_entry->lsb_mac_addr, mac);
930         } else
931                 SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
932                          T_ETH_MAC_COMMAND_INVALIDATE);
933 }
934
935 static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp,
936         struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add,
937         u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config)
938 {
939         struct mac_configuration_entry *cfg_entry = &config->config_table[0];
940         struct bnx2x_raw_obj *raw = &o->raw;
941
942         bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset,
943                                          &config->hdr);
944         bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id,
945                                          cfg_entry);
946
947         DP(BNX2X_MSG_SP, "%s MAC %pM CLID %d CAM offset %d\n",
948                          (add ? "setting" : "clearing"),
949                          mac, raw->cl_id, cam_offset);
950 }
951
952 /**
953  * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data
954  *
955  * @bp:         device handle
956  * @o:          bnx2x_vlan_mac_obj
957  * @elem:       bnx2x_exeq_elem
958  * @rule_idx:   index of the rule to configure
959  * @cam_offset: offset in CAM memory
960  */
961 static void bnx2x_set_one_mac_e1x(struct bnx2x *bp,
962                                   struct bnx2x_vlan_mac_obj *o,
963                                   struct bnx2x_exeq_elem *elem, int rule_idx,
964                                   int cam_offset)
965 {
966         struct bnx2x_raw_obj *raw = &o->raw;
967         struct mac_configuration_cmd *config =
968                 (struct mac_configuration_cmd *)(raw->rdata);
969         /* 57710 and 57711 do not support MOVE command,
970          * so it's either ADD or DEL
971          */
972         bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
973                 true : false;
974
975         /* Reset the ramrod data buffer */
976         memset(config, 0, sizeof(*config));
977
978         bnx2x_vlan_mac_set_rdata_e1x(bp, o, raw->state,
979                                      cam_offset, add,
980                                      elem->cmd_data.vlan_mac.u.mac.mac, 0,
981                                      ETH_VLAN_FILTER_ANY_VLAN, config);
982 }
983
984 static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
985                                   struct bnx2x_vlan_mac_obj *o,
986                                   struct bnx2x_exeq_elem *elem, int rule_idx,
987                                   int cam_offset)
988 {
989         struct bnx2x_raw_obj *raw = &o->raw;
990         struct eth_classify_rules_ramrod_data *data =
991                 (struct eth_classify_rules_ramrod_data *)(raw->rdata);
992         int rule_cnt = rule_idx + 1;
993         union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
994         enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
995         bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
996         u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;
997
998         /* Reset the ramrod data buffer for the first rule */
999         if (rule_idx == 0)
1000                 memset(data, 0, sizeof(*data));
1001
1002         /* Set a rule header */
1003         bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN,
1004                                       &rule_entry->vlan.header);
1005
1006         DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"),
1007                          vlan);
1008
1009         /* Set a VLAN itself */
1010         rule_entry->vlan.vlan = cpu_to_le16(vlan);
1011
1012         /* MOVE: Add a rule that will add this VLAN to the target Queue */
1013         if (cmd == BNX2X_VLAN_MAC_MOVE) {
1014                 rule_entry++;
1015                 rule_cnt++;
1016
1017                 /* Setup ramrod data */
1018                 bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
1019                                         elem->cmd_data.vlan_mac.target_obj,
1020                                               true, CLASSIFY_RULE_OPCODE_VLAN,
1021                                               &rule_entry->vlan.header);
1022
1023                 /* Set a VLAN itself */
1024                 rule_entry->vlan.vlan = cpu_to_le16(vlan);
1025         }
1026
1027         /* Set the ramrod data header */
1028         /* TODO: move this to a higher level in order to avoid writing the
1029          * header multiple times */
1030         bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
1031                                         rule_cnt);
1032 }
1033
1034 /**
1035  * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
1036  *
1037  * @bp:         device handle
1038  * @p:          command parameters
1039  * @ppos:       pointer to the cookie
1040  *
1041  * reconfigure next MAC/VLAN/VLAN-MAC element from the
1042  * previously configured elements list.
1043  *
1044  * Of the command parameters, only the RAMROD_COMP_WAIT bit in ramrod_flags
1045  * is taken into account.
1046  *
1047  * The cookie pointed to by @ppos should be passed back in the next call so
1048  * that this function handles the next element. If *ppos is set to NULL the
1049  * iterator is restarted; if *ppos == NULL on return, the last element has
1050  * been handled.
1051  *
1052  */
1053 static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
1054                            struct bnx2x_vlan_mac_ramrod_params *p,
1055                            struct bnx2x_vlan_mac_registry_elem **ppos)
1056 {
1057         struct bnx2x_vlan_mac_registry_elem *pos;
1058         struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1059
1060         /* If list is empty - there is nothing to do here */
1061         if (list_empty(&o->head)) {
1062                 *ppos = NULL;
1063                 return 0;
1064         }
1065
1066         /* make a step... */
1067         if (*ppos == NULL)
1068                 *ppos = list_first_entry(&o->head,
1069                                          struct bnx2x_vlan_mac_registry_elem,
1070                                          link);
1071         else
1072                 *ppos = list_next_entry(*ppos, link);
1073
1074         pos = *ppos;
1075
1076         /* If it's the last step - return NULL */
1077         if (list_is_last(&pos->link, &o->head))
1078                 *ppos = NULL;
1079
1080         /* Prepare a 'user_req' */
1081         memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));
1082
1083         /* Set the command */
1084         p->user_req.cmd = BNX2X_VLAN_MAC_ADD;
1085
1086         /* Set vlan_mac_flags */
1087         p->user_req.vlan_mac_flags = pos->vlan_mac_flags;
1088
1089         /* Set a restore bit */
1090         __set_bit(RAMROD_RESTORE, &p->ramrod_flags);
1091
1092         return bnx2x_config_vlan_mac(bp, p);
1093 }
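/* Typical restore loop (sketch): start with a NULL cookie and keep calling
 * until *ppos comes back NULL:
 *
 *      struct bnx2x_vlan_mac_registry_elem *pos = NULL;
 *
 *      do {
 *              rc = bnx2x_vlan_mac_restore(bp, &p, &pos);
 *              if (rc < 0)
 *                      break;
 *      } while (pos);
 */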
1094
1095 /* bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
1096  * pointer to an element matching the given criteria, or NULL if no such
1097  * element has been found.
1098  */
1099 static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac(
1100         struct bnx2x_exe_queue_obj *o,
1101         struct bnx2x_exeq_elem *elem)
1102 {
1103         struct bnx2x_exeq_elem *pos;
1104         struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;
1105
1106         /* Check pending for execution commands */
1107         list_for_each_entry(pos, &o->exe_queue, link)
1108                 if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data,
1109                               sizeof(*data)) &&
1110                     (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1111                         return pos;
1112
1113         return NULL;
1114 }
1115
1116 static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
1117         struct bnx2x_exe_queue_obj *o,
1118         struct bnx2x_exeq_elem *elem)
1119 {
1120         struct bnx2x_exeq_elem *pos;
1121         struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;
1122
1123         /* Check pending for execution commands */
1124         list_for_each_entry(pos, &o->exe_queue, link)
1125                 if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data,
1126                               sizeof(*data)) &&
1127                     (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1128                         return pos;
1129
1130         return NULL;
1131 }
1132
1133 /**
1134  * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
1135  *
1136  * @bp:         device handle
1137  * @qo:         bnx2x_qable_obj
1138  * @elem:       bnx2x_exeq_elem
1139  *
1140  * Checks that the requested configuration can be added. If yes and if
1141  * requested, consumes a CAM credit.
1142  *
1143  * The 'validate' is run after the 'optimize'.
1144  *
1145  */
1146 static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
1147                                               union bnx2x_qable_obj *qo,
1148                                               struct bnx2x_exeq_elem *elem)
1149 {
1150         struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1151         struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1152         int rc;
1153
1154         /* Check the registry */
1155         rc = o->check_add(bp, o, &elem->cmd_data.vlan_mac.u);
1156         if (rc) {
1157                 DP(BNX2X_MSG_SP, "ADD command is not allowed considering current registry state.\n");
1158                 return rc;
1159         }
1160
1161         /* Check if there is a pending ADD command for this
1162          * MAC/VLAN/VLAN-MAC. Return an error if there is.
1163          */
1164         if (exeq->get(exeq, elem)) {
1165                 DP(BNX2X_MSG_SP, "There is a pending ADD command already\n");
1166                 return -EEXIST;
1167         }
1168
1169         /* TODO: Check the pending MOVE from other objects where this
1170          * object is a destination object.
1171          */
1172
1173         /* Consume the credit unless explicitly requested not to */
1174         if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1175                        &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1176             o->get_credit(o)))
1177                 return -EINVAL;
1178
1179         return 0;
1180 }
1181
1182 /**
1183  * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed
1184  *
1185  * @bp:         device handle
1186  * @qo:         qable object to check
1187  * @elem:       element that needs to be deleted
1188  *
1189  * Checks that the requested configuration can be deleted. If yes and if
1190  * requested, returns a CAM credit.
1191  *
1192  * The 'validate' is run after the 'optimize'.
1193  */
1194 static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
1195                                               union bnx2x_qable_obj *qo,
1196                                               struct bnx2x_exeq_elem *elem)
1197 {
1198         struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1199         struct bnx2x_vlan_mac_registry_elem *pos;
1200         struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1201         struct bnx2x_exeq_elem query_elem;
1202
1203         /* If this classification cannot be deleted (it doesn't exist)
1204          * - return -EEXIST.
1205          */
1206         pos = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
1207         if (!pos) {
1208                 DP(BNX2X_MSG_SP, "DEL command is not allowed considering current registry state\n");
1209                 return -EEXIST;
1210         }
1211
1212         /* Check if there are pending DEL or MOVE commands for this
1213          * MAC/VLAN/VLAN-MAC. Return an error if so.
1214          */
1215         memcpy(&query_elem, elem, sizeof(query_elem));
1216
1217         /* Check for MOVE commands */
1218         query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE;
1219         if (exeq->get(exeq, &query_elem)) {
1220                 BNX2X_ERR("There is a pending MOVE command already\n");
1221                 return -EINVAL;
1222         }
1223
1224         /* Check for DEL commands */
1225         if (exeq->get(exeq, elem)) {
1226                 DP(BNX2X_MSG_SP, "There is a pending DEL command already\n");
1227                 return -EEXIST;
1228         }
1229
1230         /* Return the credit to the credit pool unless explicitly requested not to */
1231         if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1232                        &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1233             o->put_credit(o))) {
1234                 BNX2X_ERR("Failed to return a credit\n");
1235                 return -EINVAL;
1236         }
1237
1238         return 0;
1239 }
1240
1241 /**
1242  * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed
1243  *
1244  * @bp:         device handle
1245  * @qo:         qable object to check (source)
1246  * @elem:       element that needs to be moved
1247  *
1248  * Checks that the requested configuration can be moved. If yes and if
1249  * requested, returns a CAM credit.
1250  *
1251  * The 'validate' is run after the 'optimize'.
1252  */
1253 static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
1254                                                union bnx2x_qable_obj *qo,
1255                                                struct bnx2x_exeq_elem *elem)
1256 {
1257         struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac;
1258         struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
1259         struct bnx2x_exeq_elem query_elem;
1260         struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
1261         struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;
1262
1263         /* Check if we can perform this operation based on the current registry
1264          * state.
1265          */
1266         if (!src_o->check_move(bp, src_o, dest_o,
1267                                &elem->cmd_data.vlan_mac.u)) {
1268                 DP(BNX2X_MSG_SP, "MOVE command is not allowed considering current registry state\n");
1269                 return -EINVAL;
1270         }
1271
1272         /* Check if there is an already pending DEL or MOVE command for the
1273          * source object or ADD command for a destination object. Return an
1274          * error if so.
1275          */
1276         memcpy(&query_elem, elem, sizeof(query_elem));
1277
1278         /* Check DEL on source */
1279         query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
1280         if (src_exeq->get(src_exeq, &query_elem)) {
1281                 BNX2X_ERR("There is a pending DEL command on the source queue already\n");
1282                 return -EINVAL;
1283         }
1284
1285         /* Check MOVE on source */
1286         if (src_exeq->get(src_exeq, elem)) {
1287                 DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n");
1288                 return -EEXIST;
1289         }
1290
1291         /* Check ADD on destination */
1292         query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
1293         if (dest_exeq->get(dest_exeq, &query_elem)) {
1294                 BNX2X_ERR("There is a pending ADD command on the destination queue already\n");
1295                 return -EINVAL;
1296         }
1297
1298         /* Consume the credit unless explicitly requested not to */
1299         if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
1300                        &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1301             dest_o->get_credit(dest_o)))
1302                 return -EINVAL;
1303
1304         if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1305                        &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1306             src_o->put_credit(src_o))) {
1307                 /* return the credit taken from dest... */
1308                 dest_o->put_credit(dest_o);
1309                 return -EINVAL;
1310         }
1311
1312         return 0;
1313 }
1314
1315 static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
1316                                    union bnx2x_qable_obj *qo,
1317                                    struct bnx2x_exeq_elem *elem)
1318 {
1319         switch (elem->cmd_data.vlan_mac.cmd) {
1320         case BNX2X_VLAN_MAC_ADD:
1321                 return bnx2x_validate_vlan_mac_add(bp, qo, elem);
1322         case BNX2X_VLAN_MAC_DEL:
1323                 return bnx2x_validate_vlan_mac_del(bp, qo, elem);
1324         case BNX2X_VLAN_MAC_MOVE:
1325                 return bnx2x_validate_vlan_mac_move(bp, qo, elem);
1326         default:
1327                 return -EINVAL;
1328         }
1329 }
1330
1331 static int bnx2x_remove_vlan_mac(struct bnx2x *bp,
1332                                   union bnx2x_qable_obj *qo,
1333                                   struct bnx2x_exeq_elem *elem)
1334 {
1335         int rc = 0;
1336
1337         /* If consumption wasn't required, nothing to do */
1338         if (test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1339                      &elem->cmd_data.vlan_mac.vlan_mac_flags))
1340                 return 0;
1341
1342         switch (elem->cmd_data.vlan_mac.cmd) {
1343         case BNX2X_VLAN_MAC_ADD:
1344         case BNX2X_VLAN_MAC_MOVE:
1345                 rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
1346                 break;
1347         case BNX2X_VLAN_MAC_DEL:
1348                 rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
1349                 break;
1350         default:
1351                 return -EINVAL;
1352         }
1353
1354         if (rc != true)
1355                 return -EINVAL;
1356
1357         return 0;
1358 }
1359
1360 /**
1361  * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes.
1362  *
1363  * @bp:         device handle
1364  * @o:          bnx2x_vlan_mac_obj
1365  *
1366  */
1367 static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
1368                                struct bnx2x_vlan_mac_obj *o)
1369 {
1370         int cnt = 5000, rc;
1371         struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1372         struct bnx2x_raw_obj *raw = &o->raw;
1373
1374         while (cnt--) {
1375                 /* Wait for the current command to complete */
1376                 rc = raw->wait_comp(bp, raw);
1377                 if (rc)
1378                         return rc;
1379
1380                 /* Wait until there are no pending commands */
1381                 if (!bnx2x_exe_queue_empty(exeq))
1382                         usleep_range(1000, 2000);
1383                 else
1384                         return 0;
1385         }
1386
1387         return -EBUSY;
1388 }
1389
1390 static int __bnx2x_vlan_mac_execute_step(struct bnx2x *bp,
1391                                          struct bnx2x_vlan_mac_obj *o,
1392                                          unsigned long *ramrod_flags)
1393 {
1394         int rc = 0;
1395
1396         spin_lock_bh(&o->exe_queue.lock);
1397
1398         DP(BNX2X_MSG_SP, "vlan_mac_execute_step - trying to take writer lock\n");
1399         rc = __bnx2x_vlan_mac_h_write_trylock(bp, o);
1400
1401         if (rc != 0) {
1402                 __bnx2x_vlan_mac_h_pend(bp, o, *ramrod_flags);
1403
1404                 /* The calling function should not differentiate between this
1405                  * case and the case in which there is already a pending ramrod.
1406                  */
1407                 rc = 1;
1408         } else {
1409                 rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
1410         }
1411         spin_unlock_bh(&o->exe_queue.lock);
1412
1413         return rc;
1414 }
1415
1416 /**
1417  * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
1418  *
1419  * @bp:         device handle
1420  * @o:          bnx2x_vlan_mac_obj
1421  * @cqe:        completion element from the event ring
1422  * @ramrod_flags: if RAMROD_CONT is set, the next execution chunk is scheduled
1423  *
1424  */
1425 static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
1426                                    struct bnx2x_vlan_mac_obj *o,
1427                                    union event_ring_elem *cqe,
1428                                    unsigned long *ramrod_flags)
1429 {
1430         struct bnx2x_raw_obj *r = &o->raw;
1431         int rc;
1432
1433         /* Clearing the pending list & raw state should be made
1434          * atomically (as execution flow assumes they represent the same).
1435          */
1436         spin_lock_bh(&o->exe_queue.lock);
1437
1438         /* Reset pending list */
1439         __bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);
1440
1441         /* Clear pending */
1442         r->clear_pending(r);
1443
1444         spin_unlock_bh(&o->exe_queue.lock);
1445
1446         /* If ramrod failed this is most likely a SW bug */
1447         if (cqe->message.error)
1448                 return -EINVAL;
1449
1450         /* Run the next bulk of pending commands if requested */
1451         if (test_bit(RAMROD_CONT, ramrod_flags)) {
1452                 rc = __bnx2x_vlan_mac_execute_step(bp, o, ramrod_flags);
1453
1454                 if (rc < 0)
1455                         return rc;
1456         }
1457
1458         /* If there is more work to do return PENDING */
1459         if (!bnx2x_exe_queue_empty(&o->exe_queue))
1460                 return 1;
1461
1462         return 0;
1463 }
1464
1465 /**
1466  * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands.
1467  *
1468  * @bp:         device handle
1469  * @qo:         bnx2x_qable_obj
1470  * @elem:       bnx2x_exeq_elem
 *
 * If a command of the opposite type (ADD vs. DEL) for the same MAC/VLAN is
 * already pending in the execution queue, the two cancel out: the pending
 * command is removed from the queue and the CAM credit accounting is
 * adjusted accordingly.
1471  */
1472 static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
1473                                    union bnx2x_qable_obj *qo,
1474                                    struct bnx2x_exeq_elem *elem)
1475 {
1476         struct bnx2x_exeq_elem query, *pos;
1477         struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1478         struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1479
1480         memcpy(&query, elem, sizeof(query));
1481
1482         switch (elem->cmd_data.vlan_mac.cmd) {
1483         case BNX2X_VLAN_MAC_ADD:
1484                 query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
1485                 break;
1486         case BNX2X_VLAN_MAC_DEL:
1487                 query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
1488                 break;
1489         default:
1490                 /* Don't handle anything other than ADD or DEL */
1491                 return 0;
1492         }
1493
1494         /* If we found the appropriate element - delete it */
1495         pos = exeq->get(exeq, &query);
1496         if (pos) {
1497
1498                 /* Return the credit of the optimized command */
1499                 if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1500                               &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
1501                         if ((query.cmd_data.vlan_mac.cmd ==
1502                              BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) {
1503                                 BNX2X_ERR("Failed to return the credit for the optimized ADD command\n");
1504                                 return -EINVAL;
1505                         } else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
1506                                 BNX2X_ERR("Failed to recover the credit from the optimized DEL command\n");
1507                                 return -EINVAL;
1508                         }
1509                 }
1510
1511                 DP(BNX2X_MSG_SP, "Optimizing %s command\n",
1512                            (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
1513                            "ADD" : "DEL");
1514
1515                 list_del(&pos->link);
1516                 bnx2x_exe_queue_free_elem(bp, pos);
1517                 return 1;
1518         }
1519
1520         return 0;
1521 }
1522
1523 /**
1524  * bnx2x_vlan_mac_get_registry_elem - prepare a registry element
1525  *
1526  * @bp:   device handle
1527  * @o:          VLAN-MAC object whose registry and CAM offsets are used
1528  * @elem:       execution queue element carrying the command data
1529  * @restore:    true for a RESTORE flow (look up the existing element instead)
1530  * @re:         output - the prepared registry element
1531  *
1532  * Prepare a registry element according to the current command request.
1533  */
1534 static inline int bnx2x_vlan_mac_get_registry_elem(
1535         struct bnx2x *bp,
1536         struct bnx2x_vlan_mac_obj *o,
1537         struct bnx2x_exeq_elem *elem,
1538         bool restore,
1539         struct bnx2x_vlan_mac_registry_elem **re)
1540 {
1541         enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
1542         struct bnx2x_vlan_mac_registry_elem *reg_elem;
1543
1544         /* Allocate a new registry element if needed. */
1545         if (!restore &&
1546             ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) {
1547                 reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC);
1548                 if (!reg_elem)
1549                         return -ENOMEM;
1550
1551                 /* Get a new CAM offset */
1552                 if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
1553                         /* This shall never happen, because we have checked the
1554                          * CAM availability in the 'validate'.
1555                          */
1556                         WARN_ON(1);
1557                         kfree(reg_elem);
1558                         return -EINVAL;
1559                 }
1560
1561                 DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset);
1562
1563                 /* Set a VLAN-MAC data */
1564                 memcpy(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
1565                           sizeof(reg_elem->u));
1566
1567                 /* Copy the flags (needed for DEL and RESTORE flows) */
1568                 reg_elem->vlan_mac_flags =
1569                         elem->cmd_data.vlan_mac.vlan_mac_flags;
1570         } else /* DEL, RESTORE */
1571                 reg_elem = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
1572
1573         *re = reg_elem;
1574         return 0;
1575 }
1576
1577 /**
1578  * bnx2x_execute_vlan_mac - execute vlan mac command
1579  *
1580  * @bp:                 device handle
1581  * @qo:                 bnx2x_qable_obj holding the VLAN-MAC object
1582  * @exe_chunk:          list of commands to execute in this ramrod
1583  * @ramrod_flags:       RAMROD_RESTORE and RAMROD_DRV_CLR_ONLY modify the flow
1584  *
1585  * Go and send a ramrod!
1586  */
1587 static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
1588                                   union bnx2x_qable_obj *qo,
1589                                   struct list_head *exe_chunk,
1590                                   unsigned long *ramrod_flags)
1591 {
1592         struct bnx2x_exeq_elem *elem;
1593         struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
1594         struct bnx2x_raw_obj *r = &o->raw;
1595         int rc, idx = 0;
1596         bool restore = test_bit(RAMROD_RESTORE, ramrod_flags);
1597         bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags);
1598         struct bnx2x_vlan_mac_registry_elem *reg_elem;
1599         enum bnx2x_vlan_mac_cmd cmd;
1600
1601         /* If DRIVER_ONLY execution is requested, clean up the registry
1602          * and exit. Otherwise send a ramrod to the FW.
1603          */
1604         if (!drv_only) {
1605                 WARN_ON(r->check_pending(r));
1606
1607                 /* Set pending */
1608                 r->set_pending(r);
1609
1610                 /* Fill the ramrod data */
1611                 list_for_each_entry(elem, exe_chunk, link) {
1612                         cmd = elem->cmd_data.vlan_mac.cmd;
1613                         /* In a MOVE command we add to the target object, so
1614                          * change the object used for the CAM search.
1615                          */
1616                         if (cmd == BNX2X_VLAN_MAC_MOVE)
1617                                 cam_obj = elem->cmd_data.vlan_mac.target_obj;
1618                         else
1619                                 cam_obj = o;
1620
1621                         rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj,
1622                                                               elem, restore,
1623                                                               &reg_elem);
1624                         if (rc)
1625                                 goto error_exit;
1626
1627                         WARN_ON(!reg_elem);
1628
1629                         /* Push a new entry into the registry */
1630                         if (!restore &&
1631                             ((cmd == BNX2X_VLAN_MAC_ADD) ||
1632                             (cmd == BNX2X_VLAN_MAC_MOVE)))
1633                                 list_add(&reg_elem->link, &cam_obj->head);
1634
1635                         /* Configure a single command in a ramrod data buffer */
1636                         o->set_one_rule(bp, o, elem, idx,
1637                                         reg_elem->cam_offset);
1638
1639                         /* MOVE command consumes 2 entries in the ramrod data */
1640                         if (cmd == BNX2X_VLAN_MAC_MOVE)
1641                                 idx += 2;
1642                         else
1643                                 idx++;
1644                 }
1645
1646                 /* No need for an explicit memory barrier here as long as we
1647                  * ensure the ordering of writing to the SPQ element
1648                  * and updating of the SPQ producer which involves a memory
1649                  * read. If the memory read is removed we will have to put a
1650                  * full memory barrier there (inside bnx2x_sp_post()).
1651                  */
1652
1653                 rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
1654                                    U64_HI(r->rdata_mapping),
1655                                    U64_LO(r->rdata_mapping),
1656                                    ETH_CONNECTION_TYPE);
1657                 if (rc)
1658                         goto error_exit;
1659         }
1660
1661         /* Now, when we are done with the ramrod - clean up the registry */
1662         list_for_each_entry(elem, exe_chunk, link) {
1663                 cmd = elem->cmd_data.vlan_mac.cmd;
1664                 if ((cmd == BNX2X_VLAN_MAC_DEL) ||
1665                     (cmd == BNX2X_VLAN_MAC_MOVE)) {
1666                         reg_elem = o->check_del(bp, o,
1667                                                 &elem->cmd_data.vlan_mac.u);
1668
1669                         WARN_ON(!reg_elem);
1670
1671                         o->put_cam_offset(o, reg_elem->cam_offset);
1672                         list_del(&reg_elem->link);
1673                         kfree(reg_elem);
1674                 }
1675         }
1676
1677         if (!drv_only)
1678                 return 1;
1679         else
1680                 return 0;
1681
1682 error_exit:
1683         r->clear_pending(r);
1684
1685         /* Cleanup a registry in case of a failure */
1686         list_for_each_entry(elem, exe_chunk, link) {
1687                 cmd = elem->cmd_data.vlan_mac.cmd;
1688
1689                 if (cmd == BNX2X_VLAN_MAC_MOVE)
1690                         cam_obj = elem->cmd_data.vlan_mac.target_obj;
1691                 else
1692                         cam_obj = o;
1693
1694                 /* Delete all newly added above entries */
1695                 if (!restore &&
1696                     ((cmd == BNX2X_VLAN_MAC_ADD) ||
1697                     (cmd == BNX2X_VLAN_MAC_MOVE))) {
1698                         reg_elem = o->check_del(bp, cam_obj,
1699                                                 &elem->cmd_data.vlan_mac.u);
1700                         if (reg_elem) {
1701                                 list_del(&reg_elem->link);
1702                                 kfree(reg_elem);
1703                         }
1704                 }
1705         }
1706
1707         return rc;
1708 }
1709
1710 static inline int bnx2x_vlan_mac_push_new_cmd(
1711         struct bnx2x *bp,
1712         struct bnx2x_vlan_mac_ramrod_params *p)
1713 {
1714         struct bnx2x_exeq_elem *elem;
1715         struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1716         bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags);
1717
1718         /* Allocate the execution queue element */
1719         elem = bnx2x_exe_queue_alloc_elem(bp);
1720         if (!elem)
1721                 return -ENOMEM;
1722
1723         /* Set the command 'length' */
1724         switch (p->user_req.cmd) {
1725         case BNX2X_VLAN_MAC_MOVE:
1726                 elem->cmd_len = 2;
1727                 break;
1728         default:
1729                 elem->cmd_len = 1;
1730         }
1731
1732         /* Fill the object specific info */
1733         memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));
1734
1735         /* Try to add a new command to the pending list */
1736         return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
1737 }
1738
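     /* A minimal caller sketch (illustrative only, not a path taken in this
      * file); it assumes a MAC object that was set up with bnx2x_init_mac_obj()
      * and a u8 addr[ETH_ALEN] to classify:
      *
      *        struct bnx2x_vlan_mac_ramrod_params p;
      *
      *        memset(&p, 0, sizeof(p));
      *        p.vlan_mac_obj = mac_obj;
      *        p.user_req.cmd = BNX2X_VLAN_MAC_ADD;
      *        memcpy(p.user_req.u.mac.mac, addr, ETH_ALEN);
      *        __set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
      *        rc = bnx2x_config_vlan_mac(bp, &p);
      *
      * With RAMROD_COMP_WAIT set, 0 means the ADD completed and a negative
      * value is an error; without it, a positive return means the command is
      * still pending in the execution queue.
      */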
1739 /**
1740  * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
1741  *
1742  * @bp:   device handle
1743  * @p:   ramrod parameters: the object, the user request and the execution flags
1744  *
1745  */
1746 int bnx2x_config_vlan_mac(struct bnx2x *bp,
1747                            struct bnx2x_vlan_mac_ramrod_params *p)
1748 {
1749         int rc = 0;
1750         struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1751         unsigned long *ramrod_flags = &p->ramrod_flags;
1752         bool cont = test_bit(RAMROD_CONT, ramrod_flags);
1753         struct bnx2x_raw_obj *raw = &o->raw;
1754
1755         /*
1756          * Add new elements to the execution list for commands that require it.
1757          */
1758         if (!cont) {
1759                 rc = bnx2x_vlan_mac_push_new_cmd(bp, p);
1760                 if (rc)
1761                         return rc;
1762         }
1763
1764         /* If nothing more will be executed in this iteration, we want to
1765          * return PENDING whenever there are still pending commands.
1766          */
1767         if (!bnx2x_exe_queue_empty(&o->exe_queue))
1768                 rc = 1;
1769
1770         if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags))  {
1771                 DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n");
1772                 raw->clear_pending(raw);
1773         }
1774
1775         /* Execute commands if required */
1776         if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
1777             test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
1778                 rc = __bnx2x_vlan_mac_execute_step(bp, p->vlan_mac_obj,
1779                                                    &p->ramrod_flags);
1780                 if (rc < 0)
1781                         return rc;
1782         }
1783
1784         /* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set,
1785          * the user wants to wait until the last command is done.
1786          */
1787         if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
1788                 /* Wait at most the current exe_queue length iterations plus
1789                  * one (for the current pending command).
1790                  */
1791                 int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;
1792
1793                 while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
1794                        max_iterations--) {
1795
1796                         /* Wait for the current command to complete */
1797                         rc = raw->wait_comp(bp, raw);
1798                         if (rc)
1799                                 return rc;
1800
1801                         /* Make a next step */
1802                         rc = __bnx2x_vlan_mac_execute_step(bp,
1803                                                            p->vlan_mac_obj,
1804                                                            &p->ramrod_flags);
1805                         if (rc < 0)
1806                                 return rc;
1807                 }
1808
1809                 return 0;
1810         }
1811
1812         return rc;
1813 }
1814
1815 /**
1816  * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
1817  *
1818  * @bp:                 device handle
1819  * @o:                  VLAN-MAC object to delete the elements from
1820  * @vlan_mac_flags:     only elements whose flags match are deleted
1821  * @ramrod_flags:       execution flags to be used for this deletion
1822  *
1823  * Returns zero if the last operation has completed successfully and there are
1824  * no more elements left, a positive value if the last operation has completed
1825  * successfully and there are more previously configured elements, and a
1826  * negative value if the current operation has failed.
1827  */
1828 static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
1829                                   struct bnx2x_vlan_mac_obj *o,
1830                                   unsigned long *vlan_mac_flags,
1831                                   unsigned long *ramrod_flags)
1832 {
1833         struct bnx2x_vlan_mac_registry_elem *pos = NULL;
1834         struct bnx2x_vlan_mac_ramrod_params p;
1835         struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1836         struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
1837         unsigned long flags;
1838         int read_lock;
1839         int rc = 0;
1840
1841         /* Clear pending commands first */
1842
1843         spin_lock_bh(&exeq->lock);
1844
1845         list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
1846                 flags = exeq_pos->cmd_data.vlan_mac.vlan_mac_flags;
1847                 if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) ==
1848                     BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
1849                         rc = exeq->remove(bp, exeq->owner, exeq_pos);
1850                         if (rc) {
1851                                 BNX2X_ERR("Failed to remove command\n");
1852                                 spin_unlock_bh(&exeq->lock);
1853                                 return rc;
1854                         }
1855                         list_del(&exeq_pos->link);
1856                         bnx2x_exe_queue_free_elem(bp, exeq_pos);
1857                 }
1858         }
1859
1860         spin_unlock_bh(&exeq->lock);
1861
1862         /* Prepare a command request */
1863         memset(&p, 0, sizeof(p));
1864         p.vlan_mac_obj = o;
1865         p.ramrod_flags = *ramrod_flags;
1866         p.user_req.cmd = BNX2X_VLAN_MAC_DEL;
1867
1868         /* Add all but the last VLAN-MAC to the execution queue without
1869          * actually executing anything.
1870          */
1871         __clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
1872         __clear_bit(RAMROD_EXEC, &p.ramrod_flags);
1873         __clear_bit(RAMROD_CONT, &p.ramrod_flags);
1874
1875         DP(BNX2X_MSG_SP, "vlan_mac_del_all -- taking vlan_mac_lock (reader)\n");
1876         read_lock = bnx2x_vlan_mac_h_read_lock(bp, o);
1877         if (read_lock != 0)
1878                 return read_lock;
1879
1880         list_for_each_entry(pos, &o->head, link) {
1881                 flags = pos->vlan_mac_flags;
1882                 if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) ==
1883                     BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
1884                         p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
1885                         memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
1886                         rc = bnx2x_config_vlan_mac(bp, &p);
1887                         if (rc < 0) {
1888                                 BNX2X_ERR("Failed to add a new DEL command\n");
1889                                 bnx2x_vlan_mac_h_read_unlock(bp, o);
1890                                 return rc;
1891                         }
1892                 }
1893         }
1894
1895         DP(BNX2X_MSG_SP, "vlan_mac_del_all -- releasing vlan_mac_lock (reader)\n");
1896         bnx2x_vlan_mac_h_read_unlock(bp, o);
1897
1898         p.ramrod_flags = *ramrod_flags;
1899         __set_bit(RAMROD_CONT, &p.ramrod_flags);
1900
1901         return bnx2x_config_vlan_mac(bp, &p);
1902 }
1903
1904 static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id,
1905         u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state,
1906         unsigned long *pstate, bnx2x_obj_type type)
1907 {
1908         raw->func_id = func_id;
1909         raw->cid = cid;
1910         raw->cl_id = cl_id;
1911         raw->rdata = rdata;
1912         raw->rdata_mapping = rdata_mapping;
1913         raw->state = state;
1914         raw->pstate = pstate;
1915         raw->obj_type = type;
1916         raw->check_pending = bnx2x_raw_check_pending;
1917         raw->clear_pending = bnx2x_raw_clear_pending;
1918         raw->set_pending = bnx2x_raw_set_pending;
1919         raw->wait_comp = bnx2x_raw_wait;
1920 }
1921
1922 static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
1923         u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping,
1924         int state, unsigned long *pstate, bnx2x_obj_type type,
1925         struct bnx2x_credit_pool_obj *macs_pool,
1926         struct bnx2x_credit_pool_obj *vlans_pool)
1927 {
1928         INIT_LIST_HEAD(&o->head);
1929         o->head_reader = 0;
1930         o->head_exe_request = false;
1931         o->saved_ramrod_flags = 0;
1932
1933         o->macs_pool = macs_pool;
1934         o->vlans_pool = vlans_pool;
1935
1936         o->delete_all = bnx2x_vlan_mac_del_all;
1937         o->restore = bnx2x_vlan_mac_restore;
1938         o->complete = bnx2x_complete_vlan_mac;
1939         o->wait = bnx2x_wait_vlan_mac;
1940
1941         bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
1942                            state, pstate, type);
1943 }
1944
1945 void bnx2x_init_mac_obj(struct bnx2x *bp,
1946                         struct bnx2x_vlan_mac_obj *mac_obj,
1947                         u8 cl_id, u32 cid, u8 func_id, void *rdata,
1948                         dma_addr_t rdata_mapping, int state,
1949                         unsigned long *pstate, bnx2x_obj_type type,
1950                         struct bnx2x_credit_pool_obj *macs_pool)
1951 {
1952         union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj;
1953
1954         bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
1955                                    rdata_mapping, state, pstate, type,
1956                                    macs_pool, NULL);
1957
1958         /* CAM credit pool handling */
1959         mac_obj->get_credit = bnx2x_get_credit_mac;
1960         mac_obj->put_credit = bnx2x_put_credit_mac;
1961         mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
1962         mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
1963
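             /* E1x uses the legacy per-MAC SET_MAC ramrod, so its execution
              * queue handles one command per chunk; E2 and newer batch up to
              * CLASSIFY_RULES_COUNT rules in a single classification ramrod.
              */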
1964         if (CHIP_IS_E1x(bp)) {
1965                 mac_obj->set_one_rule      = bnx2x_set_one_mac_e1x;
1966                 mac_obj->check_del         = bnx2x_check_mac_del;
1967                 mac_obj->check_add         = bnx2x_check_mac_add;
1968                 mac_obj->check_move        = bnx2x_check_move_always_err;
1969                 mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
1970
1971                 /* Exe Queue */
1972                 bnx2x_exe_queue_init(bp,
1973                                      &mac_obj->exe_queue, 1, qable_obj,
1974                                      bnx2x_validate_vlan_mac,
1975                                      bnx2x_remove_vlan_mac,
1976                                      bnx2x_optimize_vlan_mac,
1977                                      bnx2x_execute_vlan_mac,
1978                                      bnx2x_exeq_get_mac);
1979         } else {
1980                 mac_obj->set_one_rule      = bnx2x_set_one_mac_e2;
1981                 mac_obj->check_del         = bnx2x_check_mac_del;
1982                 mac_obj->check_add         = bnx2x_check_mac_add;
1983                 mac_obj->check_move        = bnx2x_check_move;
1984                 mac_obj->ramrod_cmd        =
1985                         RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
1986                 mac_obj->get_n_elements    = bnx2x_get_n_elements;
1987
1988                 /* Exe Queue */
1989                 bnx2x_exe_queue_init(bp,
1990                                      &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
1991                                      qable_obj, bnx2x_validate_vlan_mac,
1992                                      bnx2x_remove_vlan_mac,
1993                                      bnx2x_optimize_vlan_mac,
1994                                      bnx2x_execute_vlan_mac,
1995                                      bnx2x_exeq_get_mac);
1996         }
1997 }
1998
1999 void bnx2x_init_vlan_obj(struct bnx2x *bp,
2000                          struct bnx2x_vlan_mac_obj *vlan_obj,
2001                          u8 cl_id, u32 cid, u8 func_id, void *rdata,
2002                          dma_addr_t rdata_mapping, int state,
2003                          unsigned long *pstate, bnx2x_obj_type type,
2004                          struct bnx2x_credit_pool_obj *vlans_pool)
2005 {
2006         union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj;
2007
2008         bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
2009                                    rdata_mapping, state, pstate, type, NULL,
2010                                    vlans_pool);
2011
2012         vlan_obj->get_credit = bnx2x_get_credit_vlan;
2013         vlan_obj->put_credit = bnx2x_put_credit_vlan;
2014         vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan;
2015         vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan;
2016
2017         if (CHIP_IS_E1x(bp)) {
2018                 BNX2X_ERR("Chips older than E2 are not supported\n");
2019                 BUG();
2020         } else {
2021                 vlan_obj->set_one_rule      = bnx2x_set_one_vlan_e2;
2022                 vlan_obj->check_del         = bnx2x_check_vlan_del;
2023                 vlan_obj->check_add         = bnx2x_check_vlan_add;
2024                 vlan_obj->check_move        = bnx2x_check_move;
2025                 vlan_obj->ramrod_cmd        =
2026                         RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2027                 vlan_obj->get_n_elements    = bnx2x_get_n_elements;
2028
2029                 /* Exe Queue */
2030                 bnx2x_exe_queue_init(bp,
2031                                      &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
2032                                      qable_obj, bnx2x_validate_vlan_mac,
2033                                      bnx2x_remove_vlan_mac,
2034                                      bnx2x_optimize_vlan_mac,
2035                                      bnx2x_execute_vlan_mac,
2036                                      bnx2x_exeq_get_vlan);
2037         }
2038 }
2039
2040 /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
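     /* On E1x the rx_mode is applied by writing a tstorm_eth_mac_filter_config
      * structure directly into TSTORM internal memory (no ramrod, completes
      * immediately); on E2 and newer it is sent as a FILTER_RULES ramrod.
      */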
2041 static inline void __storm_memset_mac_filters(struct bnx2x *bp,
2042                         struct tstorm_eth_mac_filter_config *mac_filters,
2043                         u16 pf_id)
2044 {
2045         size_t size = sizeof(struct tstorm_eth_mac_filter_config);
2046
2047         u32 addr = BAR_TSTRORM_INTMEM +
2048                         TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
2049
2050         __storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
2051 }
2052
2053 static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
2054                                  struct bnx2x_rx_mode_ramrod_params *p)
2055 {
2056         /* update the bp MAC filter structure */
2057         u32 mask = (1 << p->cl_id);
2058
2059         struct tstorm_eth_mac_filter_config *mac_filters =
2060                 (struct tstorm_eth_mac_filter_config *)p->rdata;
2061
2062         /* initial setting is drop-all */
2063         u8 drop_all_ucast = 1, drop_all_mcast = 1;
2064         u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2065         u8 unmatched_unicast = 0;
2066
2067         /* In E1x we only take the Rx accept flags into account, since Tx
2068          * switching isn't enabled. */
2069         if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
2070                 /* accept matched ucast */
2071                 drop_all_ucast = 0;
2072
2073         if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags))
2074                 /* accept matched mcast */
2075                 drop_all_mcast = 0;
2076
2077         if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
2078                 /* accept all ucast */
2079                 drop_all_ucast = 0;
2080                 accp_all_ucast = 1;
2081         }
2082         if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
2083                 /* accept all mcast */
2084                 drop_all_mcast = 0;
2085                 accp_all_mcast = 1;
2086         }
2087         if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags))
2088                 /* accept (all) bcast */
2089                 accp_all_bcast = 1;
2090         if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags))
2091                 /* accept unmatched unicasts */
2092                 unmatched_unicast = 1;
2093
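             /* Each client owns one bit (1 << cl_id) in every per-port filter
              * mask: set the bit to enable the behaviour for this client,
              * clear it to disable it, leaving other clients' bits untouched.
              */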
2094         mac_filters->ucast_drop_all = drop_all_ucast ?
2095                 mac_filters->ucast_drop_all | mask :
2096                 mac_filters->ucast_drop_all & ~mask;
2097
2098         mac_filters->mcast_drop_all = drop_all_mcast ?
2099                 mac_filters->mcast_drop_all | mask :
2100                 mac_filters->mcast_drop_all & ~mask;
2101
2102         mac_filters->ucast_accept_all = accp_all_ucast ?
2103                 mac_filters->ucast_accept_all | mask :
2104                 mac_filters->ucast_accept_all & ~mask;
2105
2106         mac_filters->mcast_accept_all = accp_all_mcast ?
2107                 mac_filters->mcast_accept_all | mask :
2108                 mac_filters->mcast_accept_all & ~mask;
2109
2110         mac_filters->bcast_accept_all = accp_all_bcast ?
2111                 mac_filters->bcast_accept_all | mask :
2112                 mac_filters->bcast_accept_all & ~mask;
2113
2114         mac_filters->unmatched_unicast = unmatched_unicast ?
2115                 mac_filters->unmatched_unicast | mask :
2116                 mac_filters->unmatched_unicast & ~mask;
2117
2118         DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\naccp_ucast 0x%x\n"
2119                          "accp_mcast 0x%x\naccp_bcast 0x%x\n",
2120            mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
2121            mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
2122            mac_filters->bcast_accept_all);
2123
2124         /* write the MAC filter structure */
2125         __storm_memset_mac_filters(bp, mac_filters, p->func_id);
2126
2127         /* The operation is completed */
2128         clear_bit(p->state, p->pstate);
2129         smp_mb__after_atomic();
2130
2131         return 0;
2132 }
2133
2134 /* Setup ramrod data */
2135 static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid,
2136                                 struct eth_classify_header *hdr,
2137                                 u8 rule_cnt)
2138 {
2139         hdr->echo = cpu_to_le32(cid);
2140         hdr->rule_cnt = rule_cnt;
2141 }
2142
2143 static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
2144                                 unsigned long *accept_flags,
2145                                 struct eth_filter_rules_cmd *cmd,
2146                                 bool clear_accept_all)
2147 {
2148         u16 state;
2149
2150         /* start with 'drop-all' */
2151         state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
2152                 ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2153
2154         if (test_bit(BNX2X_ACCEPT_UNICAST, accept_flags))
2155                 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2156
2157         if (test_bit(BNX2X_ACCEPT_MULTICAST, accept_flags))
2158                 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2159
2160         if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, accept_flags)) {
2161                 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2162                 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2163         }
2164
2165         if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, accept_flags)) {
2166                 state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2167                 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2168         }
2169
2170         if (test_bit(BNX2X_ACCEPT_BROADCAST, accept_flags))
2171                 state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2172
2173         if (test_bit(BNX2X_ACCEPT_UNMATCHED, accept_flags)) {
2174                 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2175                 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2176         }
2177
2178         if (test_bit(BNX2X_ACCEPT_ANY_VLAN, accept_flags))
2179                 state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
2180
2181         /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
2182         if (clear_accept_all) {
2183                 state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2184                 state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2185                 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2186                 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2187         }
2188
2189         cmd->state = cpu_to_le16(state);
2190 }
2191
2192 static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
2193                                 struct bnx2x_rx_mode_ramrod_params *p)
2194 {
2195         struct eth_filter_rules_ramrod_data *data = p->rdata;
2196         int rc;
2197         u8 rule_idx = 0;
2198
2199         /* Reset the ramrod data buffer */
2200         memset(data, 0, sizeof(*data));
2201
2202         /* Setup ramrod data */
2203
2204         /* Tx (internal switching) */
2205         if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2206                 data->rules[rule_idx].client_id = p->cl_id;
2207                 data->rules[rule_idx].func_id = p->func_id;
2208
2209                 data->rules[rule_idx].cmd_general_data =
2210                         ETH_FILTER_RULES_CMD_TX_CMD;
2211
2212                 bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
2213                                                &(data->rules[rule_idx++]),
2214                                                false);
2215         }
2216
2217         /* Rx */
2218         if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2219                 data->rules[rule_idx].client_id = p->cl_id;
2220                 data->rules[rule_idx].func_id = p->func_id;
2221
2222                 data->rules[rule_idx].cmd_general_data =
2223                         ETH_FILTER_RULES_CMD_RX_CMD;
2224
2225                 bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
2226                                                &(data->rules[rule_idx++]),
2227                                                false);
2228         }
2229
2230         /* If FCoE Queue configuration has been requested, configure the Rx and
2231          * internal switching modes for this queue in separate rules.
2232          *
2233          * The FCoE queue shall never be set to ACCEPT_ALL packets of any sort:
2234          * MCAST_ALL, UCAST_ALL, BCAST_ALL or UNMATCHED.
2235          */
2236         if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
2237                 /*  Tx (internal switching) */
2238                 if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2239                         data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2240                         data->rules[rule_idx].func_id = p->func_id;
2241
2242                         data->rules[rule_idx].cmd_general_data =
2243                                                 ETH_FILTER_RULES_CMD_TX_CMD;
2244
2245                         bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
2246                                                        &(data->rules[rule_idx]),
2247                                                        true);
2248                         rule_idx++;
2249                 }
2250
2251                 /* Rx */
2252                 if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2253                         data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2254                         data->rules[rule_idx].func_id = p->func_id;
2255
2256                         data->rules[rule_idx].cmd_general_data =
2257                                                 ETH_FILTER_RULES_CMD_RX_CMD;
2258
2259                         bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
2260                                                        &(data->rules[rule_idx]),
2261                                                        true);
2262                         rule_idx++;
2263                 }
2264         }
2265
2266         /* Set the ramrod header (most importantly - number of rules to
2267          * configure).
2268          */
2269         bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
2270
2271         DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx\n",
2272                          data->header.rule_cnt, p->rx_accept_flags,
2273                          p->tx_accept_flags);
2274
2275         /* No need for an explicit memory barrier here as long as we
2276          * ensure the ordering of writing to the SPQ element
2277          * and updating of the SPQ producer which involves a memory
2278          * read. If the memory read is removed we will have to put a
2279          * full memory barrier there (inside bnx2x_sp_post()).
2280          */
2281
2282         /* Send a ramrod */
2283         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid,
2284                            U64_HI(p->rdata_mapping),
2285                            U64_LO(p->rdata_mapping),
2286                            ETH_CONNECTION_TYPE);
2287         if (rc)
2288                 return rc;
2289
2290         /* Ramrod completion is pending */
2291         return 1;
2292 }
2293
2294 static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp,
2295                                       struct bnx2x_rx_mode_ramrod_params *p)
2296 {
2297         return bnx2x_state_wait(bp, p->state, p->pstate);
2298 }
2299
2300 static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp,
2301                                     struct bnx2x_rx_mode_ramrod_params *p)
2302 {
2303         /* Do nothing */
2304         return 0;
2305 }
2306
2307 int bnx2x_config_rx_mode(struct bnx2x *bp,
2308                          struct bnx2x_rx_mode_ramrod_params *p)
2309 {
2310         int rc;
2311
2312         /* Configure the new classification in the chip */
2313         rc = p->rx_mode_obj->config_rx_mode(bp, p);
2314         if (rc < 0)
2315                 return rc;
2316
2317         /* Wait for a ramrod completion if it was requested */
2318         if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2319                 rc = p->rx_mode_obj->wait_comp(bp, p);
2320                 if (rc)
2321                         return rc;
2322         }
2323
2324         return rc;
2325 }
2326
2327 void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
2328                             struct bnx2x_rx_mode_obj *o)
2329 {
2330         if (CHIP_IS_E1x(bp)) {
2331                 o->wait_comp      = bnx2x_empty_rx_mode_wait;
2332                 o->config_rx_mode = bnx2x_set_rx_mode_e1x;
2333         } else {
2334                 o->wait_comp      = bnx2x_wait_rx_mode_comp_e2;
2335                 o->config_rx_mode = bnx2x_set_rx_mode_e2;
2336         }
2337 }
2338
2339 /********************* Multicast verbs: SET, CLEAR ****************************/
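     /* The multicast match here is approximate: each MAC is hashed with CRC32C
      * and the top 8 bits of the CRC select one of 256 bins in the registry's
      * bit vector, so different MACs may map to the same bin.
      */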
2340 static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
2341 {
2342         return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
2343 }
2344
2345 struct bnx2x_mcast_mac_elem {
2346         struct list_head link;
2347         u8 mac[ETH_ALEN];
2348         u8 pad[2]; /* For a natural alignment of the following buffer */
2349 };
2350
2351 struct bnx2x_pending_mcast_cmd {
2352         struct list_head link;
2353         int type; /* BNX2X_MCAST_CMD_X */
2354         union {
2355                 struct list_head macs_head;
2356                 u32 macs_num; /* Needed for DEL command */
2357                 int next_bin; /* Needed for RESTORE flow with aprox match */
2358         } data;
2359
2360         bool done; /* set to true, when the command has been handled,
2361                     * practically used in 57712 handling only, where one pending
2362                     * command may be handled in a few operations. Since on
2363                     * other chips every operation is completed in a
2364                     * single ramrod, there is no need to utilize this field.
2365                     */
2366 };
2367
2368 static int bnx2x_mcast_wait(struct bnx2x *bp,
2369                             struct bnx2x_mcast_obj *o)
2370 {
2371         if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) ||
2372                         o->raw.wait_comp(bp, &o->raw))
2373                 return -EBUSY;
2374
2375         return 0;
2376 }
2377
2378 static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
2379                                    struct bnx2x_mcast_obj *o,
2380                                    struct bnx2x_mcast_ramrod_params *p,
2381                                    enum bnx2x_mcast_cmd cmd)
2382 {
2383         int total_sz;
2384         struct bnx2x_pending_mcast_cmd *new_cmd;
2385         struct bnx2x_mcast_mac_elem *cur_mac = NULL;
2386         struct bnx2x_mcast_list_elem *pos;
2387         int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ?
2388                              p->mcast_list_len : 0);
2389
2390         /* If the command is empty ("handle pending commands only"), break */
2391         if (!p->mcast_list_len)
2392                 return 0;
2393
2394         total_sz = sizeof(*new_cmd) +
2395                 macs_list_len * sizeof(struct bnx2x_mcast_mac_elem);
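             /* A single allocation holds the command header and, for an ADD,
              * the array of MAC elements placed right behind it (linked into
              * data.macs_head below).
              */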
2396
2397         /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
2398         new_cmd = kzalloc(total_sz, GFP_ATOMIC);
2399
2400         if (!new_cmd)
2401                 return -ENOMEM;
2402
2403         DP(BNX2X_MSG_SP, "About to enqueue a new %d command. macs_list_len=%d\n",
2404            cmd, macs_list_len);
2405
2406         INIT_LIST_HEAD(&new_cmd->data.macs_head);
2407
2408         new_cmd->type = cmd;
2409         new_cmd->done = false;
2410
2411         switch (cmd) {
2412         case BNX2X_MCAST_CMD_ADD:
2413                 cur_mac = (struct bnx2x_mcast_mac_elem *)
2414                           ((u8 *)new_cmd + sizeof(*new_cmd));
2415
2416                 /* Push the MACs of the current command into the pending command
2417                  * MACs list: FIFO
2418                  */
2419                 list_for_each_entry(pos, &p->mcast_list, link) {
2420                         memcpy(cur_mac->mac, pos->mac, ETH_ALEN);
2421                         list_add_tail(&cur_mac->link, &new_cmd->data.macs_head);
2422                         cur_mac++;
2423                 }
2424
2425                 break;
2426
2427         case BNX2X_MCAST_CMD_DEL:
2428                 new_cmd->data.macs_num = p->mcast_list_len;
2429                 break;
2430
2431         case BNX2X_MCAST_CMD_RESTORE:
2432                 new_cmd->data.next_bin = 0;
2433                 break;
2434
2435         default:
2436                 kfree(new_cmd);
2437                 BNX2X_ERR("Unknown command: %d\n", cmd);
2438                 return -EINVAL;
2439         }
2440
2441         /* Push the new pending command to the tail of the pending list: FIFO */
2442         list_add_tail(&new_cmd->link, &o->pending_cmds_head);
2443
2444         o->set_sched(o);
2445
2446         return 1;
2447 }
2448
2449 /**
2450  * bnx2x_mcast_get_next_bin - get the next set bin (index)
2451  *
2452  * @o:          multicast object holding the bins registry
2453  * @last:       index to start looking from (including)
2454  *
2455  * returns the next found (set) bin or a negative value if none is found.
2456  */
2457 static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last)
2458 {
2459         int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
2460
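             /* Bin number b lives in bit (b % BIT_VEC64_ELEM_SZ) of
              * vec[b / BIT_VEC64_ELEM_SZ]; scan the vector starting from @last.
              */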
2461         for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) {
2462                 if (o->registry.aprox_match.vec[i])
2463                         for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
2464                                 int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
2465                                 if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
2466                                                        vec, cur_bit)) {
2467                                         return cur_bit;
2468                                 }
2469                         }
2470                 inner_start = 0;
2471         }
2472
2473         /* None found */
2474         return -1;
2475 }
2476
2477 /**
2478  * bnx2x_mcast_clear_first_bin - find the first set bin and clear it
2479  *
2480  * @o:          multicast object holding the bins registry
2481  *
2482  * returns the index of the found bin or -1 if none is found
2483  */
2484 static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o)
2485 {
2486         int cur_bit = bnx2x_mcast_get_next_bin(o, 0);
2487
2488         if (cur_bit >= 0)
2489                 BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
2490
2491         return cur_bit;
2492 }
2493
2494 static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o)
2495 {
2496         struct bnx2x_raw_obj *raw = &o->raw;
2497         u8 rx_tx_flag = 0;
2498
2499         if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
2500             (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2501                 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2502
2503         if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
2504             (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2505                 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
2506
2507         return rx_tx_flag;
2508 }
2509
2510 static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
2511                                         struct bnx2x_mcast_obj *o, int idx,
2512                                         union bnx2x_mcast_config_data *cfg_data,
2513                                         enum bnx2x_mcast_cmd cmd)
2514 {
2515         struct bnx2x_raw_obj *r = &o->raw;
2516         struct eth_multicast_rules_ramrod_data *data =
2517                 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2518         u8 func_id = r->func_id;
2519         u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o);
2520         int bin;
2521
2522         if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE))
2523                 rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
2524
2525         data->rules[idx].cmd_general_data |= rx_tx_add_flag;
2526
2527         /* Get a bin and update a bins' vector */
2528         switch (cmd) {
2529         case BNX2X_MCAST_CMD_ADD:
2530                 bin = bnx2x_mcast_bin_from_mac(cfg_data->mac);
2531                 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
2532                 break;
2533
2534         case BNX2X_MCAST_CMD_DEL:
2535                 /* If there were no more bins to clear
2536                  * (bnx2x_mcast_clear_first_bin() returns -1) then we would
2537                  * clear any (0xff) bin.
2538                  * See bnx2x_mcast_validate_e2() for explanation when it may
2539                  * happen.
2540                  */
2541                 bin = bnx2x_mcast_clear_first_bin(o);
2542                 break;
2543
2544         case BNX2X_MCAST_CMD_RESTORE:
2545                 bin = cfg_data->bin;
2546                 break;
2547
2548         default:
2549                 BNX2X_ERR("Unknown command: %d\n", cmd);
2550                 return;
2551         }
2552
2553         DP(BNX2X_MSG_SP, "%s bin %d\n",
2554                          ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
2555                          "Setting"  : "Clearing"), bin);
2556
2557         data->rules[idx].bin_id    = (u8)bin;
2558         data->rules[idx].func_id   = func_id;
2559         data->rules[idx].engine_id = o->engine_id;
2560 }
2561
2562 /**
2563  * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry
2564  *
2565  * @bp:         device handle
2566  * @o:          multicast object
2567  * @start_bin:  index in the registry to start from (including)
2568  * @rdata_idx:  index in the ramrod data to start from
2569  *
2570  * returns last handled bin index or -1 if all bins have been handled
2571  */
2572 static inline int bnx2x_mcast_handle_restore_cmd_e2(
2573         struct bnx2x *bp, struct bnx2x_mcast_obj *o, int start_bin,
2574         int *rdata_idx)
2575 {
2576         int cur_bin, cnt = *rdata_idx;
2577         union bnx2x_mcast_config_data cfg_data = {NULL};
2578
2579         /* go through the registry and configure the bins from it */
2580         for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
2581             cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) {
2582
2583                 cfg_data.bin = (u8)cur_bin;
2584                 o->set_one_rule(bp, o, cnt, &cfg_data,
2585                                 BNX2X_MCAST_CMD_RESTORE);
2586
2587                 cnt++;
2588
2589                 DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin);
2590
2591                 /* Break if we reached the maximum number
2592                  * of rules.
2593                  */
2594                 if (cnt >= o->max_cmd_len)
2595                         break;
2596         }
2597
2598         *rdata_idx = cnt;
2599
2600         return cur_bin;
2601 }
2602
2603 static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
2604         struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2605         int *line_idx)
2606 {
2607         struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
2608         int cnt = *line_idx;
2609         union bnx2x_mcast_config_data cfg_data = {NULL};
2610
2611         list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
2612                                  link) {
2613
2614                 cfg_data.mac = &pmac_pos->mac[0];
2615                 o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
2616
2617                 cnt++;
2618
2619                 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
2620                    pmac_pos->mac);
2621
2622                 list_del(&pmac_pos->link);
2623
2624                 /* Break if we reached the maximum number
2625                  * of rules.
2626                  */
2627                 if (cnt >= o->max_cmd_len)
2628                         break;
2629         }
2630
2631         *line_idx = cnt;
2632
2633         /* if no more MACs to configure - we are done */
2634         if (list_empty(&cmd_pos->data.macs_head))
2635                 cmd_pos->done = true;
2636 }
2637
2638 static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp,
2639         struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2640         int *line_idx)
2641 {
2642         int cnt = *line_idx;
2643
2644         while (cmd_pos->data.macs_num) {
2645                 o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type);
2646
2647                 cnt++;
2648
2649                 cmd_pos->data.macs_num--;
2650
2651                 DP(BNX2X_MSG_SP, "Deleting MAC. %d left, cnt is %d\n",
2652                    cmd_pos->data.macs_num, cnt);
2653
2654                 /* Break if we reached the maximum
2655                  * number of rules.
2656                  */
2657                 if (cnt >= o->max_cmd_len)
2658                         break;
2659         }
2660
2661         *line_idx = cnt;
2662
2663         /* If we cleared all bins - we are done */
2664         if (!cmd_pos->data.macs_num)
2665                 cmd_pos->done = true;
2666 }
2667
2668 static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp,
2669         struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2670         int *line_idx)
2671 {
2672         cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin,
2673                                                 line_idx);
2674
2675         if (cmd_pos->data.next_bin < 0)
2676                 /* If o->set_restore returned -1 we are done */
2677                 cmd_pos->done = true;
2678         else
2679                 /* Start from the next bin next time */
2680                 cmd_pos->data.next_bin++;
2681 }
2682
2683 static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
2684                                 struct bnx2x_mcast_ramrod_params *p)
2685 {
2686         struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
2687         int cnt = 0;
2688         struct bnx2x_mcast_obj *o = p->mcast_obj;
2689
2690         list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head,
2691                                  link) {
2692                 switch (cmd_pos->type) {
2693                 case BNX2X_MCAST_CMD_ADD:
2694                         bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt);
2695                         break;
2696
2697                 case BNX2X_MCAST_CMD_DEL:
2698                         bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt);
2699                         break;
2700
2701                 case BNX2X_MCAST_CMD_RESTORE:
2702                         bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos,
2703                                                            &cnt);
2704                         break;
2705
2706                 default:
2707                         BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
2708                         return -EINVAL;
2709                 }
2710
2711                 /* If the command has been completed - remove it from the list
2712                  * and free the memory
2713                  */
2714                 if (cmd_pos->done) {
2715                         list_del(&cmd_pos->link);
2716                         kfree(cmd_pos);
2717                 }
2718
2719                 /* Break if we reached the maximum number of rules */
2720                 if (cnt >= o->max_cmd_len)
2721                         break;
2722         }
2723
2724         return cnt;
2725 }
2726
2727 static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
2728         struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2729         int *line_idx)
2730 {
2731         struct bnx2x_mcast_list_elem *mlist_pos;
2732         union bnx2x_mcast_config_data cfg_data = {NULL};
2733         int cnt = *line_idx;
2734
2735         list_for_each_entry(mlist_pos, &p->mcast_list, link) {
2736                 cfg_data.mac = mlist_pos->mac;
2737                 o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD);
2738
2739                 cnt++;
2740
2741                 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
2742                    mlist_pos->mac);
2743         }
2744
2745         *line_idx = cnt;
2746 }
2747
2748 static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
2749         struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2750         int *line_idx)
2751 {
2752         int cnt = *line_idx, i;
2753
2754         for (i = 0; i < p->mcast_list_len; i++) {
2755                 o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL);
2756
2757                 cnt++;
2758
2759                 DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n",
2760                                  p->mcast_list_len - i - 1);
2761         }
2762
2763         *line_idx = cnt;
2764 }
2765
2766 /**
2767  * bnx2x_mcast_handle_current_cmd - handle the current (non-pending) command
2768  *
2769  * @bp:         device handle
2770  * @p:          multicast ramrod parameters
2771  * @cmd:        command to handle (ADD/DEL/RESTORE)
2772  * @start_cnt:  first line in the ramrod data that may be used
2773  *
2774  * This function is called iff there is enough room for the current command in
2775  * the ramrod data.
2776  * Returns number of lines filled in the ramrod data in total.
2777  */
2778 static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
2779                         struct bnx2x_mcast_ramrod_params *p,
2780                         enum bnx2x_mcast_cmd cmd,
2781                         int start_cnt)
2782 {
2783         struct bnx2x_mcast_obj *o = p->mcast_obj;
2784         int cnt = start_cnt;
2785
2786         DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
2787
2788         switch (cmd) {
2789         case BNX2X_MCAST_CMD_ADD:
2790                 bnx2x_mcast_hdl_add(bp, o, p, &cnt);
2791                 break;
2792
2793         case BNX2X_MCAST_CMD_DEL:
2794                 bnx2x_mcast_hdl_del(bp, o, p, &cnt);
2795                 break;
2796
2797         case BNX2X_MCAST_CMD_RESTORE:
2798                 o->hdl_restore(bp, o, 0, &cnt);
2799                 break;
2800
2801         default:
2802                 BNX2X_ERR("Unknown command: %d\n", cmd);
2803                 return -EINVAL;
2804         }
2805
2806         /* The current command has been handled */
2807         p->mcast_list_len = 0;
2808
2809         return cnt;
2810 }
2811
2812 static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
2813                                    struct bnx2x_mcast_ramrod_params *p,
2814                                    enum bnx2x_mcast_cmd cmd)
2815 {
2816         struct bnx2x_mcast_obj *o = p->mcast_obj;
2817         int reg_sz = o->get_registry_size(o);
2818
2819         switch (cmd) {
2820         /* DEL command deletes all currently configured MACs */
2821         case BNX2X_MCAST_CMD_DEL:
2822                 o->set_registry_size(o, 0);
2823                 /* Don't break: fall through to set mcast_list_len below */
2824
2825         /* RESTORE command will restore the entire multicast configuration */
2826         case BNX2X_MCAST_CMD_RESTORE:
2827                 /* Here we set the approximate amount of work to do, which in
2828                  * fact may be less, as some MACs in postponed ADD
2829                  * command(s) scheduled before this command may fall into
2830                  * the same bin and the actual number of bins set in the
2831                  * registry would be less than we estimated here. See
2832                  * bnx2x_mcast_set_one_rule_e2() for further details.
2833                  */
2834                 p->mcast_list_len = reg_sz;
2835                 break;
2836
2837         case BNX2X_MCAST_CMD_ADD:
2838         case BNX2X_MCAST_CMD_CONT:
2839                 /* Here we assume that all new MACs will fall into new bins.
2840                  * However we will correct the real registry size after we
2841                  * handle all pending commands.
2842                  */
2843                 o->set_registry_size(o, reg_sz + p->mcast_list_len);
2844                 break;
2845
2846         default:
2847                 BNX2X_ERR("Unknown command: %d\n", cmd);
2848                 return -EINVAL;
2849         }
2850
2851         /* Increase the total number of MACs pending to be configured */
2852         o->total_pending_num += p->mcast_list_len;
2853
2854         return 0;
2855 }
2856
2857 static void bnx2x_mcast_revert_e2(struct bnx2x *bp,
2858                                       struct bnx2x_mcast_ramrod_params *p,
2859                                       int old_num_bins)
2860 {
2861         struct bnx2x_mcast_obj *o = p->mcast_obj;
2862
2863         o->set_registry_size(o, old_num_bins);
2864         o->total_pending_num -= p->mcast_list_len;
2865 }
2866
2867 /**
2868  * bnx2x_mcast_set_rdata_hdr_e2 - set the header values in the ramrod data
2869  *
2870  * @bp:         device handle
2871  * @p:          multicast ramrod parameters
2872  * @len:        number of rules to handle
2873  */
2874 static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
2875                                         struct bnx2x_mcast_ramrod_params *p,
2876                                         u8 len)
2877 {
2878         struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
2879         struct eth_multicast_rules_ramrod_data *data =
2880                 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2881
2882         data->header.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
2883                                         (BNX2X_FILTER_MCAST_PENDING <<
2884                                          BNX2X_SWCID_SHIFT));
2885         data->header.rule_cnt = len;
2886 }
2887
2888 /**
2889  * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins
2890  *
2891  * @bp:         device handle
2892  * @o:          multicast object
2893  *
2894  * Recalculate the actual number of set bins in the registry using Brian
2895  * Kernighan's algorithm: its running time is proportional to the number of
2896  * set bins rather than to the vector length.
2897  * Returns 0 to comply with bnx2x_mcast_refresh_registry_e1().
2898  */
2899 static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp,
2900                                                   struct bnx2x_mcast_obj *o)
2901 {
2902         int i, cnt = 0;
2903         u64 elem;
2904
2905         for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) {
2906                 elem = o->registry.aprox_match.vec[i];
2907                 for (; elem; cnt++)
2908                         elem &= elem - 1;
2909         }
2910
2911         o->set_registry_size(o, cnt);
2912
2913         return 0;
2914 }
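
/* Illustration (not driver code): "elem &= elem - 1" clears the lowest set
 * bit on every pass, so the inner loop above runs once per set bit rather
 * than once per bit position.  For example, with elem = 0b101100:
 *
 *	0b101100 & 0b101011 = 0b101000
 *	0b101000 & 0b100111 = 0b100000
 *	0b100000 & 0b011111 = 0b000000	-> 3 iterations for 3 set bits
 */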
2915
2916 static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
2917                                 struct bnx2x_mcast_ramrod_params *p,
2918                                 enum bnx2x_mcast_cmd cmd)
2919 {
2920         struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
2921         struct bnx2x_mcast_obj *o = p->mcast_obj;
2922         struct eth_multicast_rules_ramrod_data *data =
2923                 (struct eth_multicast_rules_ramrod_data *)(raw->rdata);
2924         int cnt = 0, rc;
2925
2926         /* Reset the ramrod data buffer */
2927         memset(data, 0, sizeof(*data));
2928
2929         cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p);
2930
2931         /* If there are no more pending commands - clear SCHEDULED state */
2932         if (list_empty(&o->pending_cmds_head))
2933                 o->clear_sched(o);
2934
2935         /* The below may be true iff there was enough room in the ramrod
2936          * data for all pending commands and for the current
2937          * command. Otherwise the current command would have been added
2938          * to the pending commands and p->mcast_list_len would have been
2939          * zeroed.
2940          */
2941         if (p->mcast_list_len > 0)
2942                 cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt);
2943
2944         /* We've pulled out some MACs - update the total number of
2945          * outstanding.
2946          */
2947         o->total_pending_num -= cnt;
2948
2949         /* send a ramrod */
2950         WARN_ON(o->total_pending_num < 0);
2951         WARN_ON(cnt > o->max_cmd_len);
2952
2953         bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt);
2954
2955         /* Update the registry size if there are no more pending operations.
2956          *
2957          * We don't want to change the value of the registry size if there are
2958          * pending operations because we want it to always be equal to the
2959          * exact or the approximate number (see bnx2x_mcast_validate_e2()) of
2960          * set bins after the last requested operation in order to properly
2961          * evaluate the size of the next DEL/RESTORE operation.
2962          *
2963          * Note that we update the registry itself during command(s) handling
2964          * - see bnx2x_mcast_set_one_rule_e2(). That's because for 57712 we
2965          * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
2966          * with a limited number of update commands (per MAC/bin), and in this
2967          * scope we don't know what the actual bin configuration will be after
2968          * this ramrod.
2969          */
2970         if (!o->total_pending_num)
2971                 bnx2x_mcast_refresh_registry_e2(bp, o);
2972
2973         /* If CLEAR_ONLY was requested - don't send a ramrod and clear
2974          * RAMROD_PENDING status immediately.
2975          */
2976         if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
2977                 raw->clear_pending(raw);
2978                 return 0;
2979         } else {
2980                 /* No need for an explicit memory barrier here as long as we
2981                  * ensure the ordering of writing to the SPQ element
2982                  * and updating of the SPQ producer which involves a memory
2983                  * read. If the memory read is removed we will have to put a
2984                  * full memory barrier there (inside bnx2x_sp_post()).
2985                  */
2986
2987                 /* Send a ramrod */
2988                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES,
2989                                    raw->cid, U64_HI(raw->rdata_mapping),
2990                                    U64_LO(raw->rdata_mapping),
2991                                    ETH_CONNECTION_TYPE);
2992                 if (rc)
2993                         return rc;
2994
2995                 /* Ramrod completion is pending */
2996                 return 1;
2997         }
2998 }
2999
3000 static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
3001                                     struct bnx2x_mcast_ramrod_params *p,
3002                                     enum bnx2x_mcast_cmd cmd)
3003 {
3004         /* Mark that there is work to do */
3005         if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
3006                 p->mcast_list_len = 1;
3007
3008         return 0;
3009 }
3010
3011 static void bnx2x_mcast_revert_e1h(struct bnx2x *bp,
3012                                        struct bnx2x_mcast_ramrod_params *p,
3013                                        int old_num_bins)
3014 {
3015         /* Do nothing */
3016 }
3017
3018 #define BNX2X_57711_SET_MC_FILTER(filter, bit) \
3019 do { \
3020         (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
3021 } while (0)
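
/* Worked example (illustrative): for bit = 37, (37 >> 5) = 1 selects word 1
 * of the filter array and (37 & 0x1f) = 5 selects bit 5 within that word,
 * so BNX2X_57711_SET_MC_FILTER(mc_filter, 37) ORs 0x20 into mc_filter[1].
 */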
3022
3023 static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
3024                                            struct bnx2x_mcast_obj *o,
3025                                            struct bnx2x_mcast_ramrod_params *p,
3026                                            u32 *mc_filter)
3027 {
3028         struct bnx2x_mcast_list_elem *mlist_pos;
3029         int bit;
3030
3031         list_for_each_entry(mlist_pos, &p->mcast_list, link) {
3032                 bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac);
3033                 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3034
3035                 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC, bin %d\n",
3036                    mlist_pos->mac, bit);
3037
3038                 /* bookkeeping... */
3039                 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
3040                                   bit);
3041         }
3042 }
3043
3044 static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
3045         struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
3046         u32 *mc_filter)
3047 {
3048         int bit;
3049
3050         for (bit = bnx2x_mcast_get_next_bin(o, 0);
3051              bit >= 0;
3052              bit = bnx2x_mcast_get_next_bin(o, bit + 1)) {
3053                 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3054                 DP(BNX2X_MSG_SP, "About to set bin %d\n", bit);
3055         }
3056 }
3057
3058 /* On 57711 we write the multicast MACs' approximate match
3059  * table directly into the TSTORM's internal RAM, so we don't
3060  * need any tricks to make it work.
3061  */
3062 static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
3063                                  struct bnx2x_mcast_ramrod_params *p,
3064                                  enum bnx2x_mcast_cmd cmd)
3065 {
3066         int i;
3067         struct bnx2x_mcast_obj *o = p->mcast_obj;
3068         struct bnx2x_raw_obj *r = &o->raw;
3069
3070         /* If CLEAR_ONLY has been requested we only clear the registry and the
3071          * pending bit; otherwise build the filter and write it to the chip.
3072          */
3073         if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3074                 u32 mc_filter[MC_HASH_SIZE] = {0};
3075
3076                 /* Set the multicast filter bits before writing it into
3077                  * the internal memory.
3078                  */
3079                 switch (cmd) {
3080                 case BNX2X_MCAST_CMD_ADD:
3081                         bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter);
3082                         break;
3083
3084                 case BNX2X_MCAST_CMD_DEL:
3085                         DP(BNX2X_MSG_SP,
3086                            "Invalidating multicast MACs configuration\n");
3087
3088                         /* clear the registry */
3089                         memset(o->registry.aprox_match.vec, 0,
3090                                sizeof(o->registry.aprox_match.vec));
3091                         break;
3092
3093                 case BNX2X_MCAST_CMD_RESTORE:
3094                         bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter);
3095                         break;
3096
3097                 default:
3098                         BNX2X_ERR("Unknown command: %d\n", cmd);
3099                         return -EINVAL;
3100                 }
3101
3102                 /* Set the mcast filter in the internal memory */
3103                 for (i = 0; i < MC_HASH_SIZE; i++)
3104                         REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]);
3105         } else
3106                 /* clear the registry */
3107                 memset(o->registry.aprox_match.vec, 0,
3108                        sizeof(o->registry.aprox_match.vec));
3109
3110         /* We are done */
3111         r->clear_pending(r);
3112
3113         return 0;
3114 }
3115
3116 static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
3117                                    struct bnx2x_mcast_ramrod_params *p,
3118                                    enum bnx2x_mcast_cmd cmd)
3119 {
3120         struct bnx2x_mcast_obj *o = p->mcast_obj;
3121         int reg_sz = o->get_registry_size(o);
3122
3123         switch (cmd) {
3124         /* DEL command deletes all currently configured MACs */
3125         case BNX2X_MCAST_CMD_DEL:
3126                 o->set_registry_size(o, 0);
3127                 /* Fall through */
3128
3129         /* RESTORE command will restore the entire multicast configuration */
3130         case BNX2X_MCAST_CMD_RESTORE:
3131                 p->mcast_list_len = reg_sz;
3132                 DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
3133                    cmd, p->mcast_list_len);
3134                 break;
3135
3136         case BNX2X_MCAST_CMD_ADD:
3137         case BNX2X_MCAST_CMD_CONT:
3138                 /* Multicast MACs on 57710 are configured as unicast MACs and
3139                  * there is only a limited number of CAM entries for that
3140                  * matter.
3141                  */
3142                 if (p->mcast_list_len > o->max_cmd_len) {
3143                         BNX2X_ERR("Can't configure more than %d multicast MACs on 57710\n",
3144                                   o->max_cmd_len);
3145                         return -EINVAL;
3146                 }
3147                 /* Every configured MAC should be cleared if a DEL command is
3148                  * called. Only the last ADD command is relevant since each
3149                  * ADD command overrides the previous configuration.
3150                  */
3151                 DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
3152                 if (p->mcast_list_len > 0)
3153                         o->set_registry_size(o, p->mcast_list_len);
3154
3155                 break;
3156
3157         default:
3158                 BNX2X_ERR("Unknown command: %d\n", cmd);
3159                 return -EINVAL;
3160         }
3161
3162         /* We want to ensure that commands are executed one by one for 57710.
3163          * Therefore each non-empty command will consume o->max_cmd_len.
3164          */
3165         if (p->mcast_list_len)
3166                 o->total_pending_num += o->max_cmd_len;
3167
3168         return 0;
3169 }
3170
3171 static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
3172                                       struct bnx2x_mcast_ramrod_params *p,
3173                                       int old_num_macs)
3174 {
3175         struct bnx2x_mcast_obj *o = p->mcast_obj;
3176
3177         o->set_registry_size(o, old_num_macs);
3178
3179         /* If the current command hasn't been handled yet and we got here,
3180          * it means the command is meant to be dropped and we have to update
3181          * the number of outstanding MACs accordingly.
3182          */
3183         if (p->mcast_list_len)
3184                 o->total_pending_num -= o->max_cmd_len;
3185 }
3186
3187 static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp,
3188                                         struct bnx2x_mcast_obj *o, int idx,
3189                                         union bnx2x_mcast_config_data *cfg_data,
3190                                         enum bnx2x_mcast_cmd cmd)
3191 {
3192         struct bnx2x_raw_obj *r = &o->raw;
3193         struct mac_configuration_cmd *data =
3194                 (struct mac_configuration_cmd *)(r->rdata);
3195
3196         /* copy mac */
3197         if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) {
3198                 bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
3199                                       &data->config_table[idx].middle_mac_addr,
3200                                       &data->config_table[idx].lsb_mac_addr,
3201                                       cfg_data->mac);
3202
3203                 data->config_table[idx].vlan_id = 0;
3204                 data->config_table[idx].pf_id = r->func_id;
3205                 data->config_table[idx].clients_bit_vector =
3206                         cpu_to_le32(1 << r->cl_id);
3207
3208                 SET_FLAG(data->config_table[idx].flags,
3209                          MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3210                          T_ETH_MAC_COMMAND_SET);
3211         }
3212 }
3213
3214 /**
3215  * bnx2x_mcast_set_rdata_hdr_e1 - set header values in mac_configuration_cmd
3216  *
3217  * @bp:         device handle
3218  * @p:          multicast ramrod parameters
3219  * @len:        number of rules to handle
3220  */
3221 static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
3222                                         struct bnx2x_mcast_ramrod_params *p,
3223                                         u8 len)
3224 {
3225         struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
3226         struct mac_configuration_cmd *data =
3227                 (struct mac_configuration_cmd *)(r->rdata);
3228
3229         u8 offset = (CHIP_REV_IS_SLOW(bp) ?
3230                      BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) :
3231                      BNX2X_MAX_MULTICAST*(1 + r->func_id));
3232
3233         data->hdr.offset = offset;
3234         data->hdr.client_id = cpu_to_le16(0xff);
3235         data->hdr.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
3236                                      (BNX2X_FILTER_MCAST_PENDING <<
3237                                       BNX2X_SWCID_SHIFT));
3238         data->hdr.length = len;
3239 }
3240
3241 /**
3242  * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710
3243  *
3244  * @bp:         device handle
3245  * @o:          multicast object
3246  * @start_idx:  index in the registry to start from
3247  * @rdata_idx:  index in the ramrod data to start from
3248  *
3249  * The restore command for 57710 is, like all other commands, always a
3250  * stand-alone command, so start_idx and rdata_idx will always be 0. This
3251  * function always succeeds.
3252  * Returns -1 to comply with the 57712 variant.
3253  */
3254 static inline int bnx2x_mcast_handle_restore_cmd_e1(
3255         struct bnx2x *bp, struct bnx2x_mcast_obj *o, int start_idx,
3256         int *rdata_idx)
3257 {
3258         struct bnx2x_mcast_mac_elem *elem;
3259         int i = 0;
3260         union bnx2x_mcast_config_data cfg_data = {NULL};
3261
3262         /* go through the registry and configure the MACs from it. */
3263         list_for_each_entry(elem, &o->registry.exact_match.macs, link) {
3264                 cfg_data.mac = &elem->mac[0];
3265                 o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE);
3266
3267                 i++;
3268
3269                 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
3270                    cfg_data.mac);
3271         }
3272
3273         *rdata_idx = i;
3274
3275         return -1;
3276 }
3277
3278 static inline int bnx2x_mcast_handle_pending_cmds_e1(
3279         struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p)
3280 {
3281         struct bnx2x_pending_mcast_cmd *cmd_pos;
3282         struct bnx2x_mcast_mac_elem *pmac_pos;
3283         struct bnx2x_mcast_obj *o = p->mcast_obj;
3284         union bnx2x_mcast_config_data cfg_data = {NULL};
3285         int cnt = 0;
3286
3287         /* If nothing to be done - return */
3288         if (list_empty(&o->pending_cmds_head))
3289                 return 0;
3290
3291         /* Handle the first command */
3292         cmd_pos = list_first_entry(&o->pending_cmds_head,
3293                                    struct bnx2x_pending_mcast_cmd, link);
3294
3295         switch (cmd_pos->type) {
3296         case BNX2X_MCAST_CMD_ADD:
3297                 list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) {
3298                         cfg_data.mac = &pmac_pos->mac[0];
3299                         o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
3300
3301                         cnt++;
3302
3303                         DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
3304                            pmac_pos->mac);
3305                 }
3306                 break;
3307
3308         case BNX2X_MCAST_CMD_DEL:
3309                 cnt = cmd_pos->data.macs_num;
3310                 DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt);
3311                 break;
3312
3313         case BNX2X_MCAST_CMD_RESTORE:
3314                 o->hdl_restore(bp, o, 0, &cnt);
3315                 break;
3316
3317         default:
3318                 BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
3319                 return -EINVAL;
3320         }
3321
3322         list_del(&cmd_pos->link);
3323         kfree(cmd_pos);
3324
3325         return cnt;
3326 }
3327
3328 /**
3329  * bnx2x_get_fw_mac_addr - reverse the conversion done by bnx2x_set_fw_mac_addr().
3330  *
3331  * @fw_hi:      firmware representation of MAC bytes 0-1
3332  * @fw_mid:     firmware representation of MAC bytes 2-3
3333  * @fw_lo:      firmware representation of MAC bytes 4-5
3334  * @mac:        output buffer for the MAC address
3335  */
3336 static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
3337                                          __le16 *fw_lo, u8 *mac)
3338 {
3339         mac[1] = ((u8 *)fw_hi)[0];
3340         mac[0] = ((u8 *)fw_hi)[1];
3341         mac[3] = ((u8 *)fw_mid)[0];
3342         mac[2] = ((u8 *)fw_mid)[1];
3343         mac[5] = ((u8 *)fw_lo)[0];
3344         mac[4] = ((u8 *)fw_lo)[1];
3345 }
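
/* Worked example (illustrative): for the MAC 00:11:22:33:44:55 the firmware
 * words hold the address as byte pairs with the bytes of each pair swapped,
 * i.e. fw_hi = {0x11, 0x00}, fw_mid = {0x33, 0x22}, fw_lo = {0x55, 0x44};
 * the helper above undoes that swap to rebuild mac[0..5] in wire order.
 */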
3346
3347 /**
3348  * bnx2x_mcast_refresh_registry_e1 - update the exact match registry
3349  *
3350  * @bp:         device handle
3351  * @o:          multicast object
3352  *
3353  * Check the first entry's flag in the ramrod data to see whether the command
3354  * was a DELETE or an ADD and update the registry accordingly: if ADD,
3355  * allocate memory and add the entries to the registry (list); if DELETE,
3356  * clear the registry and free the memory.
3357  */
3358 static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
3359                                                   struct bnx2x_mcast_obj *o)
3360 {
3361         struct bnx2x_raw_obj *raw = &o->raw;
3362         struct bnx2x_mcast_mac_elem *elem;
3363         struct mac_configuration_cmd *data =
3364                         (struct mac_configuration_cmd *)(raw->rdata);
3365
3366         /* If first entry contains a SET bit - the command was ADD,
3367          * otherwise - DEL_ALL
3368          */
3369         if (GET_FLAG(data->config_table[0].flags,
3370                         MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
3371                 int i, len = data->hdr.length;
3372
3373                 /* If it was a RESTORE command the registry is already populated */
3374                 if (!list_empty(&o->registry.exact_match.macs))
3375                         return 0;
3376
3377                 elem = kcalloc(len, sizeof(*elem), GFP_ATOMIC);
3378                 if (!elem) {
3379                         BNX2X_ERR("Failed to allocate registry memory\n");
3380                         return -ENOMEM;
3381                 }
3382
3383                 for (i = 0; i < len; i++, elem++) {
3384                         bnx2x_get_fw_mac_addr(
3385                                 &data->config_table[i].msb_mac_addr,
3386                                 &data->config_table[i].middle_mac_addr,
3387                                 &data->config_table[i].lsb_mac_addr,
3388                                 elem->mac);
3389                         DP(BNX2X_MSG_SP, "Adding registry entry for [%pM]\n",
3390                            elem->mac);
3391                         list_add_tail(&elem->link,
3392                                       &o->registry.exact_match.macs);
3393                 }
3394         } else {
3395                 elem = list_first_entry(&o->registry.exact_match.macs,
3396                                         struct bnx2x_mcast_mac_elem, link);
3397                 DP(BNX2X_MSG_SP, "Deleting a registry\n");
3398                 kfree(elem);
3399                 INIT_LIST_HEAD(&o->registry.exact_match.macs);
3400         }
3401
3402         return 0;
3403 }
3404
3405 static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
3406                                 struct bnx2x_mcast_ramrod_params *p,
3407                                 enum bnx2x_mcast_cmd cmd)
3408 {
3409         struct bnx2x_mcast_obj *o = p->mcast_obj;
3410         struct bnx2x_raw_obj *raw = &o->raw;
3411         struct mac_configuration_cmd *data =
3412                 (struct mac_configuration_cmd *)(raw->rdata);
3413         int cnt = 0, i, rc;
3414
3415         /* Reset the ramrod data buffer */
3416         memset(data, 0, sizeof(*data));
3417
3418         /* First set all entries as invalid */
3419         for (i = 0; i < o->max_cmd_len; i++)
3420                 SET_FLAG(data->config_table[i].flags,
3421                          MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3422                          T_ETH_MAC_COMMAND_INVALIDATE);
3423
3424         /* Handle pending commands first */
3425         cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p);
3426
3427         /* If there are no more pending commands - clear SCHEDULED state */
3428         if (list_empty(&o->pending_cmds_head))
3429                 o->clear_sched(o);
3430
3431         /* The below may be true iff there were no pending commands */
3432         if (!cnt)
3433                 cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0);
3434
3435         /* For 57710 every command consumes o->max_cmd_len credits to ensure
3436          * that commands are done one at a time.
3437          */
3438         o->total_pending_num -= o->max_cmd_len;
3439
3440         /* send a ramrod */
3441
3442         WARN_ON(cnt > o->max_cmd_len);
3443
3444         /* Set ramrod header (in particular, a number of entries to update) */
3445         bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt);
3446
3447         /* Update the registry: we need the registry contents to always be up
3448          * to date in order to be able to execute a RESTORE opcode. Here we
3449          * use the fact that for 57710 we send one command at a time, hence we
3450          * may take the registry update out of the command handling and do it
3451          * in a simpler way here.
3452          */
3453         rc = bnx2x_mcast_refresh_registry_e1(bp, o);
3454         if (rc)
3455                 return rc;
3456
3457         /* If CLEAR_ONLY was requested - don't send a ramrod and clear
3458          * RAMROD_PENDING status immediately.
3459          */
3460         if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3461                 raw->clear_pending(raw);
3462                 return 0;
3463         } else {
3464                 /* No need for an explicit memory barrier here as long as we
3465                  * ensure the ordering of writing to the SPQ element
3466                  * and updating of the SPQ producer which involves a memory
3467                  * read. If the memory read is removed we will have to put a
3468                  * full memory barrier there (inside bnx2x_sp_post()).
3469                  */
3470
3471                 /* Send a ramrod */
3472                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid,
3473                                    U64_HI(raw->rdata_mapping),
3474                                    U64_LO(raw->rdata_mapping),
3475                                    ETH_CONNECTION_TYPE);
3476                 if (rc)
3477                         return rc;
3478
3479                 /* Ramrod completion is pending */
3480                 return 1;
3481         }
3482 }
3483
3484 static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o)
3485 {
3486         return o->registry.exact_match.num_macs_set;
3487 }
3488
3489 static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o)
3490 {
3491         return o->registry.aprox_match.num_bins_set;
3492 }
3493
3494 static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o,
3495                                                 int n)
3496 {
3497         o->registry.exact_match.num_macs_set = n;
3498 }
3499
3500 static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o,
3501                                                 int n)
3502 {
3503         o->registry.aprox_match.num_bins_set = n;
3504 }
3505
3506 int bnx2x_config_mcast(struct bnx2x *bp,
3507                        struct bnx2x_mcast_ramrod_params *p,
3508                        enum bnx2x_mcast_cmd cmd)
3509 {
3510         struct bnx2x_mcast_obj *o = p->mcast_obj;
3511         struct bnx2x_raw_obj *r = &o->raw;
3512         int rc = 0, old_reg_size;
3513
3514         /* This is needed to recover the number of currently configured
3515          * multicast MACs in case of failure.
3516          */
3517         old_reg_size = o->get_registry_size(o);
3518
3519         /* Do some calculations and checks */
3520         rc = o->validate(bp, p, cmd);
3521         if (rc)
3522                 return rc;
3523
3524         /* Return if there is no work to do */
3525         if ((!p->mcast_list_len) && (!o->check_sched(o)))
3526                 return 0;
3527
3528         DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d\n",
3529            o->total_pending_num, p->mcast_list_len, o->max_cmd_len);
3530
3531         /* Enqueue the current command to the pending list if we can't complete
3532          * it in the current iteration
3533          */
3534         if (r->check_pending(r) ||
3535             ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
3536                 rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd);
3537                 if (rc < 0)
3538                         goto error_exit1;
3539
3540                 /* As long as the current command is in a command list we
3541                  * don't need to handle it separately.
3542                  */
3543                 p->mcast_list_len = 0;
3544         }
3545
3546         if (!r->check_pending(r)) {
3547
3548                 /* Set 'pending' state */
3549                 r->set_pending(r);
3550
3551                 /* Configure the new classification in the chip */
3552                 rc = o->config_mcast(bp, p, cmd);
3553                 if (rc < 0)
3554                         goto error_exit2;
3555
3556                 /* Wait for a ramrod completion if it was requested */
3557                 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
3558                         rc = o->wait_comp(bp, o);
3559         }
3560
3561         return rc;
3562
3563 error_exit2:
3564         r->clear_pending(r);
3565
3566 error_exit1:
3567         o->revert(bp, p, old_reg_size);
3568
3569         return rc;
3570 }
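
/* Minimal usage sketch (illustrative only; the exact object location, e.g.
 * bp->mcast_obj, and the list setup are assumptions, not code taken from
 * this driver):
 *
 *	struct bnx2x_mcast_ramrod_params rparam = {0};
 *	int rc;
 *
 *	rparam.mcast_obj = &bp->mcast_obj;
 *	INIT_LIST_HEAD(&rparam.mcast_list);
 *	(link bnx2x_mcast_list_elem entries onto rparam.mcast_list and set
 *	 rparam.mcast_list_len to their count)
 *	set_bit(RAMROD_COMP_WAIT, &rparam.ramrod_flags);
 *	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_ADD);
 *
 * A negative rc indicates failure; with RAMROD_COMP_WAIT set the call
 * normally returns only after the ramrod (if any was sent) completes.
 */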
3571
3572 static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
3573 {
3574         smp_mb__before_atomic();
3575         clear_bit(o->sched_state, o->raw.pstate);
3576         smp_mb__after_atomic();
3577 }
3578
3579 static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
3580 {
3581         smp_mb__before_atomic();
3582         set_bit(o->sched_state, o->raw.pstate);
3583         smp_mb__after_atomic();
3584 }
3585
3586 static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
3587 {
3588         return !!test_bit(o->sched_state, o->raw.pstate);
3589 }
3590
3591 static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o)
3592 {
3593         return o->raw.check_pending(&o->raw) || o->check_sched(o);
3594 }
3595
3596 void bnx2x_init_mcast_obj(struct bnx2x *bp,
3597                           struct bnx2x_mcast_obj *mcast_obj,
3598                           u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
3599                           u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
3600                           int state, unsigned long *pstate, bnx2x_obj_type type)
3601 {
3602         memset(mcast_obj, 0, sizeof(*mcast_obj));
3603
3604         bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
3605                            rdata, rdata_mapping, state, pstate, type);
3606
3607         mcast_obj->engine_id = engine_id;
3608
3609         INIT_LIST_HEAD(&mcast_obj->pending_cmds_head);
3610
3611         mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED;
3612         mcast_obj->check_sched = bnx2x_mcast_check_sched;
3613         mcast_obj->set_sched = bnx2x_mcast_set_sched;
3614         mcast_obj->clear_sched = bnx2x_mcast_clear_sched;
3615
3616         if (CHIP_IS_E1(bp)) {
3617                 mcast_obj->config_mcast      = bnx2x_mcast_setup_e1;
3618                 mcast_obj->enqueue_cmd       = bnx2x_mcast_enqueue_cmd;
3619                 mcast_obj->hdl_restore       =
3620                         bnx2x_mcast_handle_restore_cmd_e1;
3621                 mcast_obj->check_pending     = bnx2x_mcast_check_pending;
3622
3623                 if (CHIP_REV_IS_SLOW(bp))
3624                         mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI;
3625                 else
3626                         mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST;
3627
3628                 mcast_obj->wait_comp         = bnx2x_mcast_wait;
3629                 mcast_obj->set_one_rule      = bnx2x_mcast_set_one_rule_e1;
3630                 mcast_obj->validate          = bnx2x_mcast_validate_e1;
3631                 mcast_obj->revert            = bnx2x_mcast_revert_e1;
3632                 mcast_obj->get_registry_size =
3633                         bnx2x_mcast_get_registry_size_exact;
3634                 mcast_obj->set_registry_size =
3635                         bnx2x_mcast_set_registry_size_exact;
3636
3637                 /* 57710 is the only chip that uses the exact match for mcast
3638                  * at the moment.
3639                  */
3640                 INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs);
3641
3642         } else if (CHIP_IS_E1H(bp)) {
3643                 mcast_obj->config_mcast  = bnx2x_mcast_setup_e1h;
3644                 mcast_obj->enqueue_cmd   = NULL;
3645                 mcast_obj->hdl_restore   = NULL;
3646                 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3647
3648                 /* 57711 doesn't send a ramrod, so it has unlimited credit
3649                  * for one command.
3650                  */
3651                 mcast_obj->max_cmd_len       = -1;
3652                 mcast_obj->wait_comp         = bnx2x_mcast_wait;
3653                 mcast_obj->set_one_rule      = NULL;
3654                 mcast_obj->validate          = bnx2x_mcast_validate_e1h;
3655                 mcast_obj->revert            = bnx2x_mcast_revert_e1h;
3656                 mcast_obj->get_registry_size =
3657                         bnx2x_mcast_get_registry_size_aprox;
3658                 mcast_obj->set_registry_size =
3659                         bnx2x_mcast_set_registry_size_aprox;
3660         } else {
3661                 mcast_obj->config_mcast      = bnx2x_mcast_setup_e2;
3662                 mcast_obj->enqueue_cmd       = bnx2x_mcast_enqueue_cmd;
3663                 mcast_obj->hdl_restore       =
3664                         bnx2x_mcast_handle_restore_cmd_e2;
3665                 mcast_obj->check_pending     = bnx2x_mcast_check_pending;
3666                 /* TODO: There should be a proper HSI define for this number!!!
3667                  */
3668                 mcast_obj->max_cmd_len       = 16;
3669                 mcast_obj->wait_comp         = bnx2x_mcast_wait;
3670                 mcast_obj->set_one_rule      = bnx2x_mcast_set_one_rule_e2;
3671                 mcast_obj->validate          = bnx2x_mcast_validate_e2;
3672                 mcast_obj->revert            = bnx2x_mcast_revert_e2;
3673                 mcast_obj->get_registry_size =
3674                         bnx2x_mcast_get_registry_size_aprox;
3675                 mcast_obj->set_registry_size =
3676                         bnx2x_mcast_set_registry_size_aprox;
3677         }
3678 }
3679
3680 /*************************** Credit handling **********************************/
3681
3682 /**
3683  * __atomic_add_ifless - add if the result is less than a given value.
3684  *
3685  * @v:  pointer of type atomic_t
3686  * @a:  the amount to add to v...
3687  * @u:  ...if (v + a) is less than u.
3688  *
3689  * returns true if (v + a) was less than u, and false otherwise.
3690  *
3691  */
3692 static inline bool __atomic_add_ifless(atomic_t *v, int a, int u)
3693 {
3694         int c, old;
3695
3696         c = atomic_read(v);
3697         for (;;) {
3698                 if (unlikely(c + a >= u))
3699                         return false;
3700
3701                 old = atomic_cmpxchg((v), c, c + a);
3702                 if (likely(old == c))
3703                         break;
3704                 c = old;
3705         }
3706
3707         return true;
3708 }
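
/* Illustrative use (a sketch, not code from this driver): a credit pool
 * bounded at "max" credits could return "a" credits with
 *
 *	if (!__atomic_add_ifless(&pool_credit, a, max + 1))
 *		(reject - the pool would exceed its bound)
 *
 * i.e. the addition only happens while (pool_credit + a) stays <= max.
 */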
3709
3710 /**
3711  * __atomic_dec_ifmoe - decrement if the result is greater or equal to a given value.
3712  *
3713  * @v:  pointer of type atomic_t
3714  * @a:  the amount to subtract from v...
3715  * @u:  ...if (v - a) is greater than or equal to u.
3716  *
3717  * returns true if (v - a) was greater than or equal to u, and false
3718  * otherwise.
3719  */
3720 static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u)
3721 {
3722         int c, old;