BXE: last debugging attempts
[akaros.git] / kern / drivers / net / bxe / ecore_sp.c
1 /*-
2  * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  *
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
15  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
18  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
19  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
20  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
21  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
22  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
23  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
24  * THE POSSIBILITY OF SUCH DAMAGE.
25  */
26
27 //__FBSDID("$FreeBSD: head/sys/dev/bxe/ecore_sp.c 265411 2014-05-06 02:32:27Z davidcs $");
28
29 #include "bxe.h"
30 #include "ecore_init.h"
31
32 /**** Exe Queue interfaces ****/
33
34 /**
35  * ecore_exe_queue_init - init the Exe Queue object
36  *
37  * @o:          pointer to the object
38  * @exe_len:    length
39  * @owner:      pointer to the owner
40  * @validate:   validate function pointer
 * @remove:     remove function pointer
41  * @optimize:   optimize function pointer
42  * @exec:       execute function pointer
43  * @get:        get function pointer
44  */
45 static inline void ecore_exe_queue_init(struct bxe_adapter *sc,
46                                         struct ecore_exe_queue_obj *o,
47                                         int exe_len,
48                                         union ecore_qable_obj *owner,
49                                         exe_q_validate validate,
50                                         exe_q_remove remove,
51                                         exe_q_optimize optimize,
52                                         exe_q_execute exec,
53                                         exe_q_get get)
54 {
55         ECORE_MEMSET(o, 0, sizeof(*o));
56
57         ECORE_LIST_INIT(&o->exe_queue);
58         ECORE_LIST_INIT(&o->pending_comp);
59
60         ECORE_SPIN_LOCK_INIT(&o->lock, sc);
61
62         o->exe_chunk_len = exe_len;
63         o->owner         = owner;
64
65         /* Owner specific callbacks */
66         o->validate      = validate;
67         o->remove        = remove;
68         o->optimize      = optimize;
69         o->execute       = exec;
70         o->get           = get;
71
72         ECORE_MSG(sc, "Setup the execution queue with the chunk length of %d\n",
73                   exe_len);
74 }
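/* Editor's sketch (not driver code): an owner object typically wires its
 * callbacks into the queue roughly like the MAC object does later in this
 * file, e.g.:
 *
 *     ecore_exe_queue_init(sc, &mac_obj->exe_queue, 1,
 *                          (union ecore_qable_obj *)mac_obj,
 *                          ecore_validate_vlan_mac,
 *                          ecore_remove_vlan_mac,
 *                          ecore_optimize_vlan_mac,
 *                          ecore_execute_vlan_mac,
 *                          ecore_exeq_get_mac);
 *
 * A chunk length of 1 means at most one command is handed to the FW per
 * execution step; the E2 paths pass a larger chunk.
 */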
75
76 static inline void ecore_exe_queue_free_elem(struct bxe_adapter *sc,
77                                              struct ecore_exeq_elem *elem)
78 {
79         ECORE_MSG(sc, "Deleting an exe_queue element\n");
80         ECORE_FREE(sc, elem, sizeof(*elem));
81 }
82
83 static inline int ecore_exe_queue_length(struct ecore_exe_queue_obj *o)
84 {
85         struct ecore_exeq_elem *elem;
86         int cnt = 0;
87
88         ECORE_SPIN_LOCK_BH(&o->lock);
89
90         ECORE_LIST_FOR_EACH_ENTRY(elem, &o->exe_queue, link,
91                                   struct ecore_exeq_elem)
92                 cnt++;
93
94         ECORE_SPIN_UNLOCK_BH(&o->lock);
95
96         return cnt;
97 }
98
99 /**
100  * ecore_exe_queue_add - add a new element to the execution queue
101  *
102  * @sc:         driver handle
103  * @o:          queue
104  * @elem:       new command element to add
105  * @restore:    true - do not optimize the command
106  *
107  * If the element is optimized or is illegal, frees it.
108  */
109 static inline int ecore_exe_queue_add(struct bxe_adapter *sc,
110                                       struct ecore_exe_queue_obj *o,
111                                       struct ecore_exeq_elem *elem,
112                                       bool restore)
113 {
114         int rc;
115
116         ECORE_SPIN_LOCK_BH(&o->lock);
117
118         if (!restore) {
119                 /* Try to optimize this command away (it may cancel out a complementary pending command) */
120                 rc = o->optimize(sc, o->owner, elem);
121                 if (rc)
122                         goto free_and_exit;
123
124                 /* Check if this request is ok */
125                 rc = o->validate(sc, o->owner, elem);
126                 if (rc) {
127                         ECORE_MSG(sc, "Preamble failed: %d\n", rc);
128                         goto free_and_exit;
129                 }
130         }
131
132         /* If so, add it to the execution queue */
133         ECORE_LIST_PUSH_TAIL(&elem->link, &o->exe_queue);
134
135         ECORE_SPIN_UNLOCK_BH(&o->lock);
136
137         return ECORE_SUCCESS;
138
139 free_and_exit:
140         ecore_exe_queue_free_elem(sc, elem);
141
142         ECORE_SPIN_UNLOCK_BH(&o->lock);
143
144         return rc;
145 }
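/* Editor's sketch (hypothetical caller, loosely mirroring the
 * ecore_vlan_mac_push_new_cmd() flow further down in this file): producing a
 * command looks roughly like
 *
 *     struct ecore_exeq_elem *elem = ecore_exe_queue_alloc_elem(sc);
 *
 *     if (!elem)
 *             return ECORE_NOMEM;
 *     ECORE_MEMCPY(&elem->cmd_data.vlan_mac, &p->user_req,
 *                  sizeof(elem->cmd_data.vlan_mac));
 *     elem->cmd_len = 1;                 (a MOVE counts as two rules)
 *     rc = ecore_exe_queue_add(sc, &o->exe_queue, elem, restore);
 *
 * Note that on any failure ecore_exe_queue_add() frees the element itself.
 */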
146
147 static inline void __ecore_exe_queue_reset_pending(
148         struct bxe_adapter *sc,
149         struct ecore_exe_queue_obj *o)
150 {
151         struct ecore_exeq_elem *elem;
152
153         while (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) {
154                 elem = ECORE_LIST_FIRST_ENTRY(&o->pending_comp,
155                                               struct ecore_exeq_elem,
156                                               link);
157
158                 ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->pending_comp);
159                 ecore_exe_queue_free_elem(sc, elem);
160         }
161 }
162
163 /**
164  * ecore_exe_queue_step - execute one execution chunk atomically
165  *
166  * @sc:                 driver handle
167  * @o:                  queue
168  * @ramrod_flags:       flags
169  *
170  * (Should be called while holding the exe_queue->lock).
171  */
172 static inline int ecore_exe_queue_step(struct bxe_adapter *sc,
173                                        struct ecore_exe_queue_obj *o,
174                                        unsigned long *ramrod_flags)
175 {
176         struct ecore_exeq_elem *elem, spacer;
177         int cur_len = 0, rc;
178
179         ECORE_MEMSET(&spacer, 0, sizeof(spacer));
180
181         /* Next step should not be performed until the current is finished,
182          * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
183          * properly clear object internals without sending any command to the FW
184          * which also implies there won't be any completion to clear the
185          * 'pending' list.
186          */
187         if (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) {
188                 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
189                         ECORE_MSG(sc, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
190                         __ecore_exe_queue_reset_pending(sc, o);
191                 } else {
192                         return ECORE_PENDING;
193                 }
194         }
195
196         /* Run through the pending commands list and create a next
197          * execution chunk.
198          */
199         while (!ECORE_LIST_IS_EMPTY(&o->exe_queue)) {
200                 elem = ECORE_LIST_FIRST_ENTRY(&o->exe_queue,
201                                               struct ecore_exeq_elem,
202                                               link);
203                 ECORE_DBG_BREAK_IF(!elem->cmd_len);
204
205                 if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
206                         cur_len += elem->cmd_len;
207                         /* Prevent both lists from being empty while moving
208                          * an element; this allows ecore_exe_queue_empty()
209                          * to be called without locking.
210                          */
211                         ECORE_LIST_PUSH_TAIL(&spacer.link, &o->pending_comp);
212                         mb();
213                         ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->exe_queue);
214                         ECORE_LIST_PUSH_TAIL(&elem->link, &o->pending_comp);
215                         ECORE_LIST_REMOVE_ENTRY(&spacer.link, &o->pending_comp);
216                 } else
217                         break;
218         }
219
220         /* Sanity check */
221         if (!cur_len)
222                 return ECORE_SUCCESS;
223
224         rc = o->execute(sc, o->owner, &o->pending_comp, ramrod_flags);
225         if (rc < 0)
226                 /* In case of an error return the commands back to the queue
227                  *  and reset the pending_comp.
228                  */
229                 ECORE_LIST_SPLICE_INIT(&o->pending_comp, &o->exe_queue);
230         else if (!rc)
231                 /* If zero is returned, means there are no outstanding pending
232                  * completions and we may dismiss the pending list.
233                  */
234                 __ecore_exe_queue_reset_pending(sc, o);
235
236         return rc;
237 }
238
239 static inline bool ecore_exe_queue_empty(struct ecore_exe_queue_obj *o)
240 {
241         bool empty = ECORE_LIST_IS_EMPTY(&o->exe_queue);
242
243         /* Don't reorder!!! */
244         mb();
245
246         return empty && ECORE_LIST_IS_EMPTY(&o->pending_comp);
247 }
248
249 static inline struct ecore_exeq_elem *ecore_exe_queue_alloc_elem(
250         struct bxe_adapter *sc)
251 {
252         ECORE_MSG(sc, "Allocating a new exe_queue element\n");
253         return ECORE_ZALLOC(sizeof(struct ecore_exeq_elem), GFP_ATOMIC,
254                             sc);
255 }
256
257 /************************ raw_obj functions ***********************************/
258 static bool ecore_raw_check_pending(struct ecore_raw_obj *o)
259 {
260         /* The !! converts the value returned by ECORE_TEST_BIT such that
261          * it is guaranteed not to be truncated regardless of the bool
262          * definition.
263          *
264          * Note we cannot simply define the function's return value type
265          * to match the type returned by ECORE_TEST_BIT, as it varies by
266          * platform/implementation.
267          */
268
269         return !!ECORE_TEST_BIT(o->state, o->pstate);
270 }
271
272 static void ecore_raw_clear_pending(struct ecore_raw_obj *o)
273 {
274         ECORE_SMP_MB_BEFORE_CLEAR_BIT();
275         ECORE_CLEAR_BIT(o->state, o->pstate);
276         ECORE_SMP_MB_AFTER_CLEAR_BIT();
277 }
278
279 static void ecore_raw_set_pending(struct ecore_raw_obj *o)
280 {
281         ECORE_SMP_MB_BEFORE_CLEAR_BIT();
282         ECORE_SET_BIT(o->state, o->pstate);
283         ECORE_SMP_MB_AFTER_CLEAR_BIT();
284 }
285
286 /**
287  * ecore_state_wait - wait until the given bit(state) is cleared
288  *
289  * @sc:         device handle
290  * @state:      state which is to be cleared
291  * @pstate:     pointer to the state buffer
292  *
293  */
294 static inline int ecore_state_wait(struct bxe_adapter *sc, int state,
295                                    unsigned long *pstate)
296 {
297         /* can take a while if any port is running */
298         int cnt = 5000;
299
300
301         if (CHIP_REV_IS_EMUL(sc))
302                 cnt *= 20;
303
304         ECORE_MSG(sc, "waiting for state to become %d\n", state);
305
306         ECORE_MIGHT_SLEEP();
307         while (cnt--) {
308                 if (!ECORE_TEST_BIT(state, pstate)) {
309 #ifdef ECORE_STOP_ON_ERROR
310                         ECORE_MSG(sc, "exit  (cnt %d)\n", 5000 - cnt);
311 #endif
312                         return ECORE_SUCCESS;
313                 }
314
315                 ECORE_WAIT(sc, 1000);
316
317                 if (sc->panic)
318                         return ECORE_IO;
319         }
320
321         /* timeout! */
322         ECORE_ERR("timeout waiting for state %d\n", state);
323 #ifdef ECORE_STOP_ON_ERROR
324         ecore_panic();
325 #endif
326
327         return ECORE_TIMEOUT;
328 }
329
330 static int ecore_raw_wait(struct bxe_adapter *sc, struct ecore_raw_obj *raw)
331 {
332         return ecore_state_wait(sc, raw->state, raw->pstate);
333 }
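/* Editor's note: a rough sketch of the pending-bit protocol these helpers
 * support (illustrative, not lifted from the driver):
 *
 *     o->raw.set_pending(&o->raw);               mark the command in flight
 *     ... send the ramrod ...
 *     if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, ramrod_flags))
 *             rc = ecore_raw_wait(sc, &o->raw);  poll until the completion
 *                                                handler clears the bit
 *
 * Assuming ECORE_WAIT(sc, 1000) delays about 1 ms, the 5000 iterations above
 * give a timeout on the order of 5 seconds (20x longer on emulation).
 */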
334
335 /***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
336 /* credit handling callbacks */
337 static bool ecore_get_cam_offset_mac(struct ecore_vlan_mac_obj *o, int *offset)
338 {
339         struct ecore_credit_pool_obj *mp = o->macs_pool;
340
341         ECORE_DBG_BREAK_IF(!mp);
342
343         return mp->get_entry(mp, offset);
344 }
345
346 static bool ecore_get_credit_mac(struct ecore_vlan_mac_obj *o)
347 {
348         struct ecore_credit_pool_obj *mp = o->macs_pool;
349
350         ECORE_DBG_BREAK_IF(!mp);
351
352         return mp->get(mp, 1);
353 }
354
355 static bool ecore_get_cam_offset_vlan(struct ecore_vlan_mac_obj *o, int *offset)
356 {
357         struct ecore_credit_pool_obj *vp = o->vlans_pool;
358
359         ECORE_DBG_BREAK_IF(!vp);
360
361         return vp->get_entry(vp, offset);
362 }
363
364 static bool ecore_get_credit_vlan(struct ecore_vlan_mac_obj *o)
365 {
366         struct ecore_credit_pool_obj *vp = o->vlans_pool;
367
368         ECORE_DBG_BREAK_IF(!vp);
369
370         return vp->get(vp, 1);
371 }
372
373 static bool ecore_get_credit_vlan_mac(struct ecore_vlan_mac_obj *o)
374 {
375         struct ecore_credit_pool_obj *mp = o->macs_pool;
376         struct ecore_credit_pool_obj *vp = o->vlans_pool;
377
378         if (!mp->get(mp, 1))
379                 return FALSE;
380
381         if (!vp->get(vp, 1)) {
382                 mp->put(mp, 1);
383                 return FALSE;
384         }
385
386         return TRUE;
387 }
388
389 static bool ecore_put_cam_offset_mac(struct ecore_vlan_mac_obj *o, int offset)
390 {
391         struct ecore_credit_pool_obj *mp = o->macs_pool;
392
393         return mp->put_entry(mp, offset);
394 }
395
396 static bool ecore_put_credit_mac(struct ecore_vlan_mac_obj *o)
397 {
398         struct ecore_credit_pool_obj *mp = o->macs_pool;
399
400         return mp->put(mp, 1);
401 }
402
403 static bool ecore_put_cam_offset_vlan(struct ecore_vlan_mac_obj *o, int offset)
404 {
405         struct ecore_credit_pool_obj *vp = o->vlans_pool;
406
407         return vp->put_entry(vp, offset);
408 }
409
410 static bool ecore_put_credit_vlan(struct ecore_vlan_mac_obj *o)
411 {
412         struct ecore_credit_pool_obj *vp = o->vlans_pool;
413
414         return vp->put(vp, 1);
415 }
416
417 static bool ecore_put_credit_vlan_mac(struct ecore_vlan_mac_obj *o)
418 {
419         struct ecore_credit_pool_obj *mp = o->macs_pool;
420         struct ecore_credit_pool_obj *vp = o->vlans_pool;
421
422         if (!mp->put(mp, 1))
423                 return FALSE;
424
425         if (!vp->put(vp, 1)) {
426                 mp->get(mp, 1);
427                 return FALSE;
428         }
429
430         return TRUE;
431 }
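/* Editor's note: illustrative pairing of the credit callbacks above
 * (hypothetical caller, not driver code):
 *
 *     if (!o->get_credit(o))          e.g. ecore_get_credit_vlan_mac
 *             return ECORE_INVAL;     no CAM room left
 *     ...
 *     if (command_failed)
 *             o->put_credit(o);       roll the reservation back
 *
 * The vlan_mac variants reserve one credit from the MAC pool and one from the
 * VLAN pool, undoing the first reservation if the second one fails.
 */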
432
433 /**
434  * __ecore_vlan_mac_h_write_trylock - try getting the writer lock on vlan mac
435  * head list.
436  *
437  * @sc:         device handle
438  * @o:          vlan_mac object
439  *
440  * @details: Non-blocking implementation; should be called under execution
441  *           queue lock.
442  */
443 static int __ecore_vlan_mac_h_write_trylock(struct bxe_adapter *sc,
444                                             struct ecore_vlan_mac_obj *o)
445 {
446         if (o->head_reader) {
447                 ECORE_MSG(sc, "vlan_mac_lock writer - There are readers; Busy\n");
448                 return ECORE_BUSY;
449         }
450
451         ECORE_MSG(sc, "vlan_mac_lock writer - Taken\n");
452         return ECORE_SUCCESS;
453 }
454
455 /**
456  * __ecore_vlan_mac_h_exec_pending - execute a step that was previously pended
457  * because the vlan mac head list lock was taken.
458  *
459  * @sc:         device handle
460  * @o:          vlan_mac object
461  *
462  * @details Should be called under execution queue lock; notice it might release
463  *          and reclaim it during its run.
464  */
465 static void __ecore_vlan_mac_h_exec_pending(struct bxe_adapter *sc,
466                                             struct ecore_vlan_mac_obj *o)
467 {
468         int rc;
469         unsigned long ramrod_flags = o->saved_ramrod_flags;
470
471         ECORE_MSG(sc, "vlan_mac_lock execute pending command with ramrod flags %lu\n",
472                   ramrod_flags);
473         o->head_exe_request = FALSE;
474         o->saved_ramrod_flags = 0;
475         rc = ecore_exe_queue_step(sc, &o->exe_queue, &ramrod_flags);
476         if (rc != ECORE_SUCCESS) {
477                 ECORE_ERR("execution of pending commands failed with rc %d\n",
478                           rc);
479 #ifdef ECORE_STOP_ON_ERROR
480                 ecore_panic();
481 #endif
482         }
483 }
484
485 /**
486  * __ecore_vlan_mac_h_pend - pend an execution step that could not run
487  * because the vlan mac head list lock was taken.
488  *
489  * @sc:                 device handle
490  * @o:                  vlan_mac object
491  * @ramrod_flags:       ramrod flags of missed execution
492  *
493  * @details Should be called under execution queue lock.
494  */
495 static void __ecore_vlan_mac_h_pend(struct bxe_adapter *sc,
496                                     struct ecore_vlan_mac_obj *o,
497                                     unsigned long ramrod_flags)
498 {
499         o->head_exe_request = TRUE;
500         o->saved_ramrod_flags = ramrod_flags;
501         ECORE_MSG(sc, "Placing pending execution with ramrod flags %lu\n",
502                   ramrod_flags);
503 }
504
505 /**
506  * __ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
507  *
508  * @sc:                 device handle
509  * @o:                  vlan_mac object
510  *
511  * @details Should be called under execution queue lock. Notice if a pending
512  *          execution exists, it would perform it - possibly releasing and
513  *          reclaiming the execution queue lock.
514  */
515 static void __ecore_vlan_mac_h_write_unlock(struct bxe_adapter *sc,
516                                             struct ecore_vlan_mac_obj *o)
517 {
518         /* It's possible a new pending execution was added since this writer
519          * executed. If so, execute again. [Ad infinitum]
520          */
521         while (o->head_exe_request) {
522                 ECORE_MSG(sc, "vlan_mac_lock - writer release encountered a pending request\n");
523                 __ecore_vlan_mac_h_exec_pending(sc, o);
524         }
525 }
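/* Editor's sketch of how the writer-side helpers above cooperate; the actual
 * caller is the vlan_mac execution step further down in this file:
 *
 *     ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
 *     if (__ecore_vlan_mac_h_write_trylock(sc, o) != ECORE_SUCCESS)
 *             __ecore_vlan_mac_h_pend(sc, o, *ramrod_flags);
 *     else
 *             rc = ecore_exe_queue_step(sc, &o->exe_queue, ramrod_flags);
 *     ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
 *
 * A pended request is replayed later from __ecore_vlan_mac_h_write_unlock()
 * or from the last reader's unlock.
 */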
526
527 /**
528  * ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
529  *
530  * @sc:                 device handle
531  * @o:                  vlan_mac object
532  *
533  * @details Notice if a pending execution exists, it would perform it -
534  *          possibly releasing and reclaiming the execution queue lock.
535  */
536 void ecore_vlan_mac_h_write_unlock(struct bxe_adapter *sc,
537                                    struct ecore_vlan_mac_obj *o)
538 {
539         ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
540         __ecore_vlan_mac_h_write_unlock(sc, o);
541         ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
542 }
543
544 /**
545  * __ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
546  *
547  * @sc:                 device handle
548  * @o:                  vlan_mac object
549  *
550  * @details Should be called under the execution queue lock. May sleep. May
551  *          release and reclaim execution queue lock during its run.
552  */
553 static int __ecore_vlan_mac_h_read_lock(struct bxe_adapter *sc,
554                                         struct ecore_vlan_mac_obj *o)
555 {
556         /* If we got here, we're holding lock --> no WRITER exists */
557         o->head_reader++;
558         ECORE_MSG(sc, "vlan_mac_lock - locked reader - number %d\n",
559                   o->head_reader);
560
561         return ECORE_SUCCESS;
562 }
563
564 /**
565  * ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
566  *
567  * @sc:                 device handle
568  * @o:                  vlan_mac object
569  *
570  * @details May sleep. Claims and releases execution queue lock during its run.
571  */
572 int ecore_vlan_mac_h_read_lock(struct bxe_adapter *sc,
573                                struct ecore_vlan_mac_obj *o)
574 {
575         int rc;
576
577         ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
578         rc = __ecore_vlan_mac_h_read_lock(sc, o);
579         ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
580
581         return rc;
582 }
583
584 /**
585  * __ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
586  *
587  * @sc:                 device handle
588  * @o:                  vlan_mac object
589  *
590  * @details Should be called under execution queue lock. Notice if a pending
591  *          execution exists, it would be performed if this was the last
592  *          reader, possibly releasing and reclaiming the execution queue lock.
593  */
594 static void __ecore_vlan_mac_h_read_unlock(struct bxe_adapter *sc,
595                                           struct ecore_vlan_mac_obj *o)
596 {
597         if (!o->head_reader) {
598                 ECORE_ERR("Need to release vlan mac reader lock, but lock isn't taken\n");
599 #ifdef ECORE_STOP_ON_ERROR
600                 ecore_panic();
601 #endif
602         } else {
603                 o->head_reader--;
604                 ECORE_MSG(sc, "vlan_mac_lock - decreased readers to %d\n",
605                           o->head_reader);
606         }
607
608         /* It's possible a new pending execution was added, and that this reader
609          * was last - if so we need to execute the command.
610          */
611         if (!o->head_reader && o->head_exe_request) {
612                 ECORE_MSG(sc, "vlan_mac_lock - reader release encountered a pending request\n");
613
614                 /* Writer release will do the trick */
615                 __ecore_vlan_mac_h_write_unlock(sc, o);
616         }
617 }
618
619 /**
620  * ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
621  *
622  * @sc:                 device handle
623  * @o:                  vlan_mac object
624  *
625  * @details Notice if a pending execution exists, it would be performed if this
626  *          was the last reader. Claims and releases the execution queue lock
627  *          during its run.
628  */
629 void ecore_vlan_mac_h_read_unlock(struct bxe_adapter *sc,
630                                   struct ecore_vlan_mac_obj *o)
631 {
632         ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
633         __ecore_vlan_mac_h_read_unlock(sc, o);
634         ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
635 }
636
637 /**
638  * ecore_get_n_elements - get up to n elements from the vlan mac registry
639  *
640  * @sc:                 device handle
641  * @o:                  vlan_mac object
642  * @n:                  number of elements to get
643  * @base:               base address for element placement
644  * @stride:             stride between elements (in bytes)
 * @size:                size of each element to copy (in bytes)
645  */
646 static int ecore_get_n_elements(struct bxe_adapter *sc, struct ecore_vlan_mac_obj *o,
647                                  int n, uint8_t *base, uint8_t stride, uint8_t size)
648 {
649         struct ecore_vlan_mac_registry_elem *pos;
650         uint8_t *next = base;
651         int counter = 0;
652         int read_lock;
653
654         ECORE_MSG(sc, "get_n_elements - taking vlan_mac_lock (reader)\n");
655         read_lock = ecore_vlan_mac_h_read_lock(sc, o);
656         if (read_lock != ECORE_SUCCESS)
657                 ECORE_ERR("get_n_elements failed to get vlan mac reader lock; Access without lock\n");
658
659         /* traverse list */
660         ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
661                                   struct ecore_vlan_mac_registry_elem) {
662                 if (counter < n) {
663                         ECORE_MEMCPY(next, &pos->u, size);
664                         counter++;
665                         ECORE_MSG(sc, "copied element number %d to address %p element was:\n",
666                                   counter, next);
667                         next += stride + size;
668                 }
669         }
670
671         if (read_lock == ECORE_SUCCESS) {
672                 ECORE_MSG(sc, "get_n_elements - releasing vlan_mac_lock (reader)\n");
673                 ecore_vlan_mac_h_read_unlock(sc, o);
674         }
675
676         return counter * ETH_ALEN;
677 }
678
679 /* check_add() callbacks */
680 static int ecore_check_mac_add(struct bxe_adapter *sc,
681                                struct ecore_vlan_mac_obj *o,
682                                union ecore_classification_ramrod_data *data)
683 {
684         struct ecore_vlan_mac_registry_elem *pos;
685
686         ECORE_MSG(sc, "Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for ADD command\n", data->mac.mac[0], data->mac.mac[1], data->mac.mac[2], data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]);
687
688         if (!ECORE_IS_VALID_ETHER_ADDR(data->mac.mac))
689                 return ECORE_INVAL;
690
691         /* Check if a requested MAC already exists */
692         ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
693                                   struct ecore_vlan_mac_registry_elem)
694                 if (!ECORE_MEMCMP(data->mac.mac, pos->u.mac.mac, ETH_ALEN) &&
695                     (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
696                         return ECORE_EXISTS;
697
698         return ECORE_SUCCESS;
699 }
700
701 static int ecore_check_vlan_add(struct bxe_adapter *sc,
702                                 struct ecore_vlan_mac_obj *o,
703                                 union ecore_classification_ramrod_data *data)
704 {
705         struct ecore_vlan_mac_registry_elem *pos;
706
707         ECORE_MSG(sc, "Checking VLAN %d for ADD command\n", data->vlan.vlan);
708
709         ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
710                                   struct ecore_vlan_mac_registry_elem)
711                 if (data->vlan.vlan == pos->u.vlan.vlan)
712                         return ECORE_EXISTS;
713
714         return ECORE_SUCCESS;
715 }
716
717 static int ecore_check_vlan_mac_add(struct bxe_adapter *sc,
718                                     struct ecore_vlan_mac_obj *o,
719                                    union ecore_classification_ramrod_data *data)
720 {
721         struct ecore_vlan_mac_registry_elem *pos;
722
723         ECORE_MSG(sc, "Checking VLAN_MAC (%02x:%02x:%02x:%02x:%02x:%02x, %d) for ADD command\n",
724                   data->vlan_mac.mac[0], data->vlan_mac.mac[1], data->vlan_mac.mac[2], data->vlan_mac.mac[3], data->vlan_mac.mac[4], data->vlan_mac.mac[5], data->vlan_mac.vlan);
725
726         ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
727                                   struct ecore_vlan_mac_registry_elem)
728                 if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
729                     (!ECORE_MEMCMP(data->vlan_mac.mac, pos->u.vlan_mac.mac,
730                                   ETH_ALEN)) &&
731                     (data->vlan_mac.is_inner_mac ==
732                      pos->u.vlan_mac.is_inner_mac))
733                         return ECORE_EXISTS;
734
735         return ECORE_SUCCESS;
736 }
737
738 /* check_del() callbacks */
739 static struct ecore_vlan_mac_registry_elem *
740         ecore_check_mac_del(struct bxe_adapter *sc,
741                             struct ecore_vlan_mac_obj *o,
742                             union ecore_classification_ramrod_data *data)
743 {
744         struct ecore_vlan_mac_registry_elem *pos;
745
746         ECORE_MSG(sc, "Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for DEL command\n", data->mac.mac[0], data->mac.mac[1], data->mac.mac[2], data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]);
747
748         ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
749                                   struct ecore_vlan_mac_registry_elem)
750                 if ((!ECORE_MEMCMP(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) &&
751                     (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
752                         return pos;
753
754         return NULL;
755 }
756
757 static struct ecore_vlan_mac_registry_elem *
758         ecore_check_vlan_del(struct bxe_adapter *sc,
759                              struct ecore_vlan_mac_obj *o,
760                              union ecore_classification_ramrod_data *data)
761 {
762         struct ecore_vlan_mac_registry_elem *pos;
763
764         ECORE_MSG(sc, "Checking VLAN %d for DEL command\n", data->vlan.vlan);
765
766         ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
767                                   struct ecore_vlan_mac_registry_elem)
768                 if (data->vlan.vlan == pos->u.vlan.vlan)
769                         return pos;
770
771         return NULL;
772 }
773
774 static struct ecore_vlan_mac_registry_elem *
775         ecore_check_vlan_mac_del(struct bxe_adapter *sc,
776                                  struct ecore_vlan_mac_obj *o,
777                                  union ecore_classification_ramrod_data *data)
778 {
779         struct ecore_vlan_mac_registry_elem *pos;
780
781         ECORE_MSG(sc, "Checking VLAN_MAC (%02x:%02x:%02x:%02x:%02x:%02x, %d) for DEL command\n",
782                   data->vlan_mac.mac[0], data->vlan_mac.mac[1], data->vlan_mac.mac[2], data->vlan_mac.mac[3], data->vlan_mac.mac[4], data->vlan_mac.mac[5], data->vlan_mac.vlan);
783
784         ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
785                                   struct ecore_vlan_mac_registry_elem)
786                 if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
787                     (!ECORE_MEMCMP(data->vlan_mac.mac, pos->u.vlan_mac.mac,
788                              ETH_ALEN)) &&
789                     (data->vlan_mac.is_inner_mac ==
790                      pos->u.vlan_mac.is_inner_mac))
791                         return pos;
792
793         return NULL;
794 }
795
796 /* check_move() callback */
797 static bool ecore_check_move(struct bxe_adapter *sc,
798                              struct ecore_vlan_mac_obj *src_o,
799                              struct ecore_vlan_mac_obj *dst_o,
800                              union ecore_classification_ramrod_data *data)
801 {
802         struct ecore_vlan_mac_registry_elem *pos;
803         int rc;
804
805         /* Check if we can delete the requested configuration from the first
806          * object.
807          */
808         pos = src_o->check_del(sc, src_o, data);
809
810         /* Check if the configuration can be added */
811         rc = dst_o->check_add(sc, dst_o, data);
812
813         /* If this classification can not be added (is already set)
814          * or can't be deleted - return an error.
815          */
816         if (rc || !pos)
817                 return FALSE;
818
819         return TRUE;
820 }
821
822 static bool ecore_check_move_always_err(
823         struct bxe_adapter *sc,
824         struct ecore_vlan_mac_obj *src_o,
825         struct ecore_vlan_mac_obj *dst_o,
826         union ecore_classification_ramrod_data *data)
827 {
828         return FALSE;
829 }
830
831 static inline uint8_t ecore_vlan_mac_get_rx_tx_flag(struct ecore_vlan_mac_obj *o)
832 {
833         struct ecore_raw_obj *raw = &o->raw;
834         uint8_t rx_tx_flag = 0;
835
836         if ((raw->obj_type == ECORE_OBJ_TYPE_TX) ||
837             (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
838                 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;
839
840         if ((raw->obj_type == ECORE_OBJ_TYPE_RX) ||
841             (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
842                 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;
843
844         return rx_tx_flag;
845 }
846
847 void ecore_set_mac_in_nig(struct bxe_adapter *sc,
848                           bool add, unsigned char *dev_addr, int index)
849 {
850         uint32_t wb_data[2];
851         uint32_t reg_offset = ECORE_PORT_ID(sc) ? NIG_REG_LLH1_FUNC_MEM :
852                          NIG_REG_LLH0_FUNC_MEM;
853
854         if (!ECORE_IS_MF_SI_MODE(sc) && !IS_MF_AFEX(sc))
855                 return;
856
857         if (index > ECORE_LLH_CAM_MAX_PF_LINE)
858                 return;
859
860         ECORE_MSG(sc, "Going to %s LLH configuration at entry %d\n",
861                   (add ? "ADD" : "DELETE"), index);
862
863         if (add) {
864                 /* LLH_FUNC_MEM is a uint64_t WB register */
865                 reg_offset += 8*index;
866
867                 wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
868                               (dev_addr[4] <<  8) |  dev_addr[5]);
869                 wb_data[1] = ((dev_addr[0] <<  8) |  dev_addr[1]);
870
871                 ECORE_REG_WR_DMAE_LEN(sc, reg_offset, wb_data, 2);
872         }
873
874         REG_WR(sc, (ECORE_PORT_ID(sc) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
875                                   NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
876 }
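/* Editor's note: worked example of the LLH packing above. For the MAC
 * address 00:11:22:33:44:55 (dev_addr[0]..dev_addr[5]):
 *
 *     wb_data[0] = 0x22334455;   bytes 2..5
 *     wb_data[1] = 0x00000011;   bytes 0..1 in the low 16 bits
 *
 * i.e. the 64-bit wide-bus entry holds the MAC right-justified.
 */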
877
878 /**
879  * ecore_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
880  *
881  * @sc:         device handle
882  * @o:          queue for which we want to configure this rule
883  * @add:        if TRUE the command is an ADD command, DEL otherwise
884  * @opcode:     CLASSIFY_RULE_OPCODE_XXX
885  * @hdr:        pointer to a header to setup
886  *
887  */
888 static inline void ecore_vlan_mac_set_cmd_hdr_e2(struct bxe_adapter *sc,
889         struct ecore_vlan_mac_obj *o, bool add, int opcode,
890         struct eth_classify_cmd_header *hdr)
891 {
892         struct ecore_raw_obj *raw = &o->raw;
893
894         hdr->client_id = raw->cl_id;
895         hdr->func_id = raw->func_id;
896
897         /* Rx and/or Tx (internal switching) configuration? */
898         hdr->cmd_general_data |=
899                 ecore_vlan_mac_get_rx_tx_flag(o);
900
901         if (add)
902                 hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;
903
904         hdr->cmd_general_data |=
905                 (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
906 }
907
908 /**
909  * ecore_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
910  *
911  * @cid:        connection id
912  * @type:       ECORE_FILTER_XXX_PENDING
913  * @hdr:        pointer to header to setup
914  * @rule_cnt:   number of rules in the ramrod data
915  *
916  * Currently we always configure one rule and set the echo field to contain a
917  * CID and an opcode type.
918  */
919 static inline void ecore_vlan_mac_set_rdata_hdr_e2(uint32_t cid, int type,
920                                 struct eth_classify_header *hdr, int rule_cnt)
921 {
922         hdr->echo = ECORE_CPU_TO_LE32((cid & ECORE_SWCID_MASK) |
923                                 (type << ECORE_SWCID_SHIFT));
924         hdr->rule_cnt = (uint8_t)rule_cnt;
925 }
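/* Editor's note: the echo value is decoded again on the completion path,
 * roughly (sketch using the same macros):
 *
 *     cid  = echo & ECORE_SWCID_MASK;
 *     type = echo >> ECORE_SWCID_SHIFT;
 *
 * which is how a completed rule batch is matched back to its object and its
 * pending-state bit.
 */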
926
927 /* hw_config() callbacks */
928 static void ecore_set_one_mac_e2(struct bxe_adapter *sc,
929                                  struct ecore_vlan_mac_obj *o,
930                                  struct ecore_exeq_elem *elem, int rule_idx,
931                                  int cam_offset)
932 {
933         struct ecore_raw_obj *raw = &o->raw;
934         struct eth_classify_rules_ramrod_data *data =
935                 (struct eth_classify_rules_ramrod_data *)(raw->rdata);
936         int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
937         union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
938         bool add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE;
939         unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
940         uint8_t *mac = elem->cmd_data.vlan_mac.u.mac.mac;
941
942         /* Set LLH CAM entry: currently only iSCSI and ETH macs are
943          * relevant. In addition, current implementation is tuned for a
944          * single ETH MAC.
945          *
946          * When PF configuration of multiple unicast ETH MACs in
947          * switch-independent mode is required (NetQ, multiple netdev
948          * MACs, etc.), consider making better use of the 8 per-function
949          * MAC entries in the LLH register. There are also
950          * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that bring the total
951          * number of CAM entries to 16.
952          *
953          * Currently we won't configure NIG for MACs other than a primary ETH
954          * MAC and iSCSI L2 MAC.
955          *
956          * If this MAC is moving from one Queue to another, no need to change
957          * NIG configuration.
958          */
959         if (cmd != ECORE_VLAN_MAC_MOVE) {
960                 if (ECORE_TEST_BIT(ECORE_ISCSI_ETH_MAC, vlan_mac_flags))
961                         ecore_set_mac_in_nig(sc, add, mac,
962                                              ECORE_LLH_CAM_ISCSI_ETH_LINE);
963                 else if (ECORE_TEST_BIT(ECORE_ETH_MAC, vlan_mac_flags))
964                         ecore_set_mac_in_nig(sc, add, mac,
965                                              ECORE_LLH_CAM_ETH_LINE);
966         }
967
968         /* Reset the ramrod data buffer for the first rule */
969         if (rule_idx == 0)
970                 ECORE_MEMSET(data, 0, sizeof(*data));
971
972         /* Setup a command header */
973         ecore_vlan_mac_set_cmd_hdr_e2(sc, o, add, CLASSIFY_RULE_OPCODE_MAC,
974                                       &rule_entry->mac.header);
975
976         ECORE_MSG(sc, "About to %s MAC %02x:%02x:%02x:%02x:%02x:%02x for Queue %d\n",
977                   (add ? "add" : "delete"), mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], raw->cl_id);
978
979         /* Set a MAC itself */
980         ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb,
981                               &rule_entry->mac.mac_mid,
982                               &rule_entry->mac.mac_lsb, mac);
983         rule_entry->mac.inner_mac =
984                 elem->cmd_data.vlan_mac.u.mac.is_inner_mac;
985
986         /* MOVE: Add a rule that will add this MAC to the target Queue */
987         if (cmd == ECORE_VLAN_MAC_MOVE) {
988                 rule_entry++;
989                 rule_cnt++;
990
991                 /* Setup ramrod data */
992                 ecore_vlan_mac_set_cmd_hdr_e2(sc,
993                                         elem->cmd_data.vlan_mac.target_obj,
994                                               TRUE, CLASSIFY_RULE_OPCODE_MAC,
995                                               &rule_entry->mac.header);
996
997                 /* Set a MAC itself */
998                 ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb,
999                                       &rule_entry->mac.mac_mid,
1000                                       &rule_entry->mac.mac_lsb, mac);
1001                 rule_entry->mac.inner_mac =
1002                         elem->cmd_data.vlan_mac.u.mac.is_inner_mac;
1003         }
1004
1005         /* Set the ramrod data header */
1006         /* TODO: take this to a higher level in order to avoid writing the
1007          * header multiple times */
1008         ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
1009                                         rule_cnt);
1010 }
1011
1012 /**
1013  * ecore_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
1014  *
1015  * @sc:         device handle
1016  * @o:          queue
1017  * @type:       ECORE_FILTER_XXX_PENDING
1018  * @cam_offset: offset in cam memory
1019  * @hdr:        pointer to a header to setup
1020  *
1021  * E1/E1H
1022  */
1023 static inline void ecore_vlan_mac_set_rdata_hdr_e1x(struct bxe_adapter *sc,
1024         struct ecore_vlan_mac_obj *o, int type, int cam_offset,
1025         struct mac_configuration_hdr *hdr)
1026 {
1027         struct ecore_raw_obj *r = &o->raw;
1028
1029         hdr->length = 1;
1030         hdr->offset = (uint8_t)cam_offset;
1031         hdr->client_id = ECORE_CPU_TO_LE16(0xff);
1032         hdr->echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
1033                                 (type << ECORE_SWCID_SHIFT));
1034 }
1035
1036 static inline void ecore_vlan_mac_set_cfg_entry_e1x(struct bxe_adapter *sc,
1037         struct ecore_vlan_mac_obj *o, bool add, int opcode, uint8_t *mac,
1038         uint16_t vlan_id, struct mac_configuration_entry *cfg_entry)
1039 {
1040         struct ecore_raw_obj *r = &o->raw;
1041         uint32_t cl_bit_vec = (1 << r->cl_id);
1042
1043         cfg_entry->clients_bit_vector = ECORE_CPU_TO_LE32(cl_bit_vec);
1044         cfg_entry->pf_id = r->func_id;
1045         cfg_entry->vlan_id = ECORE_CPU_TO_LE16(vlan_id);
1046
1047         if (add) {
1048                 ECORE_SET_FLAG(cfg_entry->flags,
1049                                MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
1050                                T_ETH_MAC_COMMAND_SET);
1051                 ECORE_SET_FLAG(cfg_entry->flags,
1052                                MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE,
1053                                opcode);
1054
1055                 /* Set a MAC in a ramrod data */
1056                 ecore_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
1057                                       &cfg_entry->middle_mac_addr,
1058                                       &cfg_entry->lsb_mac_addr, mac);
1059         } else
1060                 ECORE_SET_FLAG(cfg_entry->flags,
1061                                MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
1062                                T_ETH_MAC_COMMAND_INVALIDATE);
1063 }
1064
1065 static inline void ecore_vlan_mac_set_rdata_e1x(struct bxe_adapter *sc,
1066         struct ecore_vlan_mac_obj *o, int type, int cam_offset, bool add,
1067         uint8_t *mac, uint16_t vlan_id, int opcode, struct mac_configuration_cmd *config)
1068 {
1069         struct mac_configuration_entry *cfg_entry = &config->config_table[0];
1070         struct ecore_raw_obj *raw = &o->raw;
1071
1072         ecore_vlan_mac_set_rdata_hdr_e1x(sc, o, type, cam_offset,
1073                                          &config->hdr);
1074         ecore_vlan_mac_set_cfg_entry_e1x(sc, o, add, opcode, mac, vlan_id,
1075                                          cfg_entry);
1076
1077         ECORE_MSG(sc, "%s MAC %02x:%02x:%02x:%02x:%02x:%02x CLID %d CAM offset %d\n",
1078                   (add ? "setting" : "clearing"),
1079                   mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], raw->cl_id, cam_offset);
1080 }
1081
1082 /**
1083  * ecore_set_one_mac_e1x - fill a single MAC rule ramrod data
1084  *
1085  * @sc:         device handle
1086  * @o:          ecore_vlan_mac_obj
1087  * @elem:       ecore_exeq_elem
1088  * @rule_idx:   rule_idx
1089  * @cam_offset: cam_offset
1090  */
1091 static void ecore_set_one_mac_e1x(struct bxe_adapter *sc,
1092                                   struct ecore_vlan_mac_obj *o,
1093                                   struct ecore_exeq_elem *elem, int rule_idx,
1094                                   int cam_offset)
1095 {
1096         struct ecore_raw_obj *raw = &o->raw;
1097         struct mac_configuration_cmd *config =
1098                 (struct mac_configuration_cmd *)(raw->rdata);
1099         /* 57710 and 57711 do not support MOVE command,
1100          * so it's either ADD or DEL
1101          */
1102         bool add = (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
1103                 TRUE : FALSE;
1104
1105         /* Reset the ramrod data buffer */
1106         ECORE_MEMSET(config, 0, sizeof(*config));
1107
1108         ecore_vlan_mac_set_rdata_e1x(sc, o, raw->state,
1109                                      cam_offset, add,
1110                                      elem->cmd_data.vlan_mac.u.mac.mac, 0,
1111                                      ETH_VLAN_FILTER_ANY_VLAN, config);
1112 }
1113
1114 static void ecore_set_one_vlan_e2(struct bxe_adapter *sc,
1115                                   struct ecore_vlan_mac_obj *o,
1116                                   struct ecore_exeq_elem *elem, int rule_idx,
1117                                   int cam_offset)
1118 {
1119         struct ecore_raw_obj *raw = &o->raw;
1120         struct eth_classify_rules_ramrod_data *data =
1121                 (struct eth_classify_rules_ramrod_data *)(raw->rdata);
1122         int rule_cnt = rule_idx + 1;
1123         union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
1124         enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
1125         bool add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE;
1126         uint16_t vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;
1127
1128         /* Reset the ramrod data buffer for the first rule */
1129         if (rule_idx == 0)
1130                 ECORE_MEMSET(data, 0, sizeof(*data));
1131
1132         /* Set a rule header */
1133         ecore_vlan_mac_set_cmd_hdr_e2(sc, o, add, CLASSIFY_RULE_OPCODE_VLAN,
1134                                       &rule_entry->vlan.header);
1135
1136         ECORE_MSG(sc, "About to %s VLAN %d\n", (add ? "add" : "delete"),
1137                   vlan);
1138
1139         /* Set a VLAN itself */
1140         rule_entry->vlan.vlan = ECORE_CPU_TO_LE16(vlan);
1141
1142         /* MOVE: Add a rule that will add this MAC to the target Queue */
1143         if (cmd == ECORE_VLAN_MAC_MOVE) {
1144                 rule_entry++;
1145                 rule_cnt++;
1146
1147                 /* Setup ramrod data */
1148                 ecore_vlan_mac_set_cmd_hdr_e2(sc,
1149                                         elem->cmd_data.vlan_mac.target_obj,
1150                                               TRUE, CLASSIFY_RULE_OPCODE_VLAN,
1151                                               &rule_entry->vlan.header);
1152
1153                 /* Set a VLAN itself */
1154                 rule_entry->vlan.vlan = ECORE_CPU_TO_LE16(vlan);
1155         }
1156
1157         /* Set the ramrod data header */
1158         /* TODO: take this to a higher level in order to avoid writing the
1159          * header multiple times */
1160         ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
1161                                         rule_cnt);
1162 }
1163
1164 static void ecore_set_one_vlan_mac_e2(struct bxe_adapter *sc,
1165                                       struct ecore_vlan_mac_obj *o,
1166                                       struct ecore_exeq_elem *elem,
1167                                       int rule_idx, int cam_offset)
1168 {
1169         struct ecore_raw_obj *raw = &o->raw;
1170         struct eth_classify_rules_ramrod_data *data =
1171                 (struct eth_classify_rules_ramrod_data *)(raw->rdata);
1172         int rule_cnt = rule_idx + 1;
1173         union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
1174         enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
1175         bool add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE;
1176         uint16_t vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
1177         uint8_t *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;
1178
1179         /* Reset the ramrod data buffer for the first rule */
1180         if (rule_idx == 0)
1181                 ECORE_MEMSET(data, 0, sizeof(*data));
1182
1183         /* Set a rule header */
1184         ecore_vlan_mac_set_cmd_hdr_e2(sc, o, add, CLASSIFY_RULE_OPCODE_PAIR,
1185                                       &rule_entry->pair.header);
1186
1187         /* Set VLAN and MAC themselves */
1188         rule_entry->pair.vlan = ECORE_CPU_TO_LE16(vlan);
1189         ecore_set_fw_mac_addr(&rule_entry->pair.mac_msb,
1190                               &rule_entry->pair.mac_mid,
1191                               &rule_entry->pair.mac_lsb, mac);
1192         rule_entry->pair.inner_mac =
1193                         elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac;
1194         /* MOVE: Add a rule that will add this MAC to the target Queue */
1195         if (cmd == ECORE_VLAN_MAC_MOVE) {
1196                 rule_entry++;
1197                 rule_cnt++;
1198
1199                 /* Setup ramrod data */
1200                 ecore_vlan_mac_set_cmd_hdr_e2(sc,
1201                                         elem->cmd_data.vlan_mac.target_obj,
1202                                               TRUE, CLASSIFY_RULE_OPCODE_PAIR,
1203                                               &rule_entry->pair.header);
1204
1205                 /* Set a VLAN itself */
1206                 rule_entry->pair.vlan = ECORE_CPU_TO_LE16(vlan);
1207                 ecore_set_fw_mac_addr(&rule_entry->pair.mac_msb,
1208                                       &rule_entry->pair.mac_mid,
1209                                       &rule_entry->pair.mac_lsb, mac);
1210                 rule_entry->pair.inner_mac =
1211                         elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac;
1212         }
1213
1214         /* Set the ramrod data header */
1215         /* TODO: take this to a higher level in order to avoid writing the
1216          * header multiple times */
1217         ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
1218                                         rule_cnt);
1219 }
1220
1221 /**
1222  * ecore_set_one_vlan_mac_e1h - fill a single VLAN-MAC rule ramrod data (E1H)
1223  *
1224  * @sc:         device handle
1225  * @o:          ecore_vlan_mac_obj
1226  * @elem:       ecore_exeq_elem
1227  * @rule_idx:   rule_idx
1228  * @cam_offset: cam_offset
1229  */
1230 static void ecore_set_one_vlan_mac_e1h(struct bxe_adapter *sc,
1231                                        struct ecore_vlan_mac_obj *o,
1232                                        struct ecore_exeq_elem *elem,
1233                                        int rule_idx, int cam_offset)
1234 {
1235         struct ecore_raw_obj *raw = &o->raw;
1236         struct mac_configuration_cmd *config =
1237                 (struct mac_configuration_cmd *)(raw->rdata);
1238         /* 57710 and 57711 do not support MOVE command,
1239          * so it's either ADD or DEL
1240          */
1241         bool add = (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
1242                 TRUE : FALSE;
1243
1244         /* Reset the ramrod data buffer */
1245         ECORE_MEMSET(config, 0, sizeof(*config));
1246
1247         ecore_vlan_mac_set_rdata_e1x(sc, o, ECORE_FILTER_VLAN_MAC_PENDING,
1248                                      cam_offset, add,
1249                                      elem->cmd_data.vlan_mac.u.vlan_mac.mac,
1250                                      elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
1251                                      ETH_VLAN_FILTER_CLASSIFY, config);
1252 }
1253
1254 #define list_next_entry(pos, member) \
1255         list_entry((pos)->member.next, typeof(*(pos)), member)
1256
1257 /**
1258  * ecore_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
1259  *
1260  * @sc:         device handle
1261  * @p:          command parameters
1262  * @ppos:       pointer to the cookie
1263  *
1264  * Reconfigures the next MAC/VLAN/VLAN-MAC element from the list of
1265  * previously configured elements.
1266  *
1267  * From the command parameters only the RAMROD_COMP_WAIT bit in
1268  * ramrod_flags is taken into account.
1269  *
1270  * The cookie should be given back in the next call so that the function
1271  * handles the next element. If *ppos is set to NULL the iterator is
1272  * restarted; if the returned *ppos == NULL the last element has been
1273  * handled.
1274  *
1275  */
1276 static int ecore_vlan_mac_restore(struct bxe_adapter *sc,
1277                            struct ecore_vlan_mac_ramrod_params *p,
1278                            struct ecore_vlan_mac_registry_elem **ppos)
1279 {
1280         struct ecore_vlan_mac_registry_elem *pos;
1281         struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
1282
1283         /* If list is empty - there is nothing to do here */
1284         if (ECORE_LIST_IS_EMPTY(&o->head)) {
1285                 *ppos = NULL;
1286                 return 0;
1287         }
1288
1289         /* make a step... */
1290         if (*ppos == NULL)
1291                 *ppos = ECORE_LIST_FIRST_ENTRY(&o->head,
1292                                             struct ecore_vlan_mac_registry_elem,
1293                                                link);
1294         else
1295                 *ppos = ECORE_LIST_NEXT(*ppos, link,
1296                                         struct ecore_vlan_mac_registry_elem);
1297
1298         pos = *ppos;
1299
1300         /* If it's the last step - return NULL */
1301         if (ECORE_LIST_IS_LAST(&pos->link, &o->head))
1302                 *ppos = NULL;
1303
1304         /* Prepare a 'user_req' */
1305         ECORE_MEMCPY(&p->user_req.u, &pos->u, sizeof(pos->u));
1306
1307         /* Set the command */
1308         p->user_req.cmd = ECORE_VLAN_MAC_ADD;
1309
1310         /* Set vlan_mac_flags */
1311         p->user_req.vlan_mac_flags = pos->vlan_mac_flags;
1312
1313         /* Set a restore bit */
1314         ECORE_SET_BIT_NA(RAMROD_RESTORE, &p->ramrod_flags);
1315
1316         return ecore_config_vlan_mac(sc, p);
1317 }
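/* Editor's sketch of the cookie-driven restore loop implied above
 * (hypothetical caller):
 *
 *     struct ecore_vlan_mac_registry_elem *pos = NULL;
 *     int rc;
 *
 *     do {
 *             rc = ecore_vlan_mac_restore(sc, p, &pos);
 *             if (rc < 0)
 *                     return rc;
 *     } while (pos != NULL);
 */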
1318
1319 /* ecore_exeq_get_mac/ecore_exeq_get_vlan/ecore_exeq_get_vlan_mac return a
1320  * pointer to an element with a specific criteria and NULL if such an element
1321  * hasn't been found.
1322  */
1323 static struct ecore_exeq_elem *ecore_exeq_get_mac(
1324         struct ecore_exe_queue_obj *o,
1325         struct ecore_exeq_elem *elem)
1326 {
1327         struct ecore_exeq_elem *pos;
1328         struct ecore_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;
1329
1330         /* Check pending for execution commands */
1331         ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link,
1332                                   struct ecore_exeq_elem)
1333                 if (!ECORE_MEMCMP(&pos->cmd_data.vlan_mac.u.mac, data,
1334                               sizeof(*data)) &&
1335                     (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1336                         return pos;
1337
1338         return NULL;
1339 }
1340
1341 static struct ecore_exeq_elem *ecore_exeq_get_vlan(
1342         struct ecore_exe_queue_obj *o,
1343         struct ecore_exeq_elem *elem)
1344 {
1345         struct ecore_exeq_elem *pos;
1346         struct ecore_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;
1347
1348         /* Check pending for execution commands */
1349         ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link,
1350                                   struct ecore_exeq_elem)
1351                 if (!ECORE_MEMCMP(&pos->cmd_data.vlan_mac.u.vlan, data,
1352                               sizeof(*data)) &&
1353                     (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1354                         return pos;
1355
1356         return NULL;
1357 }
1358
1359 static struct ecore_exeq_elem *ecore_exeq_get_vlan_mac(
1360         struct ecore_exe_queue_obj *o,
1361         struct ecore_exeq_elem *elem)
1362 {
1363         struct ecore_exeq_elem *pos;
1364         struct ecore_vlan_mac_ramrod_data *data =
1365                 &elem->cmd_data.vlan_mac.u.vlan_mac;
1366
1367         /* Check pending for execution commands */
1368         ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link,
1369                                   struct ecore_exeq_elem)
1370                 if (!ECORE_MEMCMP(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
1371                               sizeof(*data)) &&
1372                     (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1373                         return pos;
1374
1375         return NULL;
1376 }
1377
1378 /**
1379  * ecore_validate_vlan_mac_add - check if an ADD command can be executed
1380  *
1381  * @sc:         device handle
1382  * @qo:         ecore_qable_obj
1383  * @elem:       ecore_exeq_elem
1384  *
1385  * Checks that the requested configuration can be added. If yes and if
1386  * requested, consume CAM credit.
1387  *
1388  * The 'validate' is run after the 'optimize'.
1389  *
1390  */
1391 static inline int ecore_validate_vlan_mac_add(struct bxe_adapter *sc,
1392                                               union ecore_qable_obj *qo,
1393                                               struct ecore_exeq_elem *elem)
1394 {
1395         struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
1396         struct ecore_exe_queue_obj *exeq = &o->exe_queue;
1397         int rc;
1398
1399         /* Check the registry */
1400         rc = o->check_add(sc, o, &elem->cmd_data.vlan_mac.u);
1401         if (rc) {
1402                 ECORE_MSG(sc, "ADD command is not allowed considering current registry state.\n");
1403                 return rc;
1404         }
1405
1406         /* Check if there is a pending ADD command for this
1407          * MAC/VLAN/VLAN-MAC. Return an error if there is.
1408          */
1409         if (exeq->get(exeq, elem)) {
1410                 ECORE_MSG(sc, "There is a pending ADD command already\n");
1411                 return ECORE_EXISTS;
1412         }
1413
1414         /* TODO: Check the pending MOVE from other objects where this
1415          * object is a destination object.
1416          */
1417
1418         /* Consume the credit unless explicitly asked not to */
1419         if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1420                              &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1421             o->get_credit(o)))
1422                 return ECORE_INVAL;
1423
1424         return ECORE_SUCCESS;
1425 }
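
/* Credit accounting for ADD: unless ECORE_DONT_CONSUME_CAM_CREDIT is set in
 * the request flags, a successful validation takes one credit from the
 * object's CAM credit pool (macs_pool and/or vlans_pool, see
 * ecore_init_vlan_mac_common() below) via o->get_credit(). The matching
 * ecore_validate_vlan_mac_del() below returns the credit on DEL, and
 * ecore_remove_vlan_mac() reverses either direction if a queued command is
 * dropped before execution.
 */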
1426
1427 /**
1428  * ecore_validate_vlan_mac_del - check if the DEL command can be executed
1429  *
1430  * @sc:         device handle
1431  * @qo:         qable object to check
1432  * @elem:       element that needs to be deleted
1433  *
1434  * Checks that the requested configuration can be deleted. If yes and if
1435  * requested, returns a CAM credit.
1436  *
1437  * The 'validate' is run after the 'optimize'.
1438  */
1439 static inline int ecore_validate_vlan_mac_del(struct bxe_adapter *sc,
1440                                               union ecore_qable_obj *qo,
1441                                               struct ecore_exeq_elem *elem)
1442 {
1443         struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
1444         struct ecore_vlan_mac_registry_elem *pos;
1445         struct ecore_exe_queue_obj *exeq = &o->exe_queue;
1446         struct ecore_exeq_elem query_elem;
1447
1448         /* If this classification cannot be deleted (it doesn't exist
1449          * in the registry) - return ECORE_EXISTS.
1450          */
1451         pos = o->check_del(sc, o, &elem->cmd_data.vlan_mac.u);
1452         if (!pos) {
1453                 ECORE_MSG(sc, "DEL command is not allowed considering current registry state\n");
1454                 return ECORE_EXISTS;
1455         }
1456
1457         /* Check if there are pending DEL or MOVE commands for this
1458          * MAC/VLAN/VLAN-MAC. Return an error if so.
1459          */
1460         ECORE_MEMCPY(&query_elem, elem, sizeof(query_elem));
1461
1462         /* Check for MOVE commands */
1463         query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_MOVE;
1464         if (exeq->get(exeq, &query_elem)) {
1465                 ECORE_ERR("There is a pending MOVE command already\n");
1466                 return ECORE_INVAL;
1467         }
1468
1469         /* Check for DEL commands */
1470         if (exeq->get(exeq, elem)) {
1471                 ECORE_MSG(sc, "There is a pending DEL command already\n");
1472                 return ECORE_EXISTS;
1473         }
1474
1475         /* Return the credit to the credit pool unless explicitly asked not to */
1476         if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1477                              &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1478             o->put_credit(o))) {
1479                 ECORE_ERR("Failed to return a credit\n");
1480                 return ECORE_INVAL;
1481         }
1482
1483         return ECORE_SUCCESS;
1484 }
1485
1486 /**
1487  * ecore_validate_vlan_mac_move - check if the MOVE command can be executed
1488  *
1489  * @sc:         device handle
1490  * @qo:         qable object to check (source)
1491  * @elem:       element that needs to be moved
1492  *
1493  * Checks that the requested configuration can be moved. If yes and if
1494  * requested, returns a CAM credit.
1495  *
1496  * The 'validate' is run after the 'optimize'.
1497  */
1498 static inline int ecore_validate_vlan_mac_move(struct bxe_adapter *sc,
1499                                                union ecore_qable_obj *qo,
1500                                                struct ecore_exeq_elem *elem)
1501 {
1502         struct ecore_vlan_mac_obj *src_o = &qo->vlan_mac;
1503         struct ecore_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
1504         struct ecore_exeq_elem query_elem;
1505         struct ecore_exe_queue_obj *src_exeq = &src_o->exe_queue;
1506         struct ecore_exe_queue_obj *dest_exeq = &dest_o->exe_queue;
1507
1508         /* Check if we can perform this operation based on the current registry
1509          * state.
1510          */
1511         if (!src_o->check_move(sc, src_o, dest_o,
1512                                &elem->cmd_data.vlan_mac.u)) {
1513                 ECORE_MSG(sc, "MOVE command is not allowed considering current registry state\n");
1514                 return ECORE_INVAL;
1515         }
1516
1517         /* Check if there is an already pending DEL or MOVE command for the
1518          * source object or ADD command for a destination object. Return an
1519          * error if so.
1520          */
1521         ECORE_MEMCPY(&query_elem, elem, sizeof(query_elem));
1522
1523         /* Check DEL on source */
1524         query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL;
1525         if (src_exeq->get(src_exeq, &query_elem)) {
1526                 ECORE_ERR("There is a pending DEL command on the source queue already\n");
1527                 return ECORE_INVAL;
1528         }
1529
1530         /* Check MOVE on source */
1531         if (src_exeq->get(src_exeq, elem)) {
1532                 ECORE_MSG(sc, "There is a pending MOVE command already\n");
1533                 return ECORE_EXISTS;
1534         }
1535
1536         /* Check ADD on destination */
1537         query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD;
1538         if (dest_exeq->get(dest_exeq, &query_elem)) {
1539                 ECORE_ERR("There is a pending ADD command on the destination queue already\n");
1540                 return ECORE_INVAL;
1541         }
1542
1543         /* Consume the destination credit unless explicitly asked not to */
1544         if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT_DEST,
1545                              &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1546             dest_o->get_credit(dest_o)))
1547                 return ECORE_INVAL;
1548
1549         if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1550                              &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1551             src_o->put_credit(src_o))) {
1552                 /* return the credit taken from dest... */
1553                 dest_o->put_credit(dest_o);
1554                 return ECORE_INVAL;
1555         }
1556
1557         return ECORE_SUCCESS;
1558 }
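
/* MOVE touches two objects and therefore two credit pools: a credit is taken
 * from the destination object (unless ECORE_DONT_CONSUME_CAM_CREDIT_DEST is
 * set) and one is returned to the source object (unless
 * ECORE_DONT_CONSUME_CAM_CREDIT is set). If the source side fails, the
 * destination credit taken above is rolled back before returning an error.
 */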
1559
1560 static int ecore_validate_vlan_mac(struct bxe_adapter *sc,
1561                                    union ecore_qable_obj *qo,
1562                                    struct ecore_exeq_elem *elem)
1563 {
1564         switch (elem->cmd_data.vlan_mac.cmd) {
1565         case ECORE_VLAN_MAC_ADD:
1566                 return ecore_validate_vlan_mac_add(sc, qo, elem);
1567         case ECORE_VLAN_MAC_DEL:
1568                 return ecore_validate_vlan_mac_del(sc, qo, elem);
1569         case ECORE_VLAN_MAC_MOVE:
1570                 return ecore_validate_vlan_mac_move(sc, qo, elem);
1571         default:
1572                 return ECORE_INVAL;
1573         }
1574 }
1575
1576 static int ecore_remove_vlan_mac(struct bxe_adapter *sc,
1577                                   union ecore_qable_obj *qo,
1578                                   struct ecore_exeq_elem *elem)
1579 {
1580         int rc = 0;
1581
1582         /* If consumption wasn't required, nothing to do */
1583         if (ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1584                            &elem->cmd_data.vlan_mac.vlan_mac_flags))
1585                 return ECORE_SUCCESS;
1586
1587         switch (elem->cmd_data.vlan_mac.cmd) {
1588         case ECORE_VLAN_MAC_ADD:
1589         case ECORE_VLAN_MAC_MOVE:
1590                 rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
1591                 break;
1592         case ECORE_VLAN_MAC_DEL:
1593                 rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
1594                 break;
1595         default:
1596                 return ECORE_INVAL;
1597         }
1598
1599         if (rc != TRUE)
1600                 return ECORE_INVAL;
1601
1602         return ECORE_SUCCESS;
1603 }
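
/* ecore_remove_vlan_mac() is registered as the execution queue's 'remove'
 * callback; it is invoked (e.g. from ecore_vlan_mac_del_all() below) when a
 * still-pending command is dropped from the queue, and simply reverses the
 * credit accounting done by the corresponding 'validate' step.
 */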
1604
1605 /**
1606  * ecore_wait_vlan_mac - passively wait for 5 seconds until all work completes.
1607  *
1608  * @sc:         device handle
1609  * @o:          ecore_vlan_mac_obj
1610  *
1611  */
1612 static int ecore_wait_vlan_mac(struct bxe_adapter *sc,
1613                                struct ecore_vlan_mac_obj *o)
1614 {
1615         int cnt = 5000, rc;
1616         struct ecore_exe_queue_obj *exeq = &o->exe_queue;
1617         struct ecore_raw_obj *raw = &o->raw;
1618
1619         while (cnt--) {
1620                 /* Wait for the current command to complete */
1621                 rc = raw->wait_comp(sc, raw);
1622                 if (rc)
1623                         return rc;
1624
1625                 /* Wait until there are no pending commands */
1626                 if (!ecore_exe_queue_empty(exeq))
1627                         ECORE_WAIT(sc, 1000);
1628                 else
1629                         return ECORE_SUCCESS;
1630         }
1631
1632         return ECORE_TIMEOUT;
1633 }
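
/* The bound above comes from 5000 iterations of ECORE_WAIT(sc, 1000)
 * (assumed to be a microsecond-granularity delay, as elsewhere in this
 * driver), i.e. roughly the five seconds promised in the comment, plus
 * whatever time raw->wait_comp() itself spends on each pending completion.
 */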
1634
1635 static int __ecore_vlan_mac_execute_step(struct bxe_adapter *sc,
1636                                          struct ecore_vlan_mac_obj *o,
1637                                          unsigned long *ramrod_flags)
1638 {
1639         int rc = ECORE_SUCCESS;
1640
1641         ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
1642
1643         ECORE_MSG(sc, "vlan_mac_execute_step - trying to take writer lock\n");
1644         rc = __ecore_vlan_mac_h_write_trylock(sc, o);
1645
1646         if (rc != ECORE_SUCCESS) {
1647                 __ecore_vlan_mac_h_pend(sc, o, *ramrod_flags);
1648
1649                 /* The calling function should not differentiate between this
1650                  * case and the case in which there is already a pending ramrod.
1651                  */
1652                 rc = ECORE_PENDING;
1653         } else {
1654                 rc = ecore_exe_queue_step(sc, &o->exe_queue, ramrod_flags);
1655         }
1656         ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
1657
1658         return rc;
1659 }
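
/* __ecore_vlan_mac_execute_step() is the single place that drives the
 * execution queue forward: it is called with new work from
 * ecore_config_vlan_mac() and again from ecore_complete_vlan_mac() when a
 * ramrod completes. Everything runs under exe_queue.lock; if the writer lock
 * cannot be taken the request is parked via __ecore_vlan_mac_h_pend() and
 * ECORE_PENDING is returned, which callers treat the same as "a ramrod is
 * already in flight".
 */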
1660
1661 /**
1662  * ecore_complete_vlan_mac - complete one VLAN-MAC ramrod
1663  *
1664  * @sc:         device handle
1665  * @o:          ecore_vlan_mac_obj
1666  * @cqe:        completion element from the event ring
1667  * @ramrod_flags: if RAMROD_CONT is set, schedule the next execution chunk
1668  *
1669  */
1670 static int ecore_complete_vlan_mac(struct bxe_adapter *sc,
1671                                    struct ecore_vlan_mac_obj *o,
1672                                    union event_ring_elem *cqe,
1673                                    unsigned long *ramrod_flags)
1674 {
1675         struct ecore_raw_obj *r = &o->raw;
1676         int rc;
1677
1678         /* The pending list and the raw state must be cleared atomically,
1679          * as the execution flow assumes they represent the same thing.
1680          */
1681         ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
1682
1683         /* Reset pending list */
1684         __ecore_exe_queue_reset_pending(sc, &o->exe_queue);
1685
1686         /* Clear pending */
1687         r->clear_pending(r);
1688
1689         ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
1690
1691         /* If ramrod failed this is most likely a SW bug */
1692         if (cqe->message.error)
1693                 return ECORE_INVAL;
1694
1695         /* Run the next bulk of pending commands if requested */
1696         if (ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags)) {
1697                 rc = __ecore_vlan_mac_execute_step(sc, o, ramrod_flags);
1698                 if (rc < 0)
1699                         return rc;
1700         }
1701
1702         /* If there is more work to do return PENDING */
1703         if (!ecore_exe_queue_empty(&o->exe_queue))
1704                 return ECORE_PENDING;
1705
1706         return ECORE_SUCCESS;
1707 }
1708
1709 /**
1710  * ecore_optimize_vlan_mac - optimize ADD and DEL commands.
1711  *
1712  * @sc:         device handle
1713  * @qo:         ecore_qable_obj
1714  * @elem:       ecore_exeq_elem
1715  */
1716 static int ecore_optimize_vlan_mac(struct bxe_adapter *sc,
1717                                    union ecore_qable_obj *qo,
1718                                    struct ecore_exeq_elem *elem)
1719 {
1720         struct ecore_exeq_elem query, *pos;
1721         struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
1722         struct ecore_exe_queue_obj *exeq = &o->exe_queue;
1723
1724         ECORE_MEMCPY(&query, elem, sizeof(query));
1725
1726         switch (elem->cmd_data.vlan_mac.cmd) {
1727         case ECORE_VLAN_MAC_ADD:
1728                 query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL;
1729                 break;
1730         case ECORE_VLAN_MAC_DEL:
1731                 query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD;
1732                 break;
1733         default:
1734                 /* Don't handle anything other than ADD or DEL */
1735                 return 0;
1736         }
1737
1738         /* If we found the appropriate element - delete it */
1739         pos = exeq->get(exeq, &query);
1740         if (pos) {
1741
1742                 /* Return the credit of the optimized command */
1743                 if (!ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1744                                      &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
1745                         if ((query.cmd_data.vlan_mac.cmd ==
1746                              ECORE_VLAN_MAC_ADD) && !o->put_credit(o)) {
1747                                 ECORE_ERR("Failed to return the credit for the optimized ADD command\n");
1748                                 return ECORE_INVAL;
1749                         } else if ((query.cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_DEL) && !o->get_credit(o)) {
1750                                 ECORE_ERR("Failed to recover the credit from the optimized DEL command\n");
1751                                 return ECORE_INVAL;
1752                         }
1753                 }
1754
1755                 ECORE_MSG(sc, "Optimizing %s command\n",
1756                           (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
1757                           "ADD" : "DEL");
1758
1759                 ECORE_LIST_REMOVE_ENTRY(&pos->link, &exeq->exe_queue);
1760                 ecore_exe_queue_free_elem(sc, pos);
1761                 return 1;
1762         }
1763
1764         return 0;
1765 }
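
/* Example of the optimization above: if an ADD for some MAC is still pending
 * in the queue and a DEL for the same MAC arrives, the pending ADD is removed
 * from the queue, its CAM credit is put back, and the non-zero return value
 * lets the caller (ecore_exe_queue_add()) drop the new DEL as well, so no
 * ramrod is sent for either command. The symmetric DEL-then-ADD case re-takes
 * the credit the pending DEL had already returned.
 */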
1766
1767 /**
1768  * ecore_vlan_mac_get_registry_elem - prepare a registry element
1769  *
1770  * @sc:   device handle
1771  * @o:          vlan_mac object the command refers to
1772  * @elem:       execution queue element describing the command
1773  * @restore:    TRUE if this is a restore flow
1774  * @re:         output: the prepared registry element
1775  *
1776  * Prepare a registry element according to the current command request.
1777  */
1778 static inline int ecore_vlan_mac_get_registry_elem(
1779         struct bxe_adapter *sc,
1780         struct ecore_vlan_mac_obj *o,
1781         struct ecore_exeq_elem *elem,
1782         bool restore,
1783         struct ecore_vlan_mac_registry_elem **re)
1784 {
1785         enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
1786         struct ecore_vlan_mac_registry_elem *reg_elem;
1787
1788         /* Allocate a new registry element if needed. */
1789         if (!restore &&
1790             ((cmd == ECORE_VLAN_MAC_ADD) || (cmd == ECORE_VLAN_MAC_MOVE))) {
1791                 reg_elem = ECORE_ZALLOC(sizeof(*reg_elem), GFP_ATOMIC, sc);
1792                 if (!reg_elem)
1793                         return ECORE_NOMEM;
1794
1795                 /* Get a new CAM offset */
1796                 if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
1797                         /* This shall never happen, because we have checked the
1798                          * CAM availability in the 'validate'.
1799                          */
1800                         ECORE_DBG_BREAK_IF(1);
1801                         ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
1802                         return ECORE_INVAL;
1803                 }
1804
1805                 ECORE_MSG(sc, "Got cam offset %d\n", reg_elem->cam_offset);
1806
1807                 /* Set a VLAN-MAC data */
1808                 ECORE_MEMCPY(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
1809                           sizeof(reg_elem->u));
1810
1811                 /* Copy the flags (needed for DEL and RESTORE flows) */
1812                 reg_elem->vlan_mac_flags =
1813                         elem->cmd_data.vlan_mac.vlan_mac_flags;
1814         } else /* DEL, RESTORE */
1815                 reg_elem = o->check_del(sc, o, &elem->cmd_data.vlan_mac.u);
1816
1817         *re = reg_elem;
1818         return ECORE_SUCCESS;
1819 }
1820
1821 /**
1822  * ecore_execute_vlan_mac - execute vlan mac command
1823  *
1824  * @sc:                 device handle
1825  * @qo:            qable object (its vlan_mac part is used)
1826  * @exe_chunk:      list of commands to execute in this chunk
1827  * @ramrod_flags:   RAMROD_XXX execution flags
1828  *
1829  * Go and send a ramrod!
1830  */
1831 static int ecore_execute_vlan_mac(struct bxe_adapter *sc,
1832                                   union ecore_qable_obj *qo,
1833                                   ecore_list_t *exe_chunk,
1834                                   unsigned long *ramrod_flags)
1835 {
1836         struct ecore_exeq_elem *elem;
1837         struct ecore_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
1838         struct ecore_raw_obj *r = &o->raw;
1839         int rc, idx = 0;
1840         bool restore = ECORE_TEST_BIT(RAMROD_RESTORE, ramrod_flags);
1841         bool drv_only = ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags);
1842         struct ecore_vlan_mac_registry_elem *reg_elem;
1843         enum ecore_vlan_mac_cmd cmd;
1844
1845         /* If DRIVER_ONLY execution is requested, clean up the registry
1846          * and exit. Otherwise send a ramrod to the FW.
1847          */
1848         if (!drv_only) {
1849                 ECORE_DBG_BREAK_IF(r->check_pending(r));
1850
1851                 /* Set pending */
1852                 r->set_pending(r);
1853
1854                 /* Fill the ramrod data */
1855                 ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link,
1856                                           struct ecore_exeq_elem) {
1857                         cmd = elem->cmd_data.vlan_mac.cmd;
1858                         /* We will add to the target object in MOVE command, so
1859                          * change the object for a CAM search.
1860                          */
1861                         if (cmd == ECORE_VLAN_MAC_MOVE)
1862                                 cam_obj = elem->cmd_data.vlan_mac.target_obj;
1863                         else
1864                                 cam_obj = o;
1865
1866                         rc = ecore_vlan_mac_get_registry_elem(sc, cam_obj,
1867                                                               elem, restore,
1868                                                               &reg_elem);
1869                         if (rc)
1870                                 goto error_exit;
1871
1872                         ECORE_DBG_BREAK_IF(!reg_elem);
1873
1874                         /* Push a new entry into the registry */
1875                         if (!restore &&
1876                             ((cmd == ECORE_VLAN_MAC_ADD) ||
1877                             (cmd == ECORE_VLAN_MAC_MOVE)))
1878                                 ECORE_LIST_PUSH_HEAD(&reg_elem->link,
1879                                                      &cam_obj->head);
1880
1881                         /* Configure a single command in a ramrod data buffer */
1882                         o->set_one_rule(sc, o, elem, idx,
1883                                         reg_elem->cam_offset);
1884
1885                         /* MOVE command consumes 2 entries in the ramrod data */
1886                         if (cmd == ECORE_VLAN_MAC_MOVE)
1887                                 idx += 2;
1888                         else
1889                                 idx++;
1890                 }
1891
1892                 /*
1893                  * No need for an explicit memory barrier here: the ordering
1894                  * of the write to the SPQ element with respect to the update
1895                  * of the SPQ producer (which involves a memory read) must be
1896                  * ensured anyway, and the full memory barrier inside
1897                  * ecore_sp_post() takes care of that.
1898                  */
1899
1900                 rc = ecore_sp_post(sc, o->ramrod_cmd, r->cid,
1901                                    r->rdata_mapping,
1902                                    ETH_CONNECTION_TYPE);
1903                 if (rc)
1904                         goto error_exit;
1905         }
1906
1907         /* Now, when we are done with the ramrod - clean up the registry */
1908         ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link,
1909                                   struct ecore_exeq_elem) {
1910                 cmd = elem->cmd_data.vlan_mac.cmd;
1911                 if ((cmd == ECORE_VLAN_MAC_DEL) ||
1912                     (cmd == ECORE_VLAN_MAC_MOVE)) {
1913                         reg_elem = o->check_del(sc, o,
1914                                                 &elem->cmd_data.vlan_mac.u);
1915
1916                         ECORE_DBG_BREAK_IF(!reg_elem);
1917
1918                         o->put_cam_offset(o, reg_elem->cam_offset);
1919                         ECORE_LIST_REMOVE_ENTRY(&reg_elem->link, &o->head);
1920                         ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
1921                 }
1922         }
1923
1924         if (!drv_only)
1925                 return ECORE_PENDING;
1926         else
1927                 return ECORE_SUCCESS;
1928
1929 error_exit:
1930         r->clear_pending(r);
1931
1932         /* Cleanup a registry in case of a failure */
1933         ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link,
1934                                   struct ecore_exeq_elem) {
1935                 cmd = elem->cmd_data.vlan_mac.cmd;
1936
1937                 if (cmd == ECORE_VLAN_MAC_MOVE)
1938                         cam_obj = elem->cmd_data.vlan_mac.target_obj;
1939                 else
1940                         cam_obj = o;
1941
1942                 /* Delete all newly added above entries */
1943                 if (!restore &&
1944                     ((cmd == ECORE_VLAN_MAC_ADD) ||
1945                     (cmd == ECORE_VLAN_MAC_MOVE))) {
1946                         reg_elem = o->check_del(sc, cam_obj,
1947                                                 &elem->cmd_data.vlan_mac.u);
1948                         if (reg_elem) {
1949                                 ECORE_LIST_REMOVE_ENTRY(&reg_elem->link,
1950                                                         &cam_obj->head);
1951                                 ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
1952                         }
1953                 }
1954         }
1955
1956         return rc;
1957 }
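
/* Note on chunk sizing: a MOVE command occupies two entries in the ramrod data
 * buffer (idx += 2 above), while ADD/DEL occupy one. The execution chunk
 * length passed to ecore_exe_queue_init() below (1 for E1x objects,
 * CLASSIFY_RULES_COUNT otherwise) bounds how many of these entries are sent
 * in a single ramrod.
 */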
1958
1959 static inline int ecore_vlan_mac_push_new_cmd(
1960         struct bxe_adapter *sc,
1961         struct ecore_vlan_mac_ramrod_params *p)
1962 {
1963         struct ecore_exeq_elem *elem;
1964         struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
1965         bool restore = ECORE_TEST_BIT(RAMROD_RESTORE, &p->ramrod_flags);
1966
1967         /* Allocate the execution queue element */
1968         elem = ecore_exe_queue_alloc_elem(sc);
1969         if (!elem)
1970                 return ECORE_NOMEM;
1971
1972         /* Set the command 'length' */
1973         switch (p->user_req.cmd) {
1974         case ECORE_VLAN_MAC_MOVE:
1975                 elem->cmd_len = 2;
1976                 break;
1977         default:
1978                 elem->cmd_len = 1;
1979         }
1980
1981         /* Fill the object specific info */
1982         ECORE_MEMCPY(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));
1983
1984         /* Try to add a new command to the pending list */
1985         return ecore_exe_queue_add(sc, &o->exe_queue, elem, restore);
1986 }
1987
1988 /**
1989  * ecore_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
1990  *
1991  * @sc:   device handle
1992  * @p:   vlan_mac ramrod parameters (command, flags and target object)
1993  *
1994  */
1995 int ecore_config_vlan_mac(struct bxe_adapter *sc,
1996                            struct ecore_vlan_mac_ramrod_params *p)
1997 {
1998         int rc = ECORE_SUCCESS;
1999         struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
2000         unsigned long *ramrod_flags = &p->ramrod_flags;
2001         bool cont = ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags);
2002         struct ecore_raw_obj *raw = &o->raw;
2003
2004         /*
2005          * Add new elements to the execution list for commands that require it.
2006          */
2007         if (!cont) {
2008                 rc = ecore_vlan_mac_push_new_cmd(sc, p);
2009                 if (rc)
2010                         return rc;
2011         }
2012
2013         /* If nothing will be executed further in this iteration we want to
2014          * return PENDING if there are pending commands
2015          */
2016         if (!ecore_exe_queue_empty(&o->exe_queue))
2017                 rc = ECORE_PENDING;
2018
2019         if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags))  {
2020                 ECORE_MSG(sc, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n");
2021                 raw->clear_pending(raw);
2022         }
2023
2024         /* Execute commands if required */
2025         if (cont || ECORE_TEST_BIT(RAMROD_EXEC, ramrod_flags) ||
2026             ECORE_TEST_BIT(RAMROD_COMP_WAIT, ramrod_flags)) {
2027                 rc = __ecore_vlan_mac_execute_step(sc, p->vlan_mac_obj,
2028                                                    &p->ramrod_flags);
2029                 if (rc < 0)
2030                         return rc;
2031         }
2032
2033         /* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set,
2034          * the user wants to wait until the last command is done.
2035          */
2036         if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2037                 /* Wait for at most the current exe_queue length plus one
2038                  * iterations (one extra for the currently pending command).
2039                  */
2040                 int max_iterations = ecore_exe_queue_length(&o->exe_queue) + 1;
2041
2042                 while (!ecore_exe_queue_empty(&o->exe_queue) &&
2043                        max_iterations--) {
2044
2045                         /* Wait for the current command to complete */
2046                         rc = raw->wait_comp(sc, raw);
2047                         if (rc)
2048                                 return rc;
2049
2050                         /* Make a next step */
2051                         rc = __ecore_vlan_mac_execute_step(sc,
2052                                                            p->vlan_mac_obj,
2053                                                            &p->ramrod_flags);
2054                         if (rc < 0)
2055                                 return rc;
2056                 }
2057
2058                 return ECORE_SUCCESS;
2059         }
2060
2061         return rc;
2062 }
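
/* A minimal caller-side sketch (illustrative only - the field names under
 * p.user_req.u follow the ramrod data unions used by the exeq_get helpers
 * above, and 'mac_obj'/'addr' stand for whatever object and address the
 * caller owns):
 *
 *	struct ecore_vlan_mac_ramrod_params p;
 *	int rc;
 *
 *	ECORE_MEMSET(&p, 0, sizeof(p));
 *	p.vlan_mac_obj = mac_obj;
 *	p.user_req.cmd = ECORE_VLAN_MAC_ADD;
 *	ECORE_MEMCPY(&p.user_req.u.mac.mac, addr, ETH_ALEN);
 *	ECORE_SET_BIT_NA(RAMROD_COMP_WAIT, &p.ramrod_flags);
 *	rc = ecore_config_vlan_mac(sc, &p);
 *
 * With RAMROD_COMP_WAIT set the call blocks until the execution queue has
 * drained; without it, ECORE_PENDING means the command was queued and will be
 * driven to completion from ecore_complete_vlan_mac().
 */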
2063
2064 /**
2065  * ecore_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
2066  *
2067  * @sc:                 device handle
2068  * @o:                  vlan_mac object to delete the elements from
2069  * @vlan_mac_flags:     vlan_mac_flags specification to match against
2070  * @ramrod_flags:       execution flags to be used for this deletion
2071  *
2072  * Returns zero if the last operation has completed successfully and there
2073  * are no more elements left, a positive value if the last operation has
2074  * completed successfully and there are more previously configured elements,
2075  * and a negative value if the current operation has failed.
2076  */
2077 static int ecore_vlan_mac_del_all(struct bxe_adapter *sc,
2078                                   struct ecore_vlan_mac_obj *o,
2079                                   unsigned long *vlan_mac_flags,
2080                                   unsigned long *ramrod_flags)
2081 {
2082         struct ecore_vlan_mac_registry_elem *pos = NULL;
2083         struct ecore_vlan_mac_ramrod_params p;
2084         struct ecore_exe_queue_obj *exeq = &o->exe_queue;
2085         struct ecore_exeq_elem *exeq_pos, *exeq_pos_n;
2086         int read_lock;
2087         int rc = 0;
2088
2089         /* Clear pending commands first */
2090
2091         ECORE_SPIN_LOCK_BH(&exeq->lock);
2092
2093         ECORE_LIST_FOR_EACH_ENTRY_SAFE(exeq_pos, exeq_pos_n,
2094                                        &exeq->exe_queue, link,
2095                                        struct ecore_exeq_elem) {
2096                 if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
2097                     *vlan_mac_flags) {
2098                         rc = exeq->remove(sc, exeq->owner, exeq_pos);
2099                         if (rc) {
2100                                 ECORE_ERR("Failed to remove command\n");
2101                                 ECORE_SPIN_UNLOCK_BH(&exeq->lock);
2102                                 return rc;
2103                         }
2104                         ECORE_LIST_REMOVE_ENTRY(&exeq_pos->link,
2105                                                 &exeq->exe_queue);
2106                         ecore_exe_queue_free_elem(sc, exeq_pos);
2107                 }
2108         }
2109
2110         ECORE_SPIN_UNLOCK_BH(&exeq->lock);
2111
2112         /* Prepare a command request */
2113         ECORE_MEMSET(&p, 0, sizeof(p));
2114         p.vlan_mac_obj = o;
2115         p.ramrod_flags = *ramrod_flags;
2116         p.user_req.cmd = ECORE_VLAN_MAC_DEL;
2117
2118         /* Add all the matching entries to the execution queue without
2119          * actually executing anything.
2120          */
2121         ECORE_CLEAR_BIT_NA(RAMROD_COMP_WAIT, &p.ramrod_flags);
2122         ECORE_CLEAR_BIT_NA(RAMROD_EXEC, &p.ramrod_flags);
2123         ECORE_CLEAR_BIT_NA(RAMROD_CONT, &p.ramrod_flags);
2124
2125         ECORE_MSG(sc, "vlan_mac_del_all -- taking vlan_mac_lock (reader)\n");
2126         read_lock = ecore_vlan_mac_h_read_lock(sc, o);
2127         if (read_lock != ECORE_SUCCESS)
2128                 return read_lock;
2129
2130         ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
2131                                   struct ecore_vlan_mac_registry_elem) {
2132                 if (pos->vlan_mac_flags == *vlan_mac_flags) {
2133                         p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
2134                         ECORE_MEMCPY(&p.user_req.u, &pos->u, sizeof(pos->u));
2135                         rc = ecore_config_vlan_mac(sc, &p);
2136                         if (rc < 0) {
2137                                 ECORE_ERR("Failed to add a new DEL command\n");
2138                                 ecore_vlan_mac_h_read_unlock(sc, o);
2139                                 return rc;
2140                         }
2141                 }
2142         }
2143
2144         ECORE_MSG(sc, "vlan_mac_del_all -- releasing vlan_mac_lock (reader)\n");
2145         ecore_vlan_mac_h_read_unlock(sc, o);
2146
2147         p.ramrod_flags = *ramrod_flags;
2148         ECORE_SET_BIT_NA(RAMROD_CONT, &p.ramrod_flags);
2149
2150         return ecore_config_vlan_mac(sc, &p);
2151 }
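
/* The flush above works in two phases: first any still-pending commands whose
 * vlan_mac_flags match are dropped straight from the execution queue (their
 * credits restored through exeq->remove), then a DEL is queued for every
 * matching registry entry with EXEC/CONT/COMP_WAIT cleared, and a final
 * ecore_config_vlan_mac() call with RAMROD_CONT (plus the caller's original
 * ramrod_flags) actually executes the whole batch.
 */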
2152
2153 static inline void ecore_init_raw_obj(struct ecore_raw_obj *raw, uint8_t cl_id,
2154         uint32_t cid, uint8_t func_id, void *rdata, ecore_dma_addr_t rdata_mapping, int state,
2155         unsigned long *pstate, ecore_obj_type type)
2156 {
2157         raw->func_id = func_id;
2158         raw->cid = cid;
2159         raw->cl_id = cl_id;
2160         raw->rdata = rdata;
2161         raw->rdata_mapping = rdata_mapping;
2162         raw->state = state;
2163         raw->pstate = pstate;
2164         raw->obj_type = type;
2165         raw->check_pending = ecore_raw_check_pending;
2166         raw->clear_pending = ecore_raw_clear_pending;
2167         raw->set_pending = ecore_raw_set_pending;
2168         raw->wait_comp = ecore_raw_wait;
2169 }
2170
2171 static inline void ecore_init_vlan_mac_common(struct ecore_vlan_mac_obj *o,
2172         uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata, ecore_dma_addr_t rdata_mapping,
2173         int state, unsigned long *pstate, ecore_obj_type type,
2174         struct ecore_credit_pool_obj *macs_pool,
2175         struct ecore_credit_pool_obj *vlans_pool)
2176 {
2177         ECORE_LIST_INIT(&o->head);
2178         o->head_reader = 0;
2179         o->head_exe_request = FALSE;
2180         o->saved_ramrod_flags = 0;
2181
2182         o->macs_pool = macs_pool;
2183         o->vlans_pool = vlans_pool;
2184
2185         o->delete_all = ecore_vlan_mac_del_all;
2186         o->restore = ecore_vlan_mac_restore;
2187         o->complete = ecore_complete_vlan_mac;
2188         o->wait = ecore_wait_vlan_mac;
2189
2190         ecore_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
2191                            state, pstate, type);
2192 }
2193
2194 void ecore_init_mac_obj(struct bxe_adapter *sc,
2195                         struct ecore_vlan_mac_obj *mac_obj,
2196                         uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata,
2197                         ecore_dma_addr_t rdata_mapping, int state,
2198                         unsigned long *pstate, ecore_obj_type type,
2199                         struct ecore_credit_pool_obj *macs_pool)
2200 {
2201         union ecore_qable_obj *qable_obj = (union ecore_qable_obj *)mac_obj;
2202
2203         ecore_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
2204                                    rdata_mapping, state, pstate, type,
2205                                    macs_pool, NULL);
2206
2207         /* CAM credit pool handling */
2208         mac_obj->get_credit = ecore_get_credit_mac;
2209         mac_obj->put_credit = ecore_put_credit_mac;
2210         mac_obj->get_cam_offset = ecore_get_cam_offset_mac;
2211         mac_obj->put_cam_offset = ecore_put_cam_offset_mac;
2212
2213         if (CHIP_IS_E1x(sc)) {
2214                 mac_obj->set_one_rule      = ecore_set_one_mac_e1x;
2215                 mac_obj->check_del         = ecore_check_mac_del;
2216                 mac_obj->check_add         = ecore_check_mac_add;
2217                 mac_obj->check_move        = ecore_check_move_always_err;
2218                 mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
2219
2220                 /* Exe Queue */
2221                 ecore_exe_queue_init(sc,
2222                                      &mac_obj->exe_queue, 1, qable_obj,
2223                                      ecore_validate_vlan_mac,
2224                                      ecore_remove_vlan_mac,
2225                                      ecore_optimize_vlan_mac,
2226                                      ecore_execute_vlan_mac,
2227                                      ecore_exeq_get_mac);
2228         } else {
2229                 mac_obj->set_one_rule      = ecore_set_one_mac_e2;
2230                 mac_obj->check_del         = ecore_check_mac_del;
2231                 mac_obj->check_add         = ecore_check_mac_add;
2232                 mac_obj->check_move        = ecore_check_move;
2233                 mac_obj->ramrod_cmd        =
2234                         RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2235                 mac_obj->get_n_elements    = ecore_get_n_elements;
2236
2237                 /* Exe Queue */
2238                 ecore_exe_queue_init(sc,
2239                                      &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
2240                                      qable_obj, ecore_validate_vlan_mac,
2241                                      ecore_remove_vlan_mac,
2242                                      ecore_optimize_vlan_mac,
2243                                      ecore_execute_vlan_mac,
2244                                      ecore_exeq_get_mac);
2245         }
2246 }
2247
2248 void ecore_init_vlan_obj(struct bxe_adapter *sc,
2249                          struct ecore_vlan_mac_obj *vlan_obj,
2250                          uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata,
2251                          ecore_dma_addr_t rdata_mapping, int state,
2252                          unsigned long *pstate, ecore_obj_type type,
2253                          struct ecore_credit_pool_obj *vlans_pool)
2254 {
2255         union ecore_qable_obj *qable_obj = (union ecore_qable_obj *)vlan_obj;
2256
2257         ecore_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
2258                                    rdata_mapping, state, pstate, type, NULL,
2259                                    vlans_pool);
2260
2261         vlan_obj->get_credit = ecore_get_credit_vlan;
2262         vlan_obj->put_credit = ecore_put_credit_vlan;
2263         vlan_obj->get_cam_offset = ecore_get_cam_offset_vlan;
2264         vlan_obj->put_cam_offset = ecore_put_cam_offset_vlan;
2265
2266         if (CHIP_IS_E1x(sc)) {
2267                 ECORE_ERR("Only E2 and newer chips are supported\n");
2268                 ECORE_BUG();
2269         } else {
2270                 vlan_obj->set_one_rule      = ecore_set_one_vlan_e2;
2271                 vlan_obj->check_del         = ecore_check_vlan_del;
2272                 vlan_obj->check_add         = ecore_check_vlan_add;
2273                 vlan_obj->check_move        = ecore_check_move;
2274                 vlan_obj->ramrod_cmd        =
2275                         RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2276                 vlan_obj->get_n_elements    = ecore_get_n_elements;
2277
2278                 /* Exe Queue */
2279                 ecore_exe_queue_init(sc,
2280                                      &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
2281                                      qable_obj, ecore_validate_vlan_mac,
2282                                      ecore_remove_vlan_mac,
2283                                      ecore_optimize_vlan_mac,
2284                                      ecore_execute_vlan_mac,
2285                                      ecore_exeq_get_vlan);
2286         }
2287 }
2288
2289 void ecore_init_vlan_mac_obj(struct bxe_adapter *sc,
2290                              struct ecore_vlan_mac_obj *vlan_mac_obj,
2291                              uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata,
2292                              ecore_dma_addr_t rdata_mapping, int state,
2293                              unsigned long *pstate, ecore_obj_type type,
2294                              struct ecore_credit_pool_obj *macs_pool,
2295                              struct ecore_credit_pool_obj *vlans_pool)
2296 {
2297         union ecore_qable_obj *qable_obj =
2298                 (union ecore_qable_obj *)vlan_mac_obj;
2299
2300         ecore_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
2301                                    rdata_mapping, state, pstate, type,
2302                                    macs_pool, vlans_pool);
2303
2304         /* CAM pool handling */
2305         vlan_mac_obj->get_credit = ecore_get_credit_vlan_mac;
2306         vlan_mac_obj->put_credit = ecore_put_credit_vlan_mac;
2307         /* CAM offset is relevant for 57710 and 57711 chips only which have a
2308          * single CAM for both MACs and VLAN-MAC pairs. So the offset
2309          * will be taken from MACs' pool object only.
2310          */
2311         vlan_mac_obj->get_cam_offset = ecore_get_cam_offset_mac;
2312         vlan_mac_obj->put_cam_offset = ecore_put_cam_offset_mac;
2313
2314         if (CHIP_IS_E1(sc)) {
2315                 ECORE_ERR("E1 chips are not supported\n");
2316                 ECORE_BUG();
2317         } else if (CHIP_IS_E1H(sc)) {
2318                 vlan_mac_obj->set_one_rule      = ecore_set_one_vlan_mac_e1h;
2319                 vlan_mac_obj->check_del         = ecore_check_vlan_mac_del;
2320                 vlan_mac_obj->check_add         = ecore_check_vlan_mac_add;
2321                 vlan_mac_obj->check_move        = ecore_check_move_always_err;
2322                 vlan_mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
2323
2324                 /* Exe Queue */
2325                 ecore_exe_queue_init(sc,
2326                                      &vlan_mac_obj->exe_queue, 1, qable_obj,
2327                                      ecore_validate_vlan_mac,
2328                                      ecore_remove_vlan_mac,
2329                                      ecore_optimize_vlan_mac,
2330                                      ecore_execute_vlan_mac,
2331                                      ecore_exeq_get_vlan_mac);
2332         } else {
2333                 vlan_mac_obj->set_one_rule      = ecore_set_one_vlan_mac_e2;
2334                 vlan_mac_obj->check_del         = ecore_check_vlan_mac_del;
2335                 vlan_mac_obj->check_add         = ecore_check_vlan_mac_add;
2336                 vlan_mac_obj->check_move        = ecore_check_move;
2337                 vlan_mac_obj->ramrod_cmd        =
2338                         RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2339
2340                 /* Exe Queue */
2341                 ecore_exe_queue_init(sc,
2342                                      &vlan_mac_obj->exe_queue,
2343                                      CLASSIFY_RULES_COUNT,
2344                                      qable_obj, ecore_validate_vlan_mac,
2345                                      ecore_remove_vlan_mac,
2346                                      ecore_optimize_vlan_mac,
2347                                      ecore_execute_vlan_mac,
2348                                      ecore_exeq_get_vlan_mac);
2349         }
2350 }
2351
2352 /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
2353 static inline void __storm_memset_mac_filters(struct bxe_adapter *sc,
2354                         struct tstorm_eth_mac_filter_config *mac_filters,
2355                         uint16_t pf_id)
2356 {
2357         size_t size = sizeof(struct tstorm_eth_mac_filter_config);
2358
2359         uint32_t addr = BAR_TSTRORM_INTMEM +
2360                         TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
2361
2362         ecore_storm_memset_struct(sc, addr, size, (uint32_t *)mac_filters);
2363 }
2364
2365 static int ecore_set_rx_mode_e1x(struct bxe_adapter *sc,
2366                                  struct ecore_rx_mode_ramrod_params *p)
2367 {
2368         /* update the sc MAC filter structure */
2369         uint32_t mask = (1 << p->cl_id);
2370
2371         struct tstorm_eth_mac_filter_config *mac_filters =
2372                 (struct tstorm_eth_mac_filter_config *)p->rdata;
2373
2374         /* initial setting is drop-all */
2375         uint8_t drop_all_ucast = 1, drop_all_mcast = 1;
2376         uint8_t accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2377         uint8_t unmatched_unicast = 0;
2378
2379     /* In e1x there we only take into account rx accept flag since tx switching
2380         /* On E1x only the RX accept flags are taken into account, since TX
2381          * switching isn't enabled. */
2382                 /* accept matched ucast */
2383                 drop_all_ucast = 0;
2384
2385         if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, &p->rx_accept_flags))
2386                 /* accept matched mcast */
2387                 drop_all_mcast = 0;
2388
2389         if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
2390                 /* accept all ucast */
2391                 drop_all_ucast = 0;
2392                 accp_all_ucast = 1;
2393         }
2394         if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
2395                 /* accept all mcast */
2396                 drop_all_mcast = 0;
2397                 accp_all_mcast = 1;
2398         }
2399         if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, &p->rx_accept_flags))
2400                 /* accept (all) bcast */
2401                 accp_all_bcast = 1;
2402         if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, &p->rx_accept_flags))
2403                 /* accept unmatched unicasts */
2404                 unmatched_unicast = 1;
2405
2406         mac_filters->ucast_drop_all = drop_all_ucast ?
2407                 mac_filters->ucast_drop_all | mask :
2408                 mac_filters->ucast_drop_all & ~mask;
2409
2410         mac_filters->mcast_drop_all = drop_all_mcast ?
2411                 mac_filters->mcast_drop_all | mask :
2412                 mac_filters->mcast_drop_all & ~mask;
2413
2414         mac_filters->ucast_accept_all = accp_all_ucast ?
2415                 mac_filters->ucast_accept_all | mask :
2416                 mac_filters->ucast_accept_all & ~mask;
2417
2418         mac_filters->mcast_accept_all = accp_all_mcast ?
2419                 mac_filters->mcast_accept_all | mask :
2420                 mac_filters->mcast_accept_all & ~mask;
2421
2422         mac_filters->bcast_accept_all = accp_all_bcast ?
2423                 mac_filters->bcast_accept_all | mask :
2424                 mac_filters->bcast_accept_all & ~mask;
2425
2426         mac_filters->unmatched_unicast = unmatched_unicast ?
2427                 mac_filters->unmatched_unicast | mask :
2428                 mac_filters->unmatched_unicast & ~mask;
2429
2430         ECORE_MSG(sc, "drop_ucast 0x%x\ndrop_mcast 0x%x\n accp_ucast 0x%x\n"
2431                          "accp_mcast 0x%x\naccp_bcast 0x%x\n",
2432            mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
2433            mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
2434            mac_filters->bcast_accept_all);
2435
2436         /* write the MAC filter structure*/
2437         __storm_memset_mac_filters(sc, mac_filters, p->func_id);
2438
2439         /* The operation is completed */
2440         ECORE_CLEAR_BIT(p->state, p->pstate);
2441         ECORE_SMP_MB_AFTER_CLEAR_BIT();
2442
2443         return ECORE_SUCCESS;
2444 }
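
/* On E1x the rx_mode update is not a ramrod at all: each field of
 * tstorm_eth_mac_filter_config is a per-client bitmap, and the code above
 * just sets or clears bit (1 << p->cl_id) in every relevant field before
 * writing the whole structure into TSTORM internal memory for this PF. For
 * example, cl_id 2 toggles mask 0x4 in ucast_drop_all, mcast_drop_all, etc.
 */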
2445
2446 /* Setup ramrod data */
2447 static inline void ecore_rx_mode_set_rdata_hdr_e2(uint32_t cid,
2448                                 struct eth_classify_header *hdr,
2449                                 uint8_t rule_cnt)
2450 {
2451         hdr->echo = ECORE_CPU_TO_LE32(cid);
2452         hdr->rule_cnt = rule_cnt;
2453 }
2454
2455 static inline void ecore_rx_mode_set_cmd_state_e2(struct bxe_adapter *sc,
2456                                 unsigned long *accept_flags,
2457                                 struct eth_filter_rules_cmd *cmd,
2458                                 bool clear_accept_all)
2459 {
2460         uint16_t state;
2461
2462         /* start with 'drop-all' */
2463         state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
2464                 ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2465
2466         if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST, accept_flags))
2467                 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2468
2469         if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, accept_flags))
2470                 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2471
2472         if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, accept_flags)) {
2473                 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2474                 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2475         }
2476
2477         if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, accept_flags)) {
2478                 state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2479                 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2480         }
2481         if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, accept_flags))
2482                 state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2483
2484         if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, accept_flags)) {
2485                 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2486                 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2487         }
2488         if (ECORE_TEST_BIT(ECORE_ACCEPT_ANY_VLAN, accept_flags))
2489                 state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
2490
2491         /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
2492         if (clear_accept_all) {
2493                 state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2494                 state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2495                 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2496                 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2497         }
2498
2499         cmd->state = ECORE_CPU_TO_LE16(state);
2500 }
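
/* Worked example for the state computation above: accept_flags with only
 * ECORE_ACCEPT_UNICAST and ECORE_ACCEPT_BROADCAST set start from
 * UCAST_DROP_ALL | MCAST_DROP_ALL, clear UCAST_DROP_ALL and add
 * BCAST_ACCEPT_ALL, ending up as
 * ETH_FILTER_RULES_CMD_MCAST_DROP_ALL | ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL
 * (before the FCoE clear_accept_all pass, which would strip the
 * BCAST_ACCEPT_ALL bit again).
 */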
2501
2502 static int ecore_set_rx_mode_e2(struct bxe_adapter *sc,
2503                                 struct ecore_rx_mode_ramrod_params *p)
2504 {
2505         struct eth_filter_rules_ramrod_data *data = p->rdata;
2506         int rc;
2507         uint8_t rule_idx = 0;
2508
2509         /* Reset the ramrod data buffer */
2510         ECORE_MEMSET(data, 0, sizeof(*data));
2511
2512         /* Setup ramrod data */
2513
2514         /* Tx (internal switching) */
2515         if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) {
2516                 data->rules[rule_idx].client_id = p->cl_id;
2517                 data->rules[rule_idx].func_id = p->func_id;
2518
2519                 data->rules[rule_idx].cmd_general_data =
2520                         ETH_FILTER_RULES_CMD_TX_CMD;
2521
2522                 ecore_rx_mode_set_cmd_state_e2(sc, &p->tx_accept_flags,
2523                                                &(data->rules[rule_idx++]),
2524                                                FALSE);
2525         }
2526
2527         /* Rx */
2528         if (ECORE_TEST_BIT(RAMROD_RX, &p->ramrod_flags)) {
2529                 data->rules[rule_idx].client_id = p->cl_id;
2530                 data->rules[rule_idx].func_id = p->func_id;
2531
2532                 data->rules[rule_idx].cmd_general_data =
2533                         ETH_FILTER_RULES_CMD_RX_CMD;
2534
2535                 ecore_rx_mode_set_cmd_state_e2(sc, &p->rx_accept_flags,
2536                                                &(data->rules[rule_idx++]),
2537                                                FALSE);
2538         }
2539
2540         /* If FCoE Queue configuration has been requested configure the Rx and
2541          * internal switching modes for this queue in separate rules.
2542          *
2543          * The FCoE queue shall never be set to ACCEPT_ALL packets of any sort:
2544          * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
2545          */
2546         if (ECORE_TEST_BIT(ECORE_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
2547                 /*  Tx (internal switching) */
2548                 if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) {
2549                         data->rules[rule_idx].client_id = ECORE_FCOE_CID(sc);
2550                         data->rules[rule_idx].func_id = p->func_id;
2551
2552                         data->rules[rule_idx].cmd_general_data =
2553                                                 ETH_FILTER_RULES_CMD_TX_CMD;
2554
2555                         ecore_rx_mode_set_cmd_state_e2(sc, &p->tx_accept_flags,
2556                                                        &(data->rules[rule_idx]),
2557                                                        TRUE);
2558                         rule_idx++;
2559                 }
2560
2561                 /* Rx */
2562                 if (ECORE_TEST_BIT(RAMROD_RX, &p->ramrod_flags)) {
2563                         data->rules[rule_idx].client_id = ECORE_FCOE_CID(sc);
2564                         data->rules[rule_idx].func_id = p->func_id;
2565
2566                         data->rules[rule_idx].cmd_general_data =
2567                                                 ETH_FILTER_RULES_CMD_RX_CMD;
2568
2569                         ecore_rx_mode_set_cmd_state_e2(sc, &p->rx_accept_flags,
2570                                                        &(data->rules[rule_idx]),
2571                                                        TRUE);
2572                         rule_idx++;
2573                 }
2574         }
2575
2576         /* Set the ramrod header (most importantly - number of rules to
2577          * configure).
2578          */
2579         ecore_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
2580
2581         ECORE_MSG(sc, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx\n",
2582                   data->header.rule_cnt, p->rx_accept_flags,
2583                   p->tx_accept_flags);
2584
2585         /* No need for an explicit memory barrier here: the ordering of the
2586          * write to the SPQ element with respect to the update of the SPQ
2587          * producer (which involves a memory read) must be ensured anyway,
2588          * and the full memory barrier inside ecore_sp_post() takes care of
2589          * that.
2590          */
2591
2592         /* Send a ramrod */
2593         rc = ecore_sp_post(sc,
2594                            RAMROD_CMD_ID_ETH_FILTER_RULES,
2595                            p->cid,
2596                            p->rdata_mapping,
2597                            ETH_CONNECTION_TYPE);
2598         if (rc)
2599                 return rc;
2600
2601         /* Ramrod completion is pending */
2602         return ECORE_PENDING;
2603 }
2604
2605 static int ecore_wait_rx_mode_comp_e2(struct bxe_adapter *sc,
2606                                       struct ecore_rx_mode_ramrod_params *p)
2607 {
2608         return ecore_state_wait(sc, p->state, p->pstate);
2609 }
2610
2611 static int ecore_empty_rx_mode_wait(struct bxe_adapter *sc,
2612                                     struct ecore_rx_mode_ramrod_params *p)
2613 {
2614         /* Do nothing */
2615         return ECORE_SUCCESS;
2616 }
2617
2618 int ecore_config_rx_mode(struct bxe_adapter *sc,
2619                          struct ecore_rx_mode_ramrod_params *p)
2620 {
2621         int rc;
2622
2623         /* Configure the new classification in the chip */
2624         rc = p->rx_mode_obj->config_rx_mode(sc, p);
2625         if (rc < 0)
2626                 return rc;
2627
2628         /* Wait for a ramrod completion if was requested */
2629         if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2630                 rc = p->rx_mode_obj->wait_comp(sc, p);
2631                 if (rc)
2632                         return rc;
2633         }
2634
2635         return rc;
2636 }
2637
2638 void ecore_init_rx_mode_obj(struct bxe_adapter *sc,
2639                             struct ecore_rx_mode_obj *o)
2640 {
2641         if (CHIP_IS_E1x(sc)) {
2642                 o->wait_comp      = ecore_empty_rx_mode_wait;
2643                 o->config_rx_mode = ecore_set_rx_mode_e1x;
2644         } else {
2645                 o->wait_comp      = ecore_wait_rx_mode_comp_e2;
2646                 o->config_rx_mode = ecore_set_rx_mode_e2;
2647         }
2648 }
2649
2650 /********************* Multicast verbs: SET, CLEAR ****************************/
2651 static inline uint8_t ecore_mcast_bin_from_mac(uint8_t *mac)
2652 {
2653         return (ECORE_CRC32_LE(0, mac, ETH_ALEN) >> 24) & 0xff;
2654 }
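
/* Each multicast MAC is hashed into one of 256 "bins": the bin index is the
 * top byte of the little-endian CRC32 of the address. The approximate-match
 * registry below stores these bins in an array of 64-bit vectors
 * (registry.aprox_match.vec[bin / 64], bit bin % 64), which is what
 * ecore_mcast_get_next_bin() walks with BIT_VEC64_TEST_BIT().
 */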
2655
2656 struct ecore_mcast_mac_elem {
2657         ecore_list_entry_t link;
2658         uint8_t mac[ETH_ALEN];
2659         uint8_t pad[2]; /* For a natural alignment of the following buffer */
2660 };
2661
2662 struct ecore_pending_mcast_cmd {
2663         ecore_list_entry_t link;
2664         int type; /* ECORE_MCAST_CMD_X */
2665         union {
2666                 ecore_list_t macs_head;
2667                 uint32_t macs_num; /* Needed for DEL command */
2668                 int next_bin; /* Needed for RESTORE flow with aprox match */
2669         } data;
2670
2671         bool done; /* set to TRUE when the command has been handled. It is
2672                     * practically used in 57712 handling only, where one pending
2673                     * command may be handled in a few operations. Since for other
2674                     * chips every operation is completed in a single ramrod,
2675                     * there is no need to utilize this field there.
2676                     */
2677 };
2678
2679 static int ecore_mcast_wait(struct bxe_adapter *sc,
2680                             struct ecore_mcast_obj *o)
2681 {
2682         if (ecore_state_wait(sc, o->sched_state, o->raw.pstate) ||
2683                         o->raw.wait_comp(sc, &o->raw))
2684                 return ECORE_TIMEOUT;
2685
2686         return ECORE_SUCCESS;
2687 }
2688
2689 static int ecore_mcast_enqueue_cmd(struct bxe_adapter *sc,
2690                                    struct ecore_mcast_obj *o,
2691                                    struct ecore_mcast_ramrod_params *p,
2692                                    enum ecore_mcast_cmd cmd)
2693 {
2694         int total_sz;
2695         struct ecore_pending_mcast_cmd *new_cmd;
2696         struct ecore_mcast_mac_elem *cur_mac = NULL;
2697         struct ecore_mcast_list_elem *pos;
2698         int macs_list_len = ((cmd == ECORE_MCAST_CMD_ADD) ?
2699                              p->mcast_list_len : 0);
2700
2701         /* If the command is empty ("handle pending commands only"), we are done */
2702         if (!p->mcast_list_len)
2703                 return ECORE_SUCCESS;
2704
2705         total_sz = sizeof(*new_cmd) +
2706                 macs_list_len * sizeof(struct ecore_mcast_mac_elem);
2707
2708         /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
2709         new_cmd = ECORE_ZALLOC(total_sz, GFP_ATOMIC, sc);
2710
2711         if (!new_cmd)
2712                 return ECORE_NOMEM;
2713
2714         ECORE_MSG(sc, "About to enqueue a new command %d. macs_list_len=%d\n",
2715                   cmd, macs_list_len);
2716
2717         ECORE_LIST_INIT(&new_cmd->data.macs_head);
2718
2719         new_cmd->type = cmd;
2720         new_cmd->done = FALSE;
2721
2722         switch (cmd) {
2723         case ECORE_MCAST_CMD_ADD:
2724                 cur_mac = (struct ecore_mcast_mac_elem *)
2725                           ((uint8_t *)new_cmd + sizeof(*new_cmd));
2726
2727                 /* Push the MACs of the current command into the pending command
2728                  * MACs list: FIFO
2729                  */
2730                 ECORE_LIST_FOR_EACH_ENTRY(pos, &p->mcast_list, link,
2731                                           struct ecore_mcast_list_elem) {
2732                         ECORE_MEMCPY(cur_mac->mac, pos->mac, ETH_ALEN);
2733                         ECORE_LIST_PUSH_TAIL(&cur_mac->link,
2734                                              &new_cmd->data.macs_head);
2735                         cur_mac++;
2736                 }
2737
2738                 break;
2739
2740         case ECORE_MCAST_CMD_DEL:
2741                 new_cmd->data.macs_num = p->mcast_list_len;
2742                 break;
2743
2744         case ECORE_MCAST_CMD_RESTORE:
2745                 new_cmd->data.next_bin = 0;
2746                 break;
2747
2748         default:
2749                 ECORE_FREE(sc, new_cmd, total_sz);
2750                 ECORE_ERR("Unknown command: %d\n", cmd);
2751                 return ECORE_INVAL;
2752         }
2753
2754         /* Push the new pending command to the tail of the pending list: FIFO */
2755         ECORE_LIST_PUSH_TAIL(&new_cmd->link, &o->pending_cmds_head);
2756
2757         o->set_sched(o);
2758
2759         return ECORE_PENDING;
2760 }
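/* Layout of an ADD command enqueued above (a single ECORE_ZALLOC chunk):
 *
 *     +------------------------------------+  <- new_cmd
 *     | struct ecore_pending_mcast_cmd     |
 *     +------------------------------------+  <- cur_mac
 *     | struct ecore_mcast_mac_elem [0]    |
 *     | struct ecore_mcast_mac_elem [1]    |
 *     | ... macs_list_len entries ...      |
 *     +------------------------------------+
 *
 * The MAC elements are linked onto new_cmd->data.macs_head but live inside
 * the same allocation, which is why freeing the command also frees its MACs.
 */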
2761
2762 /**
2763  * ecore_mcast_get_next_bin - get the next set bin (index)
2764  *
2765  * @o:          multicast object
2766  * @last:       index to start looking from (inclusive)
2767  *
2768  * returns the next found (set) bin or a negative value if none is found.
2769  */
2770 static inline int ecore_mcast_get_next_bin(struct ecore_mcast_obj *o, int last)
2771 {
2772         int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
2773
2774         for (i = last / BIT_VEC64_ELEM_SZ; i < ECORE_MCAST_VEC_SZ; i++) {
2775                 if (o->registry.aprox_match.vec[i])
2776                         for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
2777                                 int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
2778                                 if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
2779                                                        vec, cur_bit)) {
2780                                         return cur_bit;
2781                                 }
2782                         }
2783                 inner_start = 0;
2784         }
2785
2786         /* None found */
2787         return -1;
2788 }
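/* Bin b lives at bit (b % BIT_VEC64_ELEM_SZ) of vec[b / BIT_VEC64_ELEM_SZ].
 * With 64-bit elements, bin 171 for example is bit 43 of vec[2]; a scan
 * starting at last == 171 therefore begins at element 2, inner bit 43, and
 * resets the inner index to 0 for the remaining elements.
 */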
2789
2790 /**
2791  * ecore_mcast_clear_first_bin - find the first set bin and clear it
2792  *
2793  * @o:
2794  *
2795  * returns the index of the found bin or -1 if none is found
2796  */
2797 static inline int ecore_mcast_clear_first_bin(struct ecore_mcast_obj *o)
2798 {
2799         int cur_bit = ecore_mcast_get_next_bin(o, 0);
2800
2801         if (cur_bit >= 0)
2802                 BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
2803
2804         return cur_bit;
2805 }
2806
2807 static inline uint8_t ecore_mcast_get_rx_tx_flag(struct ecore_mcast_obj *o)
2808 {
2809         struct ecore_raw_obj *raw = &o->raw;
2810         uint8_t rx_tx_flag = 0;
2811
2812         if ((raw->obj_type == ECORE_OBJ_TYPE_TX) ||
2813             (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
2814                 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2815
2816         if ((raw->obj_type == ECORE_OBJ_TYPE_RX) ||
2817             (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
2818                 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
2819
2820         return rx_tx_flag;
2821 }
2822
2823 static void ecore_mcast_set_one_rule_e2(struct bxe_adapter *sc,
2824                                         struct ecore_mcast_obj *o, int idx,
2825                                         union ecore_mcast_config_data *cfg_data,
2826                                         enum ecore_mcast_cmd cmd)
2827 {
2828         struct ecore_raw_obj *r = &o->raw;
2829         struct eth_multicast_rules_ramrod_data *data =
2830                 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2831         uint8_t func_id = r->func_id;
2832         uint8_t rx_tx_add_flag = ecore_mcast_get_rx_tx_flag(o);
2833         int bin;
2834
2835         if ((cmd == ECORE_MCAST_CMD_ADD) || (cmd == ECORE_MCAST_CMD_RESTORE))
2836                 rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
2837
2838         data->rules[idx].cmd_general_data |= rx_tx_add_flag;
2839
2840         /* Get a bin and update the bins' vector */
2841         switch (cmd) {
2842         case ECORE_MCAST_CMD_ADD:
2843                 bin = ecore_mcast_bin_from_mac(cfg_data->mac);
2844                 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
2845                 break;
2846
2847         case ECORE_MCAST_CMD_DEL:
2848                 /* If there are no more bins to clear
2849                  * (ecore_mcast_clear_first_bin() returns -1), we end up
2850                  * "clearing" the dummy 0xff bin.
2851                  * See ecore_mcast_validate_e2() for an explanation of when
2852                  * this may happen.
2853                  */
2854                 bin = ecore_mcast_clear_first_bin(o);
2855                 break;
2856
2857         case ECORE_MCAST_CMD_RESTORE:
2858                 bin = cfg_data->bin;
2859                 break;
2860
2861         default:
2862                 ECORE_ERR("Unknown command: %d\n", cmd);
2863                 return;
2864         }
2865
2866         ECORE_MSG(sc, "%s bin %d\n",
2867                   ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
2868                    "Setting"  : "Clearing"), bin);
2869
2870         data->rules[idx].bin_id    = (uint8_t)bin;
2871         data->rules[idx].func_id   = func_id;
2872         data->rules[idx].engine_id = o->engine_id;
2873 }
2874
2875 /**
2876  * ecore_mcast_handle_restore_cmd_e2 - restore configuration from the registry
2877  *
2878  * @sc:         device handle
2879  * @o:          multicast object
2880  * @start_bin:  index in the registry to start from (inclusive)
2881  * @rdata_idx:  index in the ramrod data to start from
2882  *
2883  * returns last handled bin index or -1 if all bins have been handled
2884  */
2885 static inline int ecore_mcast_handle_restore_cmd_e2(
2886         struct bxe_adapter *sc, struct ecore_mcast_obj *o, int start_bin,
2887         int *rdata_idx)
2888 {
2889         int cur_bin, cnt = *rdata_idx;
2890         union ecore_mcast_config_data cfg_data = {NULL};
2891
2892         /* go through the registry and configure the bins from it */
2893         for (cur_bin = ecore_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
2894             cur_bin = ecore_mcast_get_next_bin(o, cur_bin + 1)) {
2895
2896                 cfg_data.bin = (uint8_t)cur_bin;
2897                 o->set_one_rule(sc, o, cnt, &cfg_data,
2898                                 ECORE_MCAST_CMD_RESTORE);
2899
2900                 cnt++;
2901
2902                 ECORE_MSG(sc, "About to configure a bin %d\n", cur_bin);
2903
2904                 /* Break if we reached the maximum number
2905                  * of rules.
2906                  */
2907                 if (cnt >= o->max_cmd_len)
2908                         break;
2909         }
2910
2911         *rdata_idx = cnt;
2912
2913         return cur_bin;
2914 }
2915
2916 static inline void ecore_mcast_hdl_pending_add_e2(struct bxe_adapter *sc,
2917         struct ecore_mcast_obj *o, struct ecore_pending_mcast_cmd *cmd_pos,
2918         int *line_idx)
2919 {
2920         struct ecore_mcast_mac_elem *pmac_pos, *pmac_pos_n;
2921         int cnt = *line_idx;
2922         union ecore_mcast_config_data cfg_data = {NULL};
2923
2924         ECORE_LIST_FOR_EACH_ENTRY_SAFE(pmac_pos, pmac_pos_n,
2925                 &cmd_pos->data.macs_head, link, struct ecore_mcast_mac_elem) {
2926
2927                 cfg_data.mac = &pmac_pos->mac[0];
2928                 o->set_one_rule(sc, o, cnt, &cfg_data, cmd_pos->type);
2929
2930                 cnt++;
2931
2932                 ECORE_MSG(sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC\n",
2933                           pmac_pos->mac[0], pmac_pos->mac[1], pmac_pos->mac[2], pmac_pos->mac[3], pmac_pos->mac[4], pmac_pos->mac[5]);
2934
2935                 ECORE_LIST_REMOVE_ENTRY(&pmac_pos->link,
2936                                         &cmd_pos->data.macs_head);
2937
2938                 /* Break if we reached the maximum number
2939                  * of rules.
2940                  */
2941                 if (cnt >= o->max_cmd_len)
2942                         break;
2943         }
2944
2945         *line_idx = cnt;
2946
2947         /* if no more MACs to configure - we are done */
2948         if (ECORE_LIST_IS_EMPTY(&cmd_pos->data.macs_head))
2949                 cmd_pos->done = TRUE;
2950 }
2951
2952 static inline void ecore_mcast_hdl_pending_del_e2(struct bxe_adapter *sc,
2953         struct ecore_mcast_obj *o, struct ecore_pending_mcast_cmd *cmd_pos,
2954         int *line_idx)
2955 {
2956         int cnt = *line_idx;
2957
2958         while (cmd_pos->data.macs_num) {
2959                 o->set_one_rule(sc, o, cnt, NULL, cmd_pos->type);
2960
2961                 cnt++;
2962
2963                 cmd_pos->data.macs_num--;
2964
2965                 ECORE_MSG(sc, "Deleting MAC. %d left, cnt is %d\n",
2966                           cmd_pos->data.macs_num, cnt);
2967
2968                 /* Break if we reached the maximum
2969                  * number of rules.
2970                  */
2971                 if (cnt >= o->max_cmd_len)
2972                         break;
2973         }
2974
2975         *line_idx = cnt;
2976
2977         /* If we cleared all bins - we are done */
2978         if (!cmd_pos->data.macs_num)
2979                 cmd_pos->done = TRUE;
2980 }
2981
2982 static inline void ecore_mcast_hdl_pending_restore_e2(struct bxe_adapter *sc,
2983         struct ecore_mcast_obj *o, struct ecore_pending_mcast_cmd *cmd_pos,
2984         int *line_idx)
2985 {
2986         cmd_pos->data.next_bin = o->hdl_restore(sc, o, cmd_pos->data.next_bin,
2987                                                 line_idx);
2988
2989         if (cmd_pos->data.next_bin < 0)
2990                 /* If o->set_restore returned -1 we are done */
2991                 cmd_pos->done = TRUE;
2992         else
2993                 /* Start from the next bin next time */
2994                 cmd_pos->data.next_bin++;
2995 }
2996
2997 static inline int ecore_mcast_handle_pending_cmds_e2(struct bxe_adapter *sc,
2998                                 struct ecore_mcast_ramrod_params *p)
2999 {
3000         struct ecore_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
3001         int cnt = 0;
3002         struct ecore_mcast_obj *o = p->mcast_obj;
3003
3004         ECORE_LIST_FOR_EACH_ENTRY_SAFE(cmd_pos, cmd_pos_n,
3005                 &o->pending_cmds_head, link, struct ecore_pending_mcast_cmd) {
3006                 switch (cmd_pos->type) {
3007                 case ECORE_MCAST_CMD_ADD:
3008                         ecore_mcast_hdl_pending_add_e2(sc, o, cmd_pos, &cnt);
3009                         break;
3010
3011                 case ECORE_MCAST_CMD_DEL:
3012                         ecore_mcast_hdl_pending_del_e2(sc, o, cmd_pos, &cnt);
3013                         break;
3014
3015                 case ECORE_MCAST_CMD_RESTORE:
3016                         ecore_mcast_hdl_pending_restore_e2(sc, o, cmd_pos,
3017                                                            &cnt);
3018                         break;
3019
3020                 default:
3021                         ECORE_ERR("Unknown command: %d\n", cmd_pos->type);
3022                         return ECORE_INVAL;
3023                 }
3024
3025                 /* If the command has been completed - remove it from the list
3026                  * and free the memory
3027                  */
3028                 if (cmd_pos->done) {
3029                         ECORE_LIST_REMOVE_ENTRY(&cmd_pos->link,
3030                                                 &o->pending_cmds_head);
3031                         ECORE_FREE(sc, cmd_pos, cmd_pos->alloc_len);
3032                 }
3033
3034                 /* Break if we reached the maximum number of rules */
3035                 if (cnt >= o->max_cmd_len)
3036                         break;
3037         }
3038
3039         return cnt;
3040 }
3041
3042 static inline void ecore_mcast_hdl_add(struct bxe_adapter *sc,
3043         struct ecore_mcast_obj *o, struct ecore_mcast_ramrod_params *p,
3044         int *line_idx)
3045 {
3046         struct ecore_mcast_list_elem *mlist_pos;
3047         union ecore_mcast_config_data cfg_data = {NULL};
3048         int cnt = *line_idx;
3049
3050         ECORE_LIST_FOR_EACH_ENTRY(mlist_pos, &p->mcast_list, link,
3051                                   struct ecore_mcast_list_elem) {
3052                 cfg_data.mac = mlist_pos->mac;
3053                 o->set_one_rule(sc, o, cnt, &cfg_data, ECORE_MCAST_CMD_ADD);
3054
3055                 cnt++;
3056
3057                 ECORE_MSG(sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC\n",
3058                           mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2], mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5]);
3059         }
3060
3061         *line_idx = cnt;
3062 }
3063
3064 static inline void ecore_mcast_hdl_del(struct bxe_adapter *sc,
3065         struct ecore_mcast_obj *o, struct ecore_mcast_ramrod_params *p,
3066         int *line_idx)
3067 {
3068         int cnt = *line_idx, i;
3069
3070         for (i = 0; i < p->mcast_list_len; i++) {
3071                 o->set_one_rule(sc, o, cnt, NULL, ECORE_MCAST_CMD_DEL);
3072
3073                 cnt++;
3074
3075                 ECORE_MSG(sc, "Deleting MAC. %d left\n",
3076                           p->mcast_list_len - i - 1);
3077         }
3078
3079         *line_idx = cnt;
3080 }
3081
3082 /**
3083  * ecore_mcast_handle_current_cmd - handle the current (non-pending) command
3084  *
3085  * @sc:         device handle
3086  * @p:          multicast ramrod parameters
3087  * @cmd:        command to handle (ADD/DEL/RESTORE)
3088  * @start_cnt:  first line in the ramrod data that may be used
3089  *
3090  * This function is called iff there is enough room for the current command in
3091  * the ramrod data.
3092  * Returns number of lines filled in the ramrod data in total.
3093  */
3094 static inline int ecore_mcast_handle_current_cmd(struct bxe_adapter *sc,
3095                         struct ecore_mcast_ramrod_params *p,
3096                         enum ecore_mcast_cmd cmd,
3097                         int start_cnt)
3098 {
3099         struct ecore_mcast_obj *o = p->mcast_obj;
3100         int cnt = start_cnt;
3101
3102         ECORE_MSG(sc, "p->mcast_list_len=%d\n", p->mcast_list_len);
3103
3104         switch (cmd) {
3105         case ECORE_MCAST_CMD_ADD:
3106                 ecore_mcast_hdl_add(sc, o, p, &cnt);
3107                 break;
3108
3109         case ECORE_MCAST_CMD_DEL:
3110                 ecore_mcast_hdl_del(sc, o, p, &cnt);
3111                 break;
3112
3113         case ECORE_MCAST_CMD_RESTORE:
3114                 o->hdl_restore(sc, o, 0, &cnt);
3115                 break;
3116
3117         default:
3118                 ECORE_ERR("Unknown command: %d\n", cmd);
3119                 return ECORE_INVAL;
3120         }
3121
3122         /* The current command has been handled */
3123         p->mcast_list_len = 0;
3124
3125         return cnt;
3126 }
3127
3128 static int ecore_mcast_validate_e2(struct bxe_adapter *sc,
3129                                    struct ecore_mcast_ramrod_params *p,
3130                                    enum ecore_mcast_cmd cmd)
3131 {
3132         struct ecore_mcast_obj *o = p->mcast_obj;
3133         int reg_sz = o->get_registry_size(o);
3134
3135         switch (cmd) {
3136         /* DEL command deletes all currently configured MACs */
3137         case ECORE_MCAST_CMD_DEL:
3138                 o->set_registry_size(o, 0);
3139                 /* Don't break */
3140
3141         /* RESTORE command will restore the entire multicast configuration */
3142         case ECORE_MCAST_CMD_RESTORE:
3143                 /* Here we set the approximate amount of work to do, which
3144                  * may in fact turn out to be less: some MACs in postponed
3145                  * ADD command(s) scheduled before this command may fall
3146                  * into the same bin, so the actual number of bins set in
3147                  * the registry may be lower than estimated here. See
3148                  * ecore_mcast_set_one_rule_e2() for further details.
3149                  */
3150                 p->mcast_list_len = reg_sz;
3151                 break;
3152
3153         case ECORE_MCAST_CMD_ADD:
3154         case ECORE_MCAST_CMD_CONT:
3155                 /* Here we assume that all new MACs will fall into new bins.
3156                  * However we will correct the real registry size after we
3157                  * handle all pending commands.
3158                  */
3159                 o->set_registry_size(o, reg_sz + p->mcast_list_len);
3160                 break;
3161
3162         default:
3163                 ECORE_ERR("Unknown command: %d\n", cmd);
3164                 return ECORE_INVAL;
3165         }
3166
3167         /* Increase the total number of MACs pending to be configured */
3168         o->total_pending_num += p->mcast_list_len;
3169
3170         return ECORE_SUCCESS;
3171 }
3172
3173 static void ecore_mcast_revert_e2(struct bxe_adapter *sc,
3174                                       struct ecore_mcast_ramrod_params *p,
3175                                       int old_num_bins)
3176 {
3177         struct ecore_mcast_obj *o = p->mcast_obj;
3178
3179         o->set_registry_size(o, old_num_bins);
3180         o->total_pending_num -= p->mcast_list_len;
3181 }
3182
3183 /**
3184  * ecore_mcast_set_rdata_hdr_e2 - sets the ramrod data header values
3185  *
3186  * @sc:         device handle
3187  * @p:          multicast ramrod parameters
3188  * @len:        number of rules to handle
3189  */
3190 static inline void ecore_mcast_set_rdata_hdr_e2(struct bxe_adapter *sc,
3191                                         struct ecore_mcast_ramrod_params *p,
3192                                         uint8_t len)
3193 {
3194         struct ecore_raw_obj *r = &p->mcast_obj->raw;
3195         struct eth_multicast_rules_ramrod_data *data =
3196                 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
3197
3198         data->header.echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
3199                                         (ECORE_FILTER_MCAST_PENDING <<
3200                                          ECORE_SWCID_SHIFT));
3201         data->header.rule_cnt = len;
3202 }
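/* The echo field packs two things the completion path needs: the low
 * ECORE_SWCID_MASK bits carry the connection ID, while the bits from
 * ECORE_SWCID_SHIFT upward carry the pending-filter state
 * (ECORE_FILTER_MCAST_PENDING here), so the completion can be matched back
 * to this object and state.
 */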
3203
3204 /**
3205  * ecore_mcast_refresh_registry_e2 - recalculate the actual number of set bins
3206  *
3207  * @sc:         device handle
3208  * @o:          multicast object
3209  *
3210  * Recalculate the actual number of set bins in the registry using Brian
3211  * Kernighan's algorithm: its run time is proportional to the number of set bins.
3212  *
3213  * returns 0 for compliance with ecore_mcast_refresh_registry_e1().
3214  */
3215 static inline int ecore_mcast_refresh_registry_e2(struct bxe_adapter *sc,
3216                                                   struct ecore_mcast_obj *o)
3217 {
3218         int i, cnt = 0;
3219         uint64_t elem;
3220
3221         for (i = 0; i < ECORE_MCAST_VEC_SZ; i++) {
3222                 elem = o->registry.aprox_match.vec[i];
3223                 for (; elem; cnt++)
3224                         elem &= elem - 1;
3225         }
3226
3227         o->set_registry_size(o, cnt);
3228
3229         return ECORE_SUCCESS;
3230 }
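/* Worked example of the Kernighan step above: elem &= (elem - 1) clears the
 * lowest set bit on each iteration, e.g. 0xb0 -> 0xa0 -> 0x80 -> 0, i.e.
 * three iterations for three set bits, regardless of the element width.
 */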
3231
3232 static int ecore_mcast_setup_e2(struct bxe_adapter *sc,
3233                                 struct ecore_mcast_ramrod_params *p,
3234                                 enum ecore_mcast_cmd cmd)
3235 {
3236         struct ecore_raw_obj *raw = &p->mcast_obj->raw;
3237         struct ecore_mcast_obj *o = p->mcast_obj;
3238         struct eth_multicast_rules_ramrod_data *data =
3239                 (struct eth_multicast_rules_ramrod_data *)(raw->rdata);
3240         int cnt = 0, rc;
3241
3242         /* Reset the ramrod data buffer */
3243         ECORE_MEMSET(data, 0, sizeof(*data));
3244
3245         cnt = ecore_mcast_handle_pending_cmds_e2(sc, p);
3246
3247         /* If there are no more pending commands - clear SCHEDULED state */
3248         if (ECORE_LIST_IS_EMPTY(&o->pending_cmds_head))
3249                 o->clear_sched(o);
3250
3251         /* The below may be TRUE iff there was enough room in ramrod
3252          * data for all pending commands and for the current
3253          * command. Otherwise the current command would have been added
3254          * to the pending commands and p->mcast_list_len would have been
3255          * zeroed.
3256          */
3257         if (p->mcast_list_len > 0)
3258                 cnt = ecore_mcast_handle_current_cmd(sc, p, cmd, cnt);
3259
3260         /* We've pulled out some MACs - update the total number of
3261          * outstanding.
3262          */
3263         o->total_pending_num -= cnt;
3264
3265         /* send a ramrod */
3266         ECORE_DBG_BREAK_IF(o->total_pending_num < 0);
3267         ECORE_DBG_BREAK_IF(cnt > o->max_cmd_len);
3268
3269         ecore_mcast_set_rdata_hdr_e2(sc, p, (uint8_t)cnt);
3270
3271         /* Update a registry size if there are no more pending operations.
3272          *
3273          * We don't want to change the value of the registry size if there are
3274          * pending operations because we want it to always be equal to the
3275          * exact or the approximate number (see ecore_mcast_validate_e2()) of
3276          * set bins after the last requested operation in order to properly
3277          * evaluate the size of the next DEL/RESTORE operation.
3278          *
3279          * Note that we update the registry itself during command(s) handling
3280          * - see ecore_mcast_set_one_rule_e2(). That's because for 57712 we
3281          * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
3282          * with a limited amount of update commands (per MAC/bin) and we don't
3283          * know in this scope what the actual state of bins configuration is
3284          * going to be after this ramrod.
3285          */
3286         if (!o->total_pending_num)
3287                 ecore_mcast_refresh_registry_e2(sc, o);
3288
3289         /* If CLEAR_ONLY was requested - don't send a ramrod and clear
3290          * RAMROD_PENDING status immediately.
3291          */
3292         if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3293                 raw->clear_pending(raw);
3294                 return ECORE_SUCCESS;
3295         } else {
3296                 /* No explicit memory barrier is needed here: we must in any
3297                  * case order the write of the SPQ element against the update
3298                  * of the SPQ producer, which involves a memory read, so a
3299                  * full memory barrier is already placed there
3300                  * (inside ecore_sp_post()).
3301                  */
3302
3303                 /* Send a ramrod */
3304                 rc = ecore_sp_post( sc,
3305                                     RAMROD_CMD_ID_ETH_MULTICAST_RULES,
3306                                     raw->cid,
3307                                     raw->rdata_mapping,
3308                                     ETH_CONNECTION_TYPE);
3309                 if (rc)
3310                         return rc;
3311
3312                 /* Ramrod completion is pending */
3313                 return ECORE_PENDING;
3314         }
3315 }
3316
3317 static int ecore_mcast_validate_e1h(struct bxe_adapter *sc,
3318                                     struct ecore_mcast_ramrod_params *p,
3319                                     enum ecore_mcast_cmd cmd)
3320 {
3321         /* Mark, that there is a work to do */
3322         if ((cmd == ECORE_MCAST_CMD_DEL) || (cmd == ECORE_MCAST_CMD_RESTORE))
3323                 p->mcast_list_len = 1;
3324
3325         return ECORE_SUCCESS;
3326 }
3327
3328 static void ecore_mcast_revert_e1h(struct bxe_adapter *sc,
3329                                        struct ecore_mcast_ramrod_params *p,
3330                                        int old_num_bins)
3331 {
3332         /* Do nothing */
3333 }
3334
3335 #define ECORE_57711_SET_MC_FILTER(filter, bit) \
3336 do { \
3337         (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
3338 } while (0)
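/* The 57711 filter is an array of 32-bit words, so bin b sets bit (b & 0x1f)
 * of word (b >> 5); e.g. bin 171 sets bit 11 of mc_filter[5]. This mirrors
 * the 256-bin registry above, just with 32-bit instead of 64-bit elements.
 */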
3339
3340 static inline void ecore_mcast_hdl_add_e1h(struct bxe_adapter *sc,
3341                                            struct ecore_mcast_obj *o,
3342                                            struct ecore_mcast_ramrod_params *p,
3343                                            uint32_t *mc_filter)
3344 {
3345         struct ecore_mcast_list_elem *mlist_pos;
3346         int bit;
3347
3348         ECORE_LIST_FOR_EACH_ENTRY(mlist_pos, &p->mcast_list, link,
3349                                   struct ecore_mcast_list_elem) {
3350                 bit = ecore_mcast_bin_from_mac(mlist_pos->mac);
3351                 ECORE_57711_SET_MC_FILTER(mc_filter, bit);
3352
3353                 ECORE_MSG(sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC, bin %d\n",
3354                           mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2], mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5], bit);
3355
3356                 /* bookkeeping... */
3357                 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
3358                                   bit);
3359         }
3360 }
3361
3362 static inline void ecore_mcast_hdl_restore_e1h(struct bxe_adapter *sc,
3363         struct ecore_mcast_obj *o, struct ecore_mcast_ramrod_params *p,
3364         uint32_t *mc_filter)
3365 {
3366         int bit;
3367
3368         for (bit = ecore_mcast_get_next_bin(o, 0);
3369              bit >= 0;
3370              bit = ecore_mcast_get_next_bin(o, bit + 1)) {
3371                 ECORE_57711_SET_MC_FILTER(mc_filter, bit);
3372                 ECORE_MSG(sc, "About to set bin %d\n", bit);
3373         }
3374 }
3375
3376 /* On 57711 we write the multicast MACs' approximate match
3377  * table directly into the TSTORM's internal RAM, so we don't
3378  * need any tricks to make it work.
3379  */
3380 static int ecore_mcast_setup_e1h(struct bxe_adapter *sc,
3381                                  struct ecore_mcast_ramrod_params *p,
3382                                  enum ecore_mcast_cmd cmd)
3383 {
3384         int i;
3385         struct ecore_mcast_obj *o = p->mcast_obj;
3386         struct ecore_raw_obj *r = &o->raw;
3387
3388         /* If CLEAR_ONLY has been requested - clear the registry
3389          * and clear a pending bit.
3390          */
3391         if (!ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3392                 uint32_t mc_filter[ECORE_MC_HASH_SIZE] = {0};
3393
3394                 /* Set the multicast filter bits before writing it into
3395                  * the internal memory.
3396                  */
3397                 switch (cmd) {
3398                 case ECORE_MCAST_CMD_ADD:
3399                         ecore_mcast_hdl_add_e1h(sc, o, p, mc_filter);
3400                         break;
3401
3402                 case ECORE_MCAST_CMD_DEL:
3403                         ECORE_MSG(sc,
3404                                   "Invalidating multicast MACs configuration\n");
3405
3406                         /* clear the registry */
3407                         ECORE_MEMSET(o->registry.aprox_match.vec, 0,
3408                                sizeof(o->registry.aprox_match.vec));
3409                         break;
3410
3411                 case ECORE_MCAST_CMD_RESTORE:
3412                         ecore_mcast_hdl_restore_e1h(sc, o, p, mc_filter);
3413                         break;
3414
3415                 default:
3416                         ECORE_ERR("Unknown command: %d\n", cmd);
3417                         return ECORE_INVAL;
3418                 }
3419
3420                 /* Set the mcast filter in the internal memory */
3421                 for (i = 0; i < ECORE_MC_HASH_SIZE; i++)
3422                         REG_WR(sc, ECORE_MC_HASH_OFFSET(sc, i), mc_filter[i]);
3423         } else
3424                 /* clear the registry */
3425                 ECORE_MEMSET(o->registry.aprox_match.vec, 0,
3426                        sizeof(o->registry.aprox_match.vec));
3427
3428         /* We are done */
3429         r->clear_pending(r);
3430
3431         return ECORE_SUCCESS;
3432 }
3433
3434 static int ecore_mcast_validate_e1(struct bxe_adapter *sc,
3435                                    struct ecore_mcast_ramrod_params *p,
3436                                    enum ecore_mcast_cmd cmd)
3437 {
3438         struct ecore_mcast_obj *o = p->mcast_obj;
3439         int reg_sz = o->get_registry_size(o);
3440
3441         switch (cmd) {
3442         /* DEL command deletes all currently configured MACs */
3443         case ECORE_MCAST_CMD_DEL:
3444                 o->set_registry_size(o, 0);
3445                 /* Don't break */
3446
3447         /* RESTORE command will restore the entire multicast configuration */
3448         case ECORE_MCAST_CMD_RESTORE:
3449                 p->mcast_list_len = reg_sz;
3450                 ECORE_MSG(sc, "Command %d, p->mcast_list_len=%d\n",
3451                           cmd, p->mcast_list_len);
3452                 break;
3453
3454         case ECORE_MCAST_CMD_ADD:
3455         case ECORE_MCAST_CMD_CONT:
3456                 /* Multicast MACs on 57710 are configured as unicast MACs and
3457                  * there is only a limited number of CAM entries for that
3458                  * matter.
3459                  */
3460                 if (p->mcast_list_len > o->max_cmd_len) {
3461                         ECORE_ERR("Can't configure more than %d multicast MACs on 57710\n",
3462                                   o->max_cmd_len);
3463                         return ECORE_INVAL;
3464                 }
3465                 /* Every configured MAC should be cleared if the DEL command is
3466                  * called. Only the last ADD command is relevant, since every
3467                  * ADD command overrides the previous configuration.
3468                  */
3469                 ECORE_MSG(sc, "p->mcast_list_len=%d\n", p->mcast_list_len);
3470                 if (p->mcast_list_len > 0)
3471                         o->set_registry_size(o, p->mcast_list_len);
3472
3473                 break;
3474
3475         default:
3476                 ECORE_ERR("Unknown command: %d\n", cmd);
3477                 return ECORE_INVAL;
3478         }
3479
3480         /* We want to ensure that commands are executed one by one for 57710.
3481          * Therefore each non-empty command will consume o->max_cmd_len.
3482          */