/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux_compat.h>
#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>

#include "mlx4_en.h"
#include "en_port.h"

int mlx4_en_setup_tc(struct ether *dev, uint8_t up)
{
        panic("Disabled");
#if 0 // AKAROS_PORT
        struct mlx4_en_priv *priv = netdev_priv(dev);
        int i;
        unsigned int offset = 0;

        if (up && up != MLX4_EN_NUM_UP)
                return -EINVAL;

        netdev_set_num_tc(dev, up);

        /* Partition Tx queues evenly among UPs */
        for (i = 0; i < up; i++) {
                netdev_set_tc_queue(dev, i, priv->num_tx_rings_p_up, offset);
                offset += priv->num_tx_rings_p_up;
        }

        return 0;
#endif
}
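
/*
 * Illustrative sketch (not part of the driver): with the Linux-side code
 * above enabled, each user priority (UP) gets a contiguous block of
 * num_tx_rings_p_up Tx queues.  Assuming num_tx_rings_p_up == 8 and
 * up == MLX4_EN_NUM_UP == 8, the mapping would be:
 *
 *      TC 0 -> queues  0..7
 *      TC 1 -> queues  8..15
 *      ...
 *      TC 7 -> queues 56..63
 */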

#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
static int mlx4_en_low_latency_recv(struct napi_struct *napi)
{
        struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
        struct ether *dev = cq->dev;
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];
        int done;

        if (!priv->port_up)
                return LL_FLUSH_FAILED;

        if (!mlx4_en_cq_lock_poll(cq))
                return LL_FLUSH_BUSY;

        done = mlx4_en_process_rx_cq(dev, cq, 4);
        if (likely(done))
                rx_ring->cleaned += done;
        else
                rx_ring->misses++;

        mlx4_en_cq_unlock_poll(cq);

        return done;
}
#endif  /* CONFIG_NET_RX_BUSY_POLL */
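
/*
 * Note: the busy-poll path above processes the RX CQ with a small fixed
 * budget (4 completions per call), presumably so a busy-polling caller
 * cannot monopolize the ring; anything beyond the budget is left for the
 * regular NAPI poll to pick up.
 */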

#ifdef CONFIG_RFS_ACCEL

struct mlx4_en_filter {
        struct list_head next;
        struct work_struct work;

        uint8_t ip_proto;
        __be32 src_ip;
        __be32 dst_ip;
        __be16 src_port;
        __be16 dst_port;

        int rxq_index;
        struct mlx4_en_priv *priv;
        uint32_t flow_id;               /* RFS infrastructure id */
        int id;                         /* mlx4_en driver id */
        uint64_t reg_id;                /* Flow steering API id */
        uint8_t activated;              /* Used to prevent expiry before filter
                                         * is attached
                                         */
        struct hlist_node filter_chain;
};

static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);

static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(uint8_t ip_proto)
{
        switch (ip_proto) {
        case IPPROTO_UDP:
                return MLX4_NET_TRANS_RULE_ID_UDP;
        case IPPROTO_TCP:
                return MLX4_NET_TRANS_RULE_ID_TCP;
        default:
                return MLX4_NET_TRANS_RULE_NUM;
        }
}

static void mlx4_en_filter_work(struct work_struct *work)
{
        struct mlx4_en_filter *filter = container_of(work,
                                                     struct mlx4_en_filter,
                                                     work);
        struct mlx4_en_priv *priv = filter->priv;
        struct mlx4_spec_list spec_tcp_udp = {
                .id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto),
                {
                        .tcp_udp = {
                                .dst_port = filter->dst_port,
                                .dst_port_msk = (__force __be16)-1,
                                .src_port = filter->src_port,
                                .src_port_msk = (__force __be16)-1,
                        },
                },
        };
        struct mlx4_spec_list spec_ip = {
                .id = MLX4_NET_TRANS_RULE_ID_IPV4,
                {
                        .ipv4 = {
                                .dst_ip = filter->dst_ip,
                                .dst_ip_msk = (__force __be32)-1,
                                .src_ip = filter->src_ip,
                                .src_ip_msk = (__force __be32)-1,
                        },
                },
        };
        struct mlx4_spec_list spec_eth = {
                .id = MLX4_NET_TRANS_RULE_ID_ETH,
        };
        struct mlx4_net_trans_rule rule = {
                .list = LIST_HEAD_INIT(rule.list),
                .queue_mode = MLX4_NET_TRANS_Q_LIFO,
                .exclusive = 1,
                .allow_loopback = 1,
                .promisc_mode = MLX4_FS_REGULAR,
                .port = priv->port,
                .priority = MLX4_DOMAIN_RFS,
        };
        int rc;
        __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

        if (spec_tcp_udp.id >= MLX4_NET_TRANS_RULE_NUM) {
                en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
                        filter->ip_proto);
                goto ignore;
        }
        list_add_tail(&spec_eth.list, &rule.list);
        list_add_tail(&spec_ip.list, &rule.list);
        list_add_tail(&spec_tcp_udp.list, &rule.list);

        rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
        memcpy(spec_eth.eth.dst_mac, priv->dev->ea, Eaddrlen);
        memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, Eaddrlen);

        filter->activated = 0;

        if (filter->reg_id) {
                rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
                if (rc && rc != -ENOENT)
                        en_err(priv, "Error detaching flow. rc = %d\n", rc);
        }

        rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id);
        if (rc)
                en_err(priv, "Error attaching flow. err = %d\n", rc);

ignore:
        mlx4_en_filter_rfs_expire(priv);

        filter->activated = 1;
}

static inline struct hlist_head *
filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
                   __be16 src_port, __be16 dst_port)
{
        unsigned long l;
        int bucket_idx;

        l = (__force unsigned long)src_port |
            ((__force unsigned long)dst_port << 2);
        l ^= (__force unsigned long)(src_ip ^ dst_ip);

        bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT);

        return &priv->filter_hash[bucket_idx];
}
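
/*
 * Illustrative sketch: the bucket index is derived by folding the flow
 * 4-tuple into one word and feeding it to hash_long().  For a hypothetical
 * flow 10.0.0.1:1234 -> 10.0.0.2:80, l is built from the two ports
 * (dst_port shifted left by 2) XORed with src_ip ^ dst_ip, then reduced to
 * MLX4_EN_FILTER_HASH_SHIFT bits.  Collisions land on the same bucket and
 * are resolved by the per-bucket hlist walked in mlx4_en_filter_find()
 * below.
 */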

static struct mlx4_en_filter *
mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
                     __be32 dst_ip, uint8_t ip_proto, __be16 src_port,
                     __be16 dst_port, uint32_t flow_id)
{
        struct mlx4_en_filter *filter = NULL;

        filter = kzmalloc(sizeof(struct mlx4_en_filter), 0);
        if (!filter)
                return NULL;

        filter->priv = priv;
        filter->rxq_index = rxq_index;
        INIT_WORK(&filter->work, mlx4_en_filter_work);

        filter->src_ip = src_ip;
        filter->dst_ip = dst_ip;
        filter->ip_proto = ip_proto;
        filter->src_port = src_port;
        filter->dst_port = dst_port;

        filter->flow_id = flow_id;

        filter->id = priv->last_filter_id++ % RPS_NO_FILTER;

        list_add_tail(&filter->next, &priv->filters);
        hlist_add_head(&filter->filter_chain,
                       filter_hash_bucket(priv, src_ip, dst_ip, src_port,
                                          dst_port));

        return filter;
}

static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
{
        struct mlx4_en_priv *priv = filter->priv;
        int rc;

        list_del(&filter->next);

        rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
        if (rc && rc != -ENOENT)
                en_err(priv, "Error detaching flow. rc = %d\n", rc);

        kfree(filter);
}

static inline struct mlx4_en_filter *
mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
                    uint8_t ip_proto, __be16 src_port, __be16 dst_port)
{
        struct mlx4_en_filter *filter;
        struct mlx4_en_filter *ret = NULL;

        hlist_for_each_entry(filter,
                             filter_hash_bucket(priv, src_ip, dst_ip,
                                                src_port, dst_port),
                             filter_chain) {
                if (filter->src_ip == src_ip &&
                    filter->dst_ip == dst_ip &&
                    filter->ip_proto == ip_proto &&
                    filter->src_port == src_port &&
                    filter->dst_port == dst_port) {
                        ret = filter;
                        break;
                }
        }

        return ret;
}

static int
mlx4_en_filter_rfs(struct ether *net_dev, const struct sk_buff *skb,
                   uint16_t rxq_index, uint32_t flow_id)
{
        struct mlx4_en_priv *priv = netdev_priv(net_dev);
        struct mlx4_en_filter *filter;
        const struct iphdr *ip;
        const __be16 *ports;
        uint8_t ip_proto;
        __be32 src_ip;
        __be32 dst_ip;
        __be16 src_port;
        __be16 dst_port;
        int nhoff = skb_network_offset(skb);
        int ret = 0;

        if (skb->protocol != cpu_to_be16(ETH_P_IP))
                return -EPROTONOSUPPORT;

        ip = (const struct iphdr *)(skb->data + nhoff);
        if (ip_is_fragment(ip))
                return -EPROTONOSUPPORT;

        if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP))
                return -EPROTONOSUPPORT;
        ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);

        ip_proto = ip->protocol;
        src_ip = ip->saddr;
        dst_ip = ip->daddr;
        src_port = ports[0];
        dst_port = ports[1];

        spin_lock(&priv->filters_lock);
        filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto,
                                     src_port, dst_port);
        if (filter) {
                if (filter->rxq_index == rxq_index)
                        goto out;

                filter->rxq_index = rxq_index;
        } else {
                filter = mlx4_en_filter_alloc(priv, rxq_index,
                                              src_ip, dst_ip, ip_proto,
                                              src_port, dst_port, flow_id);
                if (!filter) {
                        ret = -ENOMEM;
                        goto err;
                }
        }

        queue_work(priv->mdev->workqueue, &filter->work);

out:
        ret = filter->id;
err:
        spin_unlock(&priv->filters_lock);

        return ret;
}
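
/*
 * Note on the return path above: on success mlx4_en_filter_rfs() returns
 * the driver-local filter id (filter->id), which the RFS core later hands
 * back through rps_may_expire_flow() so that mlx4_en_filter_rfs_expire()
 * below can match expiry candidates against their hardware rules.
 */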

void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv)
{
        struct mlx4_en_filter *filter, *tmp;
        LIST_HEAD(del_list);

        spin_lock(&priv->filters_lock);
        list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
                list_move(&filter->next, &del_list);
                hlist_del(&filter->filter_chain);
        }
        spin_unlock(&priv->filters_lock);

        list_for_each_entry_safe(filter, tmp, &del_list, next) {
                cancel_work_sync(&filter->work);
                mlx4_en_filter_free(filter);
        }
}

static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
{
        struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL;
        LIST_HEAD(del_list);
        int i = 0;

        spin_lock(&priv->filters_lock);
        list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
                if (i > MLX4_EN_FILTER_EXPIRY_QUOTA)
                        break;

                if (filter->activated &&
                    !work_pending(&filter->work) &&
                    rps_may_expire_flow(priv->dev,
                                        filter->rxq_index, filter->flow_id,
                                        filter->id)) {
                        list_move(&filter->next, &del_list);
                        hlist_del(&filter->filter_chain);
                } else
                        last_filter = filter;

                i++;
        }

        if (last_filter && (&last_filter->next != priv->filters.next))
                list_move(&priv->filters, &last_filter->next);

        spin_unlock(&priv->filters_lock);

        list_for_each_entry_safe(filter, tmp, &del_list, next)
                mlx4_en_filter_free(filter);
}
#endif

static int mlx4_en_vlan_rx_add_vid(struct ether *dev,
                                   __be16 proto, uint16_t vid)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        int err;
        int idx;

        en_dbg(HW, priv, "adding VLAN:%d\n", vid);

        set_bit(vid, priv->active_vlans);

        /* Add VID to port VLAN filter */
        qlock(&mdev->state_lock);
        if (mdev->device_up && priv->port_up) {
                err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
                if (err)
                        en_err(priv, "Failed configuring VLAN filter\n");
        }
        if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
                en_dbg(HW, priv, "failed adding vlan %d\n", vid);
        qunlock(&mdev->state_lock);

        return 0;
}

static int mlx4_en_vlan_rx_kill_vid(struct ether *dev,
                                    __be16 proto, uint16_t vid)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        int err;

        en_dbg(HW, priv, "Killing VID:%d\n", vid);

        clear_bit(vid, priv->active_vlans);

        /* Remove VID from port VLAN filter */
        qlock(&mdev->state_lock);
        mlx4_unregister_vlan(mdev->dev, priv->port, vid);

        if (mdev->device_up && priv->port_up) {
                err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
                if (err)
                        en_err(priv, "Failed configuring VLAN filter\n");
        }
        qunlock(&mdev->state_lock);

        return 0;
}

static void mlx4_en_u64_to_mac(unsigned char dst_mac[Eaddrlen + 2],
                               uint64_t src_mac)
{
        int i;

        for (i = Eaddrlen - 1; i >= 0; --i) {
                dst_mac[i] = src_mac & 0xff;
                src_mac >>= 8;
        }
        memset(&dst_mac[Eaddrlen], 0, 2);
}
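
/*
 * Illustrative sketch: the MAC lives big-endian in the low 48 bits of
 * src_mac, so a hypothetical src_mac == 0x0002c9abcdef unpacks to
 * dst_mac[] = {0x00, 0x02, 0xc9, 0xab, 0xcd, 0xef}.  The two trailing pad
 * bytes are zeroed, presumably so the buffer can be handed to the 64-bit
 * compare helpers (ether_addr_equal_64bits) used elsewhere in this file.
 */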

static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv,
                                    unsigned char *addr,
                                    int qpn, uint64_t *reg_id)
{
        int err;

        if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
            priv->mdev->dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
                return 0; /* do nothing */

        err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn,
                                    MLX4_DOMAIN_NIC, reg_id);
        if (err) {
                en_err(priv, "failed to add vxlan steering rule, err %d\n",
                       err);
                return err;
        }
        en_dbg(DRV, priv, "added vxlan steering rule, mac %pM reg_id %llx\n",
               addr, *reg_id);
        return 0;
}

static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
                                unsigned char *mac, int *qpn,
                                uint64_t *reg_id)
{
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_dev *dev = mdev->dev;
        int err;

        switch (dev->caps.steering_mode) {
        case MLX4_STEERING_MODE_B0: {
                struct mlx4_qp qp;
                uint8_t gid[16] = {0};

                qp.qpn = *qpn;
                memcpy(&gid[10], mac, Eaddrlen);
                gid[5] = priv->port;

                err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
                break;
        }
        case MLX4_STEERING_MODE_DEVICE_MANAGED: {
                struct mlx4_spec_list spec_eth = { {NULL} };
                __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

                struct mlx4_net_trans_rule rule = {
                        .queue_mode = MLX4_NET_TRANS_Q_FIFO,
                        .exclusive = 0,
                        .allow_loopback = 1,
                        .promisc_mode = MLX4_FS_REGULAR,
                        .priority = MLX4_DOMAIN_NIC,
                };

                rule.port = priv->port;
                rule.qpn = *qpn;
                INIT_LIST_HEAD(&rule.list);

                spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
                memcpy(spec_eth.eth.dst_mac, mac, Eaddrlen);
                memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, Eaddrlen);
                list_add_tail(&spec_eth.list, &rule.list);

                err = mlx4_flow_attach(dev, &rule, reg_id);
                break;
        }
        default:
                return -EINVAL;
        }
        if (err)
                en_warn(priv, "Failed Attaching Unicast\n");

        return err;
}
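
/*
 * Layout note for the B0 case above: the unicast rule is expressed as a
 * 16-byte GID, with the physical port number in byte 5 and the destination
 * MAC in bytes 10..15.  Device-managed steering instead builds an explicit
 * flow-spec list matching on dst_mac with a full 48-bit mask.
 */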

static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv,
                                     unsigned char *mac, int qpn,
                                     uint64_t reg_id)
{
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_dev *dev = mdev->dev;

        switch (dev->caps.steering_mode) {
        case MLX4_STEERING_MODE_B0: {
                struct mlx4_qp qp;
                uint8_t gid[16] = {0};

                qp.qpn = qpn;
                memcpy(&gid[10], mac, Eaddrlen);
                gid[5] = priv->port;

                mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
                break;
        }
        case MLX4_STEERING_MODE_DEVICE_MANAGED: {
                mlx4_flow_detach(dev, reg_id);
                break;
        }
        default:
                en_err(priv, "Invalid steering mode.\n");
        }
}

static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
{
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_dev *dev = mdev->dev;
        struct mlx4_mac_entry *entry;
        int index = 0;
        int err = 0;
        uint64_t reg_id = 0;
        int *qpn = &priv->base_qpn;
        uint64_t mac = mlx4_mac_to_u64(priv->dev->ea);

#if 0 // AKAROS_PORT
        en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
               priv->dev->ea);
#else
        en_dbg(DRV, priv, "Registering MAC: %02x:%02x:%02x:%02x:%02x:%02x\n",
               priv->dev->ea[0], priv->dev->ea[1], priv->dev->ea[2],
               priv->dev->ea[3], priv->dev->ea[4], priv->dev->ea[5]);
#endif
        index = mlx4_register_mac(dev, priv->port, mac);
        if (index < 0) {
                err = index;
                en_err(priv, "Failed adding MAC: %pM\n",
                       priv->dev->ea);
                return err;
        }

        if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
                int base_qpn = mlx4_get_base_qpn(dev, priv->port);

                *qpn = base_qpn + index;
                return 0;
        }

        err = mlx4_qp_reserve_range(dev, 1, 1, qpn, MLX4_RESERVE_A0_QP);
        en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
        if (err) {
                en_err(priv, "Failed to reserve qp for mac registration\n");
                goto qp_err;
        }

        err = mlx4_en_uc_steer_add(priv, priv->dev->ea, qpn, &reg_id);
        if (err)
                goto steer_err;

#if 0 // AKAROS_PORT
        err = mlx4_en_tunnel_steer_add(priv, priv->dev->ea, *qpn,
                                       &priv->tunnel_reg_id);
        if (err)
                goto tunnel_err;
#endif

        entry = kmalloc(sizeof(*entry), MEM_WAIT);
        if (!entry) {
                err = -ENOMEM;
                goto alloc_err;
        }
        memcpy(entry->mac, priv->dev->ea, sizeof(entry->mac));
        memcpy(priv->current_mac, entry->mac, sizeof(priv->current_mac));
        entry->reg_id = reg_id;

#if 0 // AKAROS_PORT
        hlist_add_head_rcu(&entry->hlist,
                           &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);
#else
        hlist_add_head(&entry->hlist,
                       &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);
        /* FIXME why is rcu significant? */
#endif

        return 0;

alloc_err:
        if (priv->tunnel_reg_id)
                mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
#if 0 // AKAROS_PORT
tunnel_err:
#endif
        /* Fall-through from alloc_err (upstream also reaches this via the
         * tunnel_err label): release the unicast steering rule. */
        mlx4_en_uc_steer_release(priv, priv->dev->ea, *qpn, reg_id);

steer_err:
        mlx4_qp_release_range(dev, *qpn, 1);

qp_err:
        mlx4_unregister_mac(dev, priv->port, mac);
        return err;
}
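
/*
 * Summary of the path above: for A0 (legacy) steering the RSS base QP is
 * simply base_qpn + the MAC table index, so no per-MAC steering rule is
 * needed.  For B0 and device-managed modes the driver reserves a dedicated
 * QP range, attaches a unicast steering rule to it, and tracks the
 * (MAC, reg_id) pair in mac_hash for later replacement or teardown.
 */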

static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
{
        panic("Disabled");
#if 0 // AKAROS_PORT
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_dev *dev = mdev->dev;
        int qpn = priv->base_qpn;
        uint64_t mac;

        if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
                mac = mlx4_mac_to_u64(priv->dev->ea);
                en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
                       priv->dev->ea);
                mlx4_unregister_mac(dev, priv->port, mac);
        } else {
                struct mlx4_mac_entry *entry;
                struct hlist_node *tmp;
                struct hlist_head *bucket;
                unsigned int i;

                for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
                        bucket = &priv->mac_hash[i];
                        hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
                                mac = mlx4_mac_to_u64(entry->mac);
                                en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
                                       entry->mac);
                                mlx4_en_uc_steer_release(priv, entry->mac,
                                                         qpn, entry->reg_id);

                                mlx4_unregister_mac(dev, priv->port, mac);
                                hlist_del_rcu(&entry->hlist);
                                kfree_rcu(entry, rcu);
                        }
                }

                if (priv->tunnel_reg_id) {
                        mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
                        priv->tunnel_reg_id = 0;
                }

                en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
                       priv->port, qpn);
                mlx4_qp_release_range(dev, qpn, 1);
                priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;
        }
#endif
}

#if 0 // AKAROS_PORT
static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
                               unsigned char *new_mac, unsigned char *prev_mac)
{
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_dev *dev = mdev->dev;
        int err = 0;
        uint64_t new_mac_u64 = mlx4_mac_to_u64(new_mac);

        if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
                struct hlist_head *bucket;
                unsigned int mac_hash;
                struct mlx4_mac_entry *entry;
                struct hlist_node *tmp;
                uint64_t prev_mac_u64 = mlx4_mac_to_u64(prev_mac);

                bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
                hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
                        if (ether_addr_equal_64bits(entry->mac, prev_mac)) {
                                mlx4_en_uc_steer_release(priv, entry->mac,
                                                         qpn, entry->reg_id);
                                mlx4_unregister_mac(dev, priv->port,
                                                    prev_mac_u64);
                                hlist_del_rcu(&entry->hlist);
                                synchronize_rcu();
                                memcpy(entry->mac, new_mac, Eaddrlen);
                                entry->reg_id = 0;
                                mac_hash = new_mac[MLX4_EN_MAC_HASH_IDX];
                                hlist_add_head_rcu(&entry->hlist,
                                                   &priv->mac_hash[mac_hash]);
                                mlx4_register_mac(dev, priv->port, new_mac_u64);
                                err = mlx4_en_uc_steer_add(priv, new_mac,
                                                           &qpn,
                                                           &entry->reg_id);
                                if (err)
                                        return err;
                                if (priv->tunnel_reg_id) {
                                        mlx4_flow_detach(priv->mdev->dev,
                                                         priv->tunnel_reg_id);
                                        priv->tunnel_reg_id = 0;
                                }
                                err = mlx4_en_tunnel_steer_add(priv, new_mac,
                                                               qpn,
                                                               &priv->tunnel_reg_id);
                                return err;
                        }
                }
                return -EINVAL;
        }

        return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64);
}

static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv,
                              unsigned char new_mac[Eaddrlen + 2])
{
        int err = 0;

        if (priv->port_up) {
                /* Remove old MAC and insert the new one */
                err = mlx4_en_replace_mac(priv, priv->base_qpn,
                                          new_mac, priv->current_mac);
                if (err)
                        en_err(priv, "Failed changing HW MAC address\n");
        } else
                en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");

        if (!err)
                memcpy(priv->current_mac, new_mac, sizeof(priv->current_mac));

        return err;
}

static int mlx4_en_set_mac(struct ether *dev, void *addr)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        struct sockaddr *saddr = addr;
        unsigned char new_mac[Eaddrlen + 2];
        int err;

        if (!is_valid_ether_addr(saddr->sa_data))
                return -EADDRNOTAVAIL;

        qlock(&mdev->state_lock);
        memcpy(new_mac, saddr->sa_data, Eaddrlen);
        err = mlx4_en_do_set_mac(priv, new_mac);
        if (!err)
                memcpy(dev->ea, saddr->sa_data, Eaddrlen);
        qunlock(&mdev->state_lock);

        return err;
}
#endif

static void mlx4_en_clear_list(struct ether *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_mc_list *tmp, *mc_to_del;

        list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) {
                list_del(&mc_to_del->list);
                kfree(mc_to_del);
        }
}

#if 0 // AKAROS_PORT
static void mlx4_en_cache_mclist(struct ether *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct netdev_hw_addr *ha;
        struct mlx4_en_mc_list *tmp;

        mlx4_en_clear_list(dev);
        netdev_for_each_mc_addr(ha, dev) {
                tmp = kzmalloc(sizeof(struct mlx4_en_mc_list), 0);
                if (!tmp) {
                        mlx4_en_clear_list(dev);
                        return;
                }
                memcpy(tmp->addr, ha->addr, Eaddrlen);
                list_add_tail(&tmp->list, &priv->mc_list);
        }
}

static void update_mclist_flags(struct mlx4_en_priv *priv,
                                struct list_head *dst,
                                struct list_head *src)
{
        struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc;
        bool found;

        /* Find all the entries that should be removed from dst:
         * these are the entries that are not found in src.
         */
        list_for_each_entry(dst_tmp, dst, list) {
                found = false;
                list_for_each_entry(src_tmp, src, list) {
                        if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
                                found = true;
                                break;
                        }
                }
                if (!found)
                        dst_tmp->action = MCLIST_REM;
        }

        /* Find the entries that exist in src but not in dst,
         * and mark them as needing to be added.
         */
        list_for_each_entry(src_tmp, src, list) {
                found = false;
                list_for_each_entry(dst_tmp, dst, list) {
                        if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
                                dst_tmp->action = MCLIST_NONE;
                                found = true;
                                break;
                        }
                }
                if (!found) {
                        new_mc = kmemdup(src_tmp,
                                         sizeof(struct mlx4_en_mc_list),
                                         MEM_WAIT);
                        if (!new_mc)
                                return;

                        new_mc->action = MCLIST_ADD;
                        list_add_tail(&new_mc->list, dst);
                }
        }
}
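
/*
 * Illustrative sketch: update_mclist_flags() computes a set difference in
 * O(|dst| * |src|).  E.g. with hypothetical lists dst = {A, B} and
 * src = {B, C}, A is marked MCLIST_REM, B is marked MCLIST_NONE, and a
 * copy of C is appended to dst as MCLIST_ADD; mlx4_en_do_multicast()
 * below then walks dst and attaches/detaches hardware rules accordingly.
 */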

static void mlx4_en_set_rx_mode(struct ether *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);

        if (!priv->port_up)
                return;

        queue_work(priv->mdev->workqueue, &priv->rx_mode_task);
}

static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
                                     struct mlx4_en_dev *mdev)
{
        int err = 0;

        if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
                if (netif_msg_rx_status(priv))
                        en_warn(priv, "Entering promiscuous mode\n");
                priv->flags |= MLX4_EN_FLAG_PROMISC;

                /* Enable promiscuous mode */
                switch (mdev->dev->caps.steering_mode) {
                case MLX4_STEERING_MODE_DEVICE_MANAGED:
                        err = mlx4_flow_steer_promisc_add(mdev->dev,
                                                          priv->port,
                                                          priv->base_qpn,
                                                          MLX4_FS_ALL_DEFAULT);
                        if (err)
                                en_err(priv, "Failed enabling promiscuous mode\n");
                        priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
                        break;

                case MLX4_STEERING_MODE_B0:
                        err = mlx4_unicast_promisc_add(mdev->dev,
                                                       priv->base_qpn,
                                                       priv->port);
                        if (err)
                                en_err(priv, "Failed enabling unicast promiscuous mode\n");

                        /* Add the default qp number as multicast
                         * promisc
                         */
                        if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
                                err = mlx4_multicast_promisc_add(mdev->dev,
                                                                 priv->base_qpn,
                                                                 priv->port);
                                if (err)
                                        en_err(priv, "Failed enabling multicast promiscuous mode\n");
                                priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
                        }
                        break;

                case MLX4_STEERING_MODE_A0:
                        err = mlx4_SET_PORT_qpn_calc(mdev->dev,
                                                     priv->port,
                                                     priv->base_qpn,
                                                     1);
                        if (err)
                                en_err(priv, "Failed enabling promiscuous mode\n");
                        break;
                }

                /* Disable port multicast filter (unconditionally) */
                err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
                                          0, MLX4_MCAST_DISABLE);
                if (err)
                        en_err(priv, "Failed disabling multicast filter\n");
        }
}

static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv,
                                       struct mlx4_en_dev *mdev)
{
        int err = 0;

        if (netif_msg_rx_status(priv))
                en_warn(priv, "Leaving promiscuous mode\n");
        priv->flags &= ~MLX4_EN_FLAG_PROMISC;

        /* Disable promiscuous mode */
        switch (mdev->dev->caps.steering_mode) {
        case MLX4_STEERING_MODE_DEVICE_MANAGED:
                err = mlx4_flow_steer_promisc_remove(mdev->dev,
                                                     priv->port,
                                                     MLX4_FS_ALL_DEFAULT);
                if (err)
                        en_err(priv, "Failed disabling promiscuous mode\n");
                priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
                break;

        case MLX4_STEERING_MODE_B0:
                err = mlx4_unicast_promisc_remove(mdev->dev,
                                                  priv->base_qpn,
                                                  priv->port);
                if (err)
                        en_err(priv, "Failed disabling unicast promiscuous mode\n");
                /* Disable Multicast promisc */
                if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
                        err = mlx4_multicast_promisc_remove(mdev->dev,
                                                            priv->base_qpn,
                                                            priv->port);
                        if (err)
                                en_err(priv, "Failed disabling multicast promiscuous mode\n");
                        priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
                }
                break;

        case MLX4_STEERING_MODE_A0:
                err = mlx4_SET_PORT_qpn_calc(mdev->dev,
                                             priv->port,
                                             priv->base_qpn, 0);
                if (err)
                        en_err(priv, "Failed disabling promiscuous mode\n");
                break;
        }
}

static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
                                 struct ether *dev,
                                 struct mlx4_en_dev *mdev)
{
        struct mlx4_en_mc_list *mclist, *tmp;
        uint64_t mcast_addr = 0;
        uint8_t mc_list[16] = {0};
        int err = 0;

        /* Enable/disable the multicast filter according to IFF_ALLMULTI */
        if (dev->flags & IFF_ALLMULTI) {
                err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
                                          0, MLX4_MCAST_DISABLE);
                if (err)
                        en_err(priv, "Failed disabling multicast filter\n");

                /* Add the default qp number as multicast promisc */
                if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
                        switch (mdev->dev->caps.steering_mode) {
                        case MLX4_STEERING_MODE_DEVICE_MANAGED:
                                err = mlx4_flow_steer_promisc_add(mdev->dev,
                                                                  priv->port,
                                                                  priv->base_qpn,
                                                                  MLX4_FS_MC_DEFAULT);
                                break;

                        case MLX4_STEERING_MODE_B0:
                                err = mlx4_multicast_promisc_add(mdev->dev,
                                                                 priv->base_qpn,
                                                                 priv->port);
                                break;

                        case MLX4_STEERING_MODE_A0:
                                break;
                        }
                        if (err)
                                en_err(priv, "Failed entering multicast promisc mode\n");
                        priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
                }
        } else {
                /* Disable Multicast promisc */
                if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
                        switch (mdev->dev->caps.steering_mode) {
                        case MLX4_STEERING_MODE_DEVICE_MANAGED:
                                err = mlx4_flow_steer_promisc_remove(mdev->dev,
                                                                     priv->port,
                                                                     MLX4_FS_MC_DEFAULT);
                                break;

                        case MLX4_STEERING_MODE_B0:
                                err = mlx4_multicast_promisc_remove(mdev->dev,
                                                                    priv->base_qpn,
                                                                    priv->port);
                                break;

                        case MLX4_STEERING_MODE_A0:
                                break;
                        }
                        if (err)
                                en_err(priv, "Failed disabling multicast promiscuous mode\n");
                        priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
                }

                err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
                                          0, MLX4_MCAST_DISABLE);
                if (err)
                        en_err(priv, "Failed disabling multicast filter\n");

                /* Flush mcast filter and init it with broadcast address */
                mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
                                    1, MLX4_MCAST_CONFIG);

                /* Update the multicast list - we cache all addresses so they
                 * won't change while the HW is updated while holding the
                 * command semaphore */
                qlock(&dev->qlock);
                mlx4_en_cache_mclist(dev);
                qunlock(&dev->qlock);
                list_for_each_entry(mclist, &priv->mc_list, list) {
                        mcast_addr = mlx4_mac_to_u64(mclist->addr);
                        mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
                                            mcast_addr, 0, MLX4_MCAST_CONFIG);
                }
                err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
                                          0, MLX4_MCAST_ENABLE);
                if (err)
                        en_err(priv, "Failed enabling multicast filter\n");

                update_mclist_flags(priv, &priv->curr_list, &priv->mc_list);
                list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
                        if (mclist->action == MCLIST_REM) {
                                /* detach this address and delete from list */
                                memcpy(&mc_list[10], mclist->addr, Eaddrlen);
                                mc_list[5] = priv->port;
                                err = mlx4_multicast_detach(mdev->dev,
                                                            &priv->rss_map.indir_qp,
                                                            mc_list,
                                                            MLX4_PROT_ETH,
                                                            mclist->reg_id);
                                if (err)
                                        en_err(priv, "Failed to detach multicast address\n");

                                if (mclist->tunnel_reg_id) {
                                        err = mlx4_flow_detach(priv->mdev->dev,
                                                               mclist->tunnel_reg_id);
                                        if (err)
                                                en_err(priv, "Failed to detach multicast address\n");
                                }

                                /* remove from list */
                                list_del(&mclist->list);
                                kfree(mclist);
                        } else if (mclist->action == MCLIST_ADD) {
                                /* attach the address */
                                memcpy(&mc_list[10], mclist->addr, Eaddrlen);
                                /* needed for B0 steering support */
                                mc_list[5] = priv->port;
                                err = mlx4_multicast_attach(mdev->dev,
                                                            &priv->rss_map.indir_qp,
                                                            mc_list,
                                                            priv->port, 0,
                                                            MLX4_PROT_ETH,
                                                            &mclist->reg_id);
                                if (err)
                                        en_err(priv, "Failed to attach multicast address\n");

                                err = mlx4_en_tunnel_steer_add(priv, &mc_list[10],
                                                               priv->base_qpn,
                                                               &mclist->tunnel_reg_id);
                                if (err)
                                        en_err(priv, "Failed to attach multicast address\n");
                        }
                }
        }
}

static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
                                 struct ether *dev,
                                 struct mlx4_en_dev *mdev)
{
        struct netdev_hw_addr *ha;
        struct mlx4_mac_entry *entry;
        struct hlist_node *tmp;
        bool found;
        uint64_t mac;
        int err = 0;
        struct hlist_head *bucket;
        unsigned int i;
        int removed = 0;
        uint32_t prev_flags;

        /* Note that we do not need to protect our mac_hash traversal with rcu,
         * since all modification code is protected by mdev->state_lock
         */

        /* find what to remove */
        for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
                bucket = &priv->mac_hash[i];
                hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
                        found = false;
                        netdev_for_each_uc_addr(ha, dev) {
                                if (ether_addr_equal_64bits(entry->mac,
                                                            ha->addr)) {
                                        found = true;
                                        break;
                                }
                        }

                        /* MAC address of the port is not in uc list */
                        if (ether_addr_equal_64bits(entry->mac,
                                                    priv->current_mac))
                                found = true;

                        if (!found) {
                                mac = mlx4_mac_to_u64(entry->mac);
                                mlx4_en_uc_steer_release(priv, entry->mac,
                                                         priv->base_qpn,
                                                         entry->reg_id);
                                mlx4_unregister_mac(mdev->dev, priv->port, mac);

                                hlist_del_rcu(&entry->hlist);
                                kfree_rcu(entry, rcu);
                                en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n",
                                       entry->mac, priv->port);
                                ++removed;
                        }
                }
        }

        /* if we didn't remove anything, there is no use in trying to add
         * again once we are in a forced promisc mode state
         */
        if ((priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) && 0 == removed)
                return;

        prev_flags = priv->flags;
        priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;

        /* find what to add */
        netdev_for_each_uc_addr(ha, dev) {
                found = false;
                bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]];
                hlist_for_each_entry(entry, bucket, hlist) {
                        if (ether_addr_equal_64bits(entry->mac, ha->addr)) {
                                found = true;
                                break;
                        }
                }

                if (!found) {
                        entry = kmalloc(sizeof(*entry), MEM_WAIT);
                        if (!entry) {
                                en_err(priv, "Failed adding MAC %pM on port:%d (out of memory)\n",
                                       ha->addr, priv->port);
                                priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
                                break;
                        }
                        mac = mlx4_mac_to_u64(ha->addr);
                        memcpy(entry->mac, ha->addr, Eaddrlen);
                        err = mlx4_register_mac(mdev->dev, priv->port, mac);
                        if (err < 0) {
                                en_err(priv, "Failed registering MAC %pM on port %d: %d\n",
                                       ha->addr, priv->port, err);
                                kfree(entry);
                                priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
                                break;
                        }
                        err = mlx4_en_uc_steer_add(priv, ha->addr,
                                                   &priv->base_qpn,
                                                   &entry->reg_id);
                        if (err) {
                                en_err(priv, "Failed adding MAC %pM on port %d: %d\n",
                                       ha->addr, priv->port, err);
                                mlx4_unregister_mac(mdev->dev, priv->port, mac);
                                kfree(entry);
                                priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
                                break;
                        } else {
                                unsigned int mac_hash;

                                en_dbg(DRV, priv, "Added MAC %pM on port:%d\n",
                                       ha->addr, priv->port);
                                mac_hash = ha->addr[MLX4_EN_MAC_HASH_IDX];
                                bucket = &priv->mac_hash[mac_hash];
                                hlist_add_head_rcu(&entry->hlist, bucket);
                        }
                }
        }

        if (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) {
                en_warn(priv, "Forcing promiscuous mode on port:%d\n",
                        priv->port);
        } else if (prev_flags & MLX4_EN_FLAG_FORCE_PROMISC) {
                en_warn(priv, "Stop forcing promiscuous mode on port:%d\n",
                        priv->port);
        }
}
#endif

static void mlx4_en_do_set_rx_mode(struct work_struct *work)
{
        struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
                                                 rx_mode_task);
        struct mlx4_en_dev *mdev = priv->mdev;
        struct ether *dev = priv->dev;

        qlock(&mdev->state_lock);
        if (!mdev->device_up) {
                en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n");
                goto out;
        }
        if (!priv->port_up) {
                en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n");
                goto out;
        }

        if (!netif_carrier_ok(dev)) {
                if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
                        if (priv->port_state.link_state) {
                                priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
                                netif_carrier_on(dev);
                                en_dbg(LINK, priv, "Link Up\n");
                        }
                }
        }

#if 0 // AKAROS_PORT
        if (dev->priv_flags & IFF_UNICAST_FLT)
                mlx4_en_do_uc_filter(priv, dev, mdev);

        /* Promiscuous mode: disable all filters */
        if ((dev->flags & IFF_PROMISC) ||
            (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) {
                mlx4_en_set_promisc_mode(priv, mdev);
                goto out;
        }

        /* Not in promiscuous mode */
        if (priv->flags & MLX4_EN_FLAG_PROMISC)
                mlx4_en_clear_promisc_mode(priv, mdev);

        mlx4_en_do_multicast(priv, dev, mdev);
#endif
out:
        qunlock(&mdev->state_lock);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mlx4_en_netpoll(struct ether *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_cq *cq;
        int i;

        for (i = 0; i < priv->rx_ring_num; i++) {
                cq = priv->rx_cq[i];
                napi_schedule(&cq->napi);
        }
}
#endif

static void mlx4_en_tx_timeout(struct ether *dev)
{
        panic("Disabled");
#if 0 // AKAROS_PORT
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        int i;

        if (netif_msg_timer(priv))
                en_warn(priv, "Tx timeout called on port:%d\n", priv->port);

        for (i = 0; i < priv->tx_ring_num; i++) {
                if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i)))
                        continue;
                en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n",
                        i, priv->tx_ring[i]->qpn, priv->tx_ring[i]->cqn,
                        priv->tx_ring[i]->cons, priv->tx_ring[i]->prod);
        }

        priv->port_stats.tx_timeout++;
        en_dbg(DRV, priv, "Scheduling watchdog\n");
        queue_work(mdev->workqueue, &priv->watchdog_task);
#endif
}

static struct netif_stats *mlx4_en_get_stats(struct ether *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);

        spin_lock(&priv->stats_lock);
        memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats));
        spin_unlock(&priv->stats_lock);

        return &priv->ret_stats;
}

1345 static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
1346 {
1347         struct mlx4_en_cq *cq;
1348         int i;
1349
1350         /* If we haven't received a specific coalescing setting
1351          * (module param), we set the moderation parameters as follows:
1352          * - moder_cnt is set to the number of MTU-sized packets to
1353          *   satisfy our coalescing target.
1354          * - moder_time is set to a fixed value.
1355          */
1356         priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
1357         priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
1358         priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
1359         priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
1360         en_dbg(INTR, priv, "Default coalescing params for mtu:%d - rx_frames:%d rx_usecs:%d\n",
1361                priv->dev->mtu, priv->rx_frames, priv->rx_usecs);
1362
1363         /* Setup cq moderation params */
1364         for (i = 0; i < priv->rx_ring_num; i++) {
1365                 cq = priv->rx_cq[i];
1366                 cq->moder_cnt = priv->rx_frames;
1367                 cq->moder_time = priv->rx_usecs;
1368                 priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
1369                 priv->last_moder_packets[i] = 0;
1370                 priv->last_moder_bytes[i] = 0;
1371         }
1372
1373         for (i = 0; i < priv->tx_ring_num; i++) {
1374                 cq = priv->tx_cq[i];
1375                 cq->moder_cnt = priv->tx_frames;
1376                 cq->moder_time = priv->tx_usecs;
1377         }
1378
1379         /* Reset auto-moderation params */
1380         priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
1381         priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
1382         priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
1383         priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
1384         priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
1385         priv->adaptive_rx_coal = 1;
1386         priv->last_moder_jiffies = 0;
1387         priv->last_moder_tx_packets = 0;
1388 }
1389
1390 static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
1391 {
1392         panic("Disabled");
1393 #if 0 // AKAROS_PORT
1394         unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
1395         struct mlx4_en_cq *cq;
1396         unsigned long packets;
1397         unsigned long rate;
1398         unsigned long avg_pkt_size;
1399         unsigned long rx_packets;
1400         unsigned long rx_bytes;
1401         unsigned long rx_pkt_diff;
1402         int moder_time;
1403         int ring, err;
1404
1405         if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
1406                 return;
1407
1408         for (ring = 0; ring < priv->rx_ring_num; ring++) {
1409                 spin_lock(&priv->stats_lock);
1410                 rx_packets = priv->rx_ring[ring]->packets;
1411                 rx_bytes = priv->rx_ring[ring]->bytes;
1412                 spin_unlock(&priv->stats_lock);
1413
1414                 rx_pkt_diff = ((unsigned long) (rx_packets -
1415                                 priv->last_moder_packets[ring]));
1416                 packets = rx_pkt_diff;
1417                 rate = packets * HZ / period;
1418                 avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
1419                                 priv->last_moder_bytes[ring])) / packets : 0;
1420
1421                 /* Apply auto-moderation only when the packet rate
1422                  * exceeds a rate at which it matters */
1423                 if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) &&
1424                     avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
1425                         if (rate < priv->pkt_rate_low)
1426                                 moder_time = priv->rx_usecs_low;
1427                         else if (rate > priv->pkt_rate_high)
1428                                 moder_time = priv->rx_usecs_high;
1429                         else
1430                                 moder_time = (rate - priv->pkt_rate_low) *
1431                                         (priv->rx_usecs_high - priv->rx_usecs_low) /
1432                                         (priv->pkt_rate_high - priv->pkt_rate_low) +
1433                                         priv->rx_usecs_low;
1434                 } else {
1435                         moder_time = priv->rx_usecs_low;
1436                 }
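                     /* Worked example of the linear interpolation above,
                      * using illustrative numbers rather than this build's
                      * actual defaults: with pkt_rate_low = 100000 pkt/s at
                      * rx_usecs_low = 16 and pkt_rate_high = 200000 pkt/s at
                      * rx_usecs_high = 128, a measured rate of 150000 pkt/s
                      * gives (150000 - 100000) * (128 - 16) / (200000 - 100000)
                      * + 16 = 72 usecs of moderation time. */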
1437
1438                 if (moder_time != priv->last_moder_time[ring]) {
1439                         priv->last_moder_time[ring] = moder_time;
1440                         cq = priv->rx_cq[ring];
1441                         cq->moder_time = moder_time;
1442                         cq->moder_cnt = priv->rx_frames;
1443                         err = mlx4_en_set_cq_moder(priv, cq);
1444                         if (err)
1445                                 en_err(priv, "Failed modifying moderation for cq:%d\n",
1446                                        ring);
1447                 }
1448                 priv->last_moder_packets[ring] = rx_packets;
1449                 priv->last_moder_bytes[ring] = rx_bytes;
1450         }
1451
1452         priv->last_moder_jiffies = jiffies;
1453 #endif
1454 }
1455
1456 static void mlx4_en_do_get_stats(struct work_struct *work)
1457 {
1458         panic("Disabled");
1459 #if 0 // AKAROS_PORT
1460         struct delayed_work *delay = to_delayed_work(work);
1461         struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
1462                                                  stats_task);
1463         struct mlx4_en_dev *mdev = priv->mdev;
1464         int err;
1465
1466         qlock(&mdev->state_lock);
1467         if (mdev->device_up) {
1468                 if (priv->port_up) {
1469                         err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
1470                         if (err)
1471                                 en_dbg(HW, priv, "Could not update stats\n");
1472
1473                         mlx4_en_auto_moderation(priv);
1474                 }
1475
1476                 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
1477         }
1478         if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
1479                 mlx4_en_do_set_mac(priv, priv->current_mac);
1480                 mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
1481         }
1482         qunlock(&mdev->state_lock);
1483 #endif
1484 }
1485
1486 /* mlx4_en_service_task - Run service task for tasks that need to be done
1487  * periodically
1488  */
1489 static void mlx4_en_service_task(struct work_struct *work)
1490 {
1491         struct delayed_work *delay = to_delayed_work(work);
1492         struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
1493                                                  service_task);
1494         struct mlx4_en_dev *mdev = priv->mdev;
1495
1496         qlock(&mdev->state_lock);
1497         if (mdev->device_up) {
1498 #if 0 // AKAROS_PORT
1499                 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
1500                         mlx4_en_ptp_overflow_check(mdev);
1501 #endif
1502
1503                 mlx4_en_recover_from_oom(priv);
1504                 queue_delayed_work(mdev->workqueue, &priv->service_task,
1505                                    SERVICE_TASK_DELAY);
1506         }
1507         qunlock(&mdev->state_lock);
1508 }
1509
1510 static void mlx4_en_linkstate(struct work_struct *work)
1511 {
1512         struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
1513                                                  linkstate_task);
1514         struct mlx4_en_dev *mdev = priv->mdev;
1515         int linkstate = priv->link_state;
1516
1517         qlock(&mdev->state_lock);
1518         /* If the observable port state changed, set the carrier state and
1519          * report to the system log */
1520         if (priv->last_link_state != linkstate) {
1521                 if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
1522                         en_info(priv, "Link Down\n");
1523                         netif_carrier_off(priv->dev);
1524                 } else {
1525                         en_info(priv, "Link Up\n");
1526                         netif_carrier_on(priv->dev);
1527                 }
1528         }
1529         priv->last_link_state = linkstate;
1530         qunlock(&mdev->state_lock);
1531 }
1532
1533 static int mlx4_en_init_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
1534 {
1535         struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx];
1536         int numa_node = priv->mdev->dev->numa_node;
1537
1538         if (!zalloc_cpumask_var(&ring->affinity_mask, MEM_WAIT))
1539                 return -ENOMEM;
1540
1541         cpumask_set_cpu(cpumask_local_spread(ring_idx, numa_node),
1542                         ring->affinity_mask);
1543         return 0;
1544 }
1545
1546 static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
1547 {
1548         panic("Disabled");
1549 #if 0 // AKAROS_PORT
1550         free_cpumask_var(priv->rx_ring[ring_idx]->affinity_mask);
1551 #endif
1552 }
1553
1554 int mlx4_en_start_port(struct ether *dev)
1555 {
1556         struct mlx4_en_priv *priv = netdev_priv(dev);
1557         struct mlx4_en_dev *mdev = priv->mdev;
1558         struct mlx4_en_cq *cq;
1559         struct mlx4_en_tx_ring *tx_ring;
1560         int rx_index = 0;
1561         int tx_index = 0;
1562         int err = 0;
1563         int i;
1564         int j;
1565         uint8_t mc_list[16] = {0};
1566
1567         if (priv->port_up) {
1568                 en_dbg(DRV, priv, "start port called while port already up\n");
1569                 return 0;
1570         }
1571
1572         INIT_LIST_HEAD(&priv->mc_list);
1573         INIT_LIST_HEAD(&priv->curr_list);
1574 #if 0 // AKAROS_PORT
1575         INIT_LIST_HEAD(&priv->ethtool_list);
1576         memset(&priv->ethtool_rules[0], 0,
1577                sizeof(struct ethtool_flow_id) * MAX_NUM_OF_FS_RULES);
1578 #endif
1579
1580         /* Calculate Rx buf size */
1581         dev->mtu = MIN(dev->mtu, priv->max_mtu);
1582         mlx4_en_calc_rx_buf(dev);
1583         en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);
1584
1585         /* Configure rx cq's and rings */
1586         err = mlx4_en_activate_rx_rings(priv);
1587         if (err) {
1588                 en_err(priv, "Failed to activate RX rings\n");
1589                 return err;
1590         }
1591         for (i = 0; i < priv->rx_ring_num; i++) {
1592                 cq = priv->rx_cq[i];
1593
1594                 mlx4_en_cq_init_lock(cq);
1595
1596                 err = mlx4_en_init_affinity_hint(priv, i);
1597                 if (err) {
1598                         en_err(priv, "Failed preparing IRQ affinity hint\n");
1599                         goto cq_err;
1600                 }
1601
1602                 err = mlx4_en_activate_cq(priv, cq, i);
1603                 if (err) {
1604                         en_err(priv, "Failed activating Rx CQ\n");
1605                         mlx4_en_free_affinity_hint(priv, i);
1606                         goto cq_err;
1607                 }
1608
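                     /* Initialize the ownership bit of every CQE so the
                      * polling code does not treat entries the HW has not
                      * yet written as valid completions. */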
1609                 for (j = 0; j < cq->size; j++) {
1610                         struct mlx4_cqe *cqe = NULL;
1611
1612                         cqe = mlx4_en_get_cqe(cq->buf, j, priv->cqe_size) +
1613                               priv->cqe_factor;
1614                         cqe->owner_sr_opcode = MLX4_CQE_OWNER_MASK;
1615                 }
1616
1617                 err = mlx4_en_set_cq_moder(priv, cq);
1618                 if (err) {
1619                         en_err(priv, "Failed setting cq moderation parameters\n");
1620                         mlx4_en_deactivate_cq(priv, cq);
1621                         mlx4_en_free_affinity_hint(priv, i);
1622                         goto cq_err;
1623                 }
1624                 mlx4_en_arm_cq(priv, cq);
1625                 priv->rx_ring[i]->cqn = cq->mcq.cqn;
1626                 ++rx_index;
1627         }
1628
1629         /* Set qp number */
1630         en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
1631         err = mlx4_en_get_qp(priv);
1632         if (err) {
1633                 en_err(priv, "Failed getting eth qp\n");
1634                 goto cq_err;
1635         }
1636         mdev->mac_removed[priv->port] = 0;
1637
1638         err = mlx4_en_config_rss_steer(priv);
1639         if (err) {
1640                 en_err(priv, "Failed configuring rss steering\n");
1641                 goto mac_err;
1642         }
1643
1644         err = mlx4_en_create_drop_qp(priv);
1645         if (err)
1646                 goto rss_err;
1647
1648         /* Configure tx cq's and rings */
1649         for (i = 0; i < priv->tx_ring_num; i++) {
1650                 /* Configure cq */
1651                 cq = priv->tx_cq[i];
1652                 err = mlx4_en_activate_cq(priv, cq, i);
1653                 if (err) {
1654                         en_err(priv, "Failed allocating Tx CQ\n");
1655                         goto tx_err;
1656                 }
1657                 err = mlx4_en_set_cq_moder(priv, cq);
1658                 if (err) {
1659                         en_err(priv, "Failed setting cq moderation parameters\n");
1660                         mlx4_en_deactivate_cq(priv, cq);
1661                         goto tx_err;
1662                 }
1663                 en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
1664                 cq->buf->wqe_index = cpu_to_be16(0xffff);
1665
1666                 /* Configure ring */
1667                 tx_ring = priv->tx_ring[i];
1668                 err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
1669                         i / priv->num_tx_rings_p_up);
1670                 if (err) {
1671                         en_err(priv, "Failed allocating Tx ring\n");
1672                         mlx4_en_deactivate_cq(priv, cq);
1673                         goto tx_err;
1674                 }
1675 #if 0 // AKAROS_PORT
1676                 tx_ring->tx_queue = netdev_get_tx_queue(dev, i);
1677 #else
1678                 tx_ring->tx_queue = 0; /* TODO multi-queue support. */
1679 #endif
1680
1681                 /* Arm CQ for TX completions */
1682                 mlx4_en_arm_cq(priv, cq);
1683
1684                 /* Set initial ownership of all Tx TXBBs to SW (1) */
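                     /* (Writing 0xffffffff sets, among others, the ownership
                      * bit in each STAMP_STRIDE-spaced dword, so every TXBB
                      * starts out software-owned.) */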
1685                 for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
1686                         *((uint32_t *) (tx_ring->buf + j)) = 0xffffffff;
1687                 ++tx_index;
1688         }
1689
1690         /* Configure port */
1691         err = mlx4_SET_PORT_general(mdev->dev, priv->port,
1692                                     priv->rx_skb_size + ETH_FCS_LEN,
1693                                     priv->prof->tx_pause,
1694                                     priv->prof->tx_ppp,
1695                                     priv->prof->rx_pause,
1696                                     priv->prof->rx_ppp);
1697         if (err) {
1698                 en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
1699                        priv->port, err);
1700                 goto tx_err;
1701         }
1702         /* Set default qp number */
1703         err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
1704         if (err) {
1705                 en_err(priv, "Failed setting default qp numbers\n");
1706                 goto tx_err;
1707         }
1708
1709 #if 0 // AKAROS_PORT
1710         if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
1711                 err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1);
1712                 if (err) {
1713                         en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
1714                                err);
1715                         goto tx_err;
1716                 }
1717         }
1718 #endif
1719
1720         /* Init port */
1721         en_dbg(HW, priv, "Initializing port\n");
1722         err = mlx4_INIT_PORT(mdev->dev, priv->port);
1723         if (err) {
1724                 en_err(priv, "Failed Initializing port\n");
1725                 goto tx_err;
1726         }
1727
1728         /* Attach rx QP to broadcast address */
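             /* mc_list is laid out as a 16-byte GID: the MAC address goes in
              * bytes 10..15 and byte 5 carries the port number. */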
1729         eth_broadcast_addr(&mc_list[10]);
1730         mc_list[5] = priv->port; /* needed for B0 steering support */
1731         if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
1732                                   priv->port, 0, MLX4_PROT_ETH,
1733                                   &priv->broadcast_id))
1734                 mlx4_warn(mdev, "Failed Attaching Broadcast\n");
1735
1736         /* Must redo promiscuous mode setup. */
1737         priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);
1738
1739         /* Schedule multicast task to populate multicast list */
1740         queue_work(mdev->workqueue, &priv->rx_mode_task);
1741
1742 #ifdef CONFIG_MLX4_EN_VXLAN
1743         if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
1744                 vxlan_get_rx_port(dev);
1745 #endif
1746         priv->port_up = true;
1747 #if 0 // AKAROS_PORT
1748         netif_tx_start_all_queues(dev);
1749         netif_device_attach(dev);
1750 #endif
1751
1752         return 0;
1753
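     /* Error unwinding: each label below releases what was set up after the
      * previous one, so a failure at any step tears down only the resources
      * that were already activated. */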
1754 tx_err:
1755         while (tx_index--) {
1756                 mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[tx_index]);
1757                 mlx4_en_deactivate_cq(priv, priv->tx_cq[tx_index]);
1758         }
1759         mlx4_en_destroy_drop_qp(priv);
1760 rss_err:
1761         mlx4_en_release_rss_steer(priv);
1762 mac_err:
1763         mlx4_en_put_qp(priv);
1764 cq_err:
1765         while (rx_index--) {
1766                 mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
1767                 mlx4_en_free_affinity_hint(priv, rx_index);
1768         }
1769         for (i = 0; i < priv->rx_ring_num; i++)
1770                 mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
1771
1772         return err; /* need to close devices */
1773 }
1774
1775
1776 void mlx4_en_stop_port(struct ether *dev, int detach)
1777 {
1778         panic("Disabled");
1779 #if 0 // AKAROS_PORT
1780         struct mlx4_en_priv *priv = netdev_priv(dev);
1781         struct mlx4_en_dev *mdev = priv->mdev;
1782         struct mlx4_en_mc_list *mclist, *tmp;
1783 #if 0 // AKAROS_PORT
1784         struct ethtool_flow_id *flow, *tmp_flow;
1785 #endif
1786         int i;
1787         uint8_t mc_list[16] = {0};
1788
1789         if (!priv->port_up) {
1790                 en_dbg(DRV, priv, "stop port called while port already down\n");
1791                 return;
1792         }
1793
1794         /* close port */
1795         mlx4_CLOSE_PORT(mdev->dev, priv->port);
1796
1797         /* Synchronize with tx routine */
1798         netif_tx_lock_bh(dev);
1799         if (detach)
1800                 netif_device_detach(dev);
1801         netif_tx_stop_all_queues(dev);
1802         netif_tx_unlock_bh(dev);
1803
1804         netif_tx_disable(dev);
1805
1806         /* Set port as not active */
1807         priv->port_up = false;
1808
1809         /* Promiscuous mode */
1810         if (mdev->dev->caps.steering_mode ==
1811             MLX4_STEERING_MODE_DEVICE_MANAGED) {
1812                 priv->flags &= ~(MLX4_EN_FLAG_PROMISC |
1813                                  MLX4_EN_FLAG_MC_PROMISC);
1814                 mlx4_flow_steer_promisc_remove(mdev->dev,
1815                                                priv->port,
1816                                                MLX4_FS_ALL_DEFAULT);
1817                 mlx4_flow_steer_promisc_remove(mdev->dev,
1818                                                priv->port,
1819                                                MLX4_FS_MC_DEFAULT);
1820         } else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
1821                 priv->flags &= ~MLX4_EN_FLAG_PROMISC;
1822
1823                 /* Disable promiscuous mode */
1824                 mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
1825                                             priv->port);
1826
1827                 /* Disable Multicast promisc */
1828                 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
1829                         mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
1830                                                       priv->port);
1831                         priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
1832                 }
1833         }
1834
1835         /* Detach all multicasts */
1836         eth_broadcast_addr(&mc_list[10]);
1837         mc_list[5] = priv->port; /* needed for B0 steering support */
1838         mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
1839                               MLX4_PROT_ETH, priv->broadcast_id);
1840         list_for_each_entry(mclist, &priv->curr_list, list) {
1841                 memcpy(&mc_list[10], mclist->addr, Eaddrlen);
1842                 mc_list[5] = priv->port;
1843                 mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
1844                                       mc_list, MLX4_PROT_ETH, mclist->reg_id);
1845                 if (mclist->tunnel_reg_id)
1846                         mlx4_flow_detach(mdev->dev, mclist->tunnel_reg_id);
1847         }
1848         mlx4_en_clear_list(dev);
1849         list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
1850                 list_del(&mclist->list);
1851                 kfree(mclist);
1852         }
1853
1854         /* Flush multicast filter */
1855         mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
1856
1857 #if 0 // AKAROS_PORT
1858         /* Remove flow steering rules for the port */
1859         if (mdev->dev->caps.steering_mode ==
1860             MLX4_STEERING_MODE_DEVICE_MANAGED) {
1861                 ASSERT_RTNL();
1862                 list_for_each_entry_safe(flow, tmp_flow,
1863                                          &priv->ethtool_list, list) {
1864                         mlx4_flow_detach(mdev->dev, flow->id);
1865                         list_del(&flow->list);
1866                 }
1867         }
1868 #endif
1869
1870         mlx4_en_destroy_drop_qp(priv);
1871
1872         /* Free TX Rings */
1873         for (i = 0; i < priv->tx_ring_num; i++) {
1874                 mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[i]);
1875                 mlx4_en_deactivate_cq(priv, priv->tx_cq[i]);
1876         }
1877         kthread_usleep(1000 * 10);
1878
1879         for (i = 0; i < priv->tx_ring_num; i++)
1880                 mlx4_en_free_tx_buf(dev, priv->tx_ring[i]);
1881
1882         /* Free RSS qps */
1883         mlx4_en_release_rss_steer(priv);
1884
1885         /* Unregister MAC address for the port */
1886         mlx4_en_put_qp(priv);
1887         if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN))
1888                 mdev->mac_removed[priv->port] = 1;
1889
1890         /* Free RX Rings */
1891         for (i = 0; i < priv->rx_ring_num; i++) {
1892                 struct mlx4_en_cq *cq = priv->rx_cq[i];
1893
1894                 local_bh_disable();
1895                 while (!mlx4_en_cq_lock_napi(cq)) {
1896                         pr_info("CQ %d locked\n", i);
1897                         mdelay(1);
1898                 }
1899                 local_bh_enable();
1900
1901                 napi_synchronize(&cq->napi);
1902                 mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
1903                 mlx4_en_deactivate_cq(priv, cq);
1904
1905                 mlx4_en_free_affinity_hint(priv, i);
1906         }
1907 #endif
1908 }
1909
1910 static void mlx4_en_restart(struct work_struct *work)
1911 {
1912         struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
1913                                                  watchdog_task);
1914         struct mlx4_en_dev *mdev = priv->mdev;
1915         struct ether *dev = priv->dev;
1916
1917         en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
1918
1919         qlock(&mdev->state_lock);
1920         if (priv->port_up) {
1921                 mlx4_en_stop_port(dev, 1);
1922                 if (mlx4_en_start_port(dev))
1923                         en_err(priv, "Failed restarting port %d\n", priv->port);
1924         }
1925         qunlock(&mdev->state_lock);
1926 }
1927
1928 static void mlx4_en_clear_stats(struct ether *dev)
1929 {
1930         struct mlx4_en_priv *priv = netdev_priv(dev);
1931         struct mlx4_en_dev *mdev = priv->mdev;
1932         int i;
1933
1934 #if 0 // AKAROS_PORT
1935         if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
1936                 en_dbg(HW, priv, "Failed dumping statistics\n");
1937 #endif
1938
1939         memset(&priv->stats, 0, sizeof(priv->stats));
1940         memset(&priv->pstats, 0, sizeof(priv->pstats));
1941         memset(&priv->pkstats, 0, sizeof(priv->pkstats));
1942         memset(&priv->port_stats, 0, sizeof(priv->port_stats));
1943         memset(&priv->rx_flowstats, 0, sizeof(priv->rx_flowstats));
1944         memset(&priv->tx_flowstats, 0, sizeof(priv->tx_flowstats));
1945         memset(&priv->rx_priority_flowstats, 0,
1946                sizeof(priv->rx_priority_flowstats));
1947         memset(&priv->tx_priority_flowstats, 0,
1948                sizeof(priv->tx_priority_flowstats));
1949
1950         for (i = 0; i < priv->tx_ring_num; i++) {
1951                 priv->tx_ring[i]->bytes = 0;
1952                 priv->tx_ring[i]->packets = 0;
1953                 priv->tx_ring[i]->tx_csum = 0;
1954         }
1955         for (i = 0; i < priv->rx_ring_num; i++) {
1956                 priv->rx_ring[i]->bytes = 0;
1957                 priv->rx_ring[i]->packets = 0;
1958                 priv->rx_ring[i]->csum_ok = 0;
1959                 priv->rx_ring[i]->csum_none = 0;
1960                 priv->rx_ring[i]->csum_complete = 0;
1961         }
1962 }
1963
1964 int mlx4_en_open(struct ether *dev)
1965 {
1966         struct mlx4_en_priv *priv = netdev_priv(dev);
1967         struct mlx4_en_dev *mdev = priv->mdev;
1968         int err = 0;
1969
1970         qlock(&mdev->state_lock);
1971
1972         if (!mdev->device_up) {
1973                 en_err(priv, "Cannot open - device down/disabled\n");
1974                 err = -EBUSY;
1975                 goto out;
1976         }
1977
1978         /* Reset HW statistics and SW counters */
1979         mlx4_en_clear_stats(dev);
1980
1981         err = mlx4_en_start_port(dev);
1982         if (err)
1983                 en_err(priv, "Failed starting port:%d\n", priv->port);
1984
1985 out:
1986         qunlock(&mdev->state_lock);
1987         return err;
1988 }
1989
1990
1991 static int mlx4_en_close(struct ether *dev)
1992 {
1993         struct mlx4_en_priv *priv = netdev_priv(dev);
1994         struct mlx4_en_dev *mdev = priv->mdev;
1995
1996         en_dbg(IFDOWN, priv, "Close port called\n");
1997
1998         qlock(&mdev->state_lock);
1999
2000         mlx4_en_stop_port(dev, 0);
2001         netif_carrier_off(dev);
2002
2003         qunlock(&mdev->state_lock);
2004         return 0;
2005 }
2006
2007 void mlx4_en_free_resources(struct mlx4_en_priv *priv)
2008 {
2009         int i;
2010
2011 #ifdef CONFIG_RFS_ACCEL
2012         free_irq_cpu_rmap(priv->dev->rx_cpu_rmap);
2013         priv->dev->rx_cpu_rmap = NULL;
2014 #endif
2015
2016         for (i = 0; i < priv->tx_ring_num; i++) {
2017                 if (priv->tx_ring && priv->tx_ring[i])
2018                         mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
2019                 if (priv->tx_cq && priv->tx_cq[i])
2020                         mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
2021         }
2022
2023         for (i = 0; i < priv->rx_ring_num; i++) {
2024                 if (priv->rx_ring[i])
2025                         mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
2026                                 priv->prof->rx_ring_size, priv->stride);
2027                 if (priv->rx_cq[i])
2028                         mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
2029         }
2030
2031         if (priv->base_tx_qpn) {
2032                 mlx4_qp_release_range(priv->mdev->dev, priv->base_tx_qpn, priv->tx_ring_num);
2033                 priv->base_tx_qpn = 0;
2034         }
2035 }
2036
2037 int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
2038 {
2039         struct mlx4_en_port_profile *prof = priv->prof;
2040         int i;
2041         int node;
2042
2043         /* Create tx Rings */
2044         for (i = 0; i < priv->tx_ring_num; i++) {
2045                 node = cpu_to_node(i % num_online_cpus());
2046                 if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
2047                                       prof->tx_ring_size, i, TX, node))
2048                         goto err;
2049
2050                 if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
2051                                            prof->tx_ring_size, TXBB_SIZE,
2052                                            node, i))
2053                         goto err;
2054         }
2055
2056         /* Create rx Rings */
2057         for (i = 0; i < priv->rx_ring_num; i++) {
2058                 node = cpu_to_node(i % num_online_cpus());
2059                 if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
2060                                       prof->rx_ring_size, i, RX, node))
2061                         goto err;
2062
2063                 if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
2064                                            prof->rx_ring_size, priv->stride,
2065                                            node))
2066                         goto err;
2067         }
2068
2069 #ifdef CONFIG_RFS_ACCEL
2070         if (priv->mdev->dev->caps.comp_pool) {
2071                 priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->mdev->dev->caps.comp_pool);
2072                 if (!priv->dev->rx_cpu_rmap)
2073                         goto err;
2074         }
2075 #endif
2076
2077         return 0;
2078
2079 err:
2080         en_err(priv, "Failed to allocate NIC resources\n");
2081         for (i = 0; i < priv->rx_ring_num; i++) {
2082                 if (priv->rx_ring[i])
2083                         mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
2084                                                 prof->rx_ring_size,
2085                                                 priv->stride);
2086                 if (priv->rx_cq[i])
2087                         mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
2088         }
2089         for (i = 0; i < priv->tx_ring_num; i++) {
2090                 if (priv->tx_ring[i])
2091                         mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
2092                 if (priv->tx_cq[i])
2093                         mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
2094         }
2095         return -ENOMEM;
2096 }
2097
2098
2099 void mlx4_en_destroy_netdev(struct ether *dev)
2100 {
2101         panic("Disabled");
2102 #if 0 // AKAROS_PORT
2103         struct mlx4_en_priv *priv = netdev_priv(dev);
2104         struct mlx4_en_dev *mdev = priv->mdev;
2105
2106         en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
2107
2108         /* Unregister device - this will close the port if it was up */
2109         if (priv->registered)
2110                 unregister_netdev(dev);
2111
2112         if (priv->allocated)
2113                 mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);
2114
2115         cancel_delayed_work(&priv->stats_task);
2116         cancel_delayed_work(&priv->service_task);
2117         /* flush any pending task for this netdev */
2118         flush_workqueue(mdev->workqueue);
2119
2120         /* Detach the netdev so tasks would not attempt to access it */
2121         qlock(&mdev->state_lock);
2122         mdev->pndev[priv->port] = NULL;
2123         mdev->upper[priv->port] = NULL;
2124         qunlock(&mdev->state_lock);
2125
2126         mlx4_en_free_resources(priv);
2127
2128         kfree(priv->tx_ring);
2129         kfree(priv->tx_cq);
2130
2131         free_netdev(dev);
2132 #endif
2133 }
2134
2135 #if 0 // AKAROS_PORT
2136 static int mlx4_en_change_mtu(struct ether *dev, int new_mtu)
2137 {
2138         struct mlx4_en_priv *priv = netdev_priv(dev);
2139         struct mlx4_en_dev *mdev = priv->mdev;
2140         int err = 0;
2141
2142         en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
2143                  dev->mtu, new_mtu);
2144
2145         if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
2146                 en_err(priv, "Bad MTU size:%d.\n", new_mtu);
2147                 return -EPERM;
2148         }
2149         dev->mtu = new_mtu;
2150
2151         if (netif_running(dev)) {
2152                 qlock(&mdev->state_lock);
2153                 if (!mdev->device_up) {
2154                         /* NIC is probably restarting - let watchdog task reset
2155                          * the port */
2156                         en_dbg(DRV, priv, "Change MTU called with card down!?\n");
2157                 } else {
2158                         mlx4_en_stop_port(dev, 1);
2159                         err = mlx4_en_start_port(dev);
2160                         if (err) {
2161                                 en_err(priv, "Failed restarting port:%d\n",
2162                                          priv->port);
2163                                 queue_work(mdev->workqueue, &priv->watchdog_task);
2164                         }
2165                 }
2166                 qunlock(&mdev->state_lock);
2167         }
2168         return 0;
2169 }
2170
2171 static int mlx4_en_hwtstamp_set(struct ether *dev, struct ifreq *ifr)
2172 {
2173         struct mlx4_en_priv *priv = netdev_priv(dev);
2174         struct mlx4_en_dev *mdev = priv->mdev;
2175         struct hwtstamp_config config;
2176
2177         if (memcpy_from_user(current, &config, ifr->ifr_data, sizeof(config)))
2178                 return -EFAULT;
2179
2180         /* reserved for future extensions */
2181         if (config.flags)
2182                 return -EINVAL;
2183
2184         /* device doesn't support time stamping */
2185         if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS))
2186                 return -EINVAL;
2187
2188         /* TX HW timestamp */
2189         switch (config.tx_type) {
2190         case HWTSTAMP_TX_OFF:
2191         case HWTSTAMP_TX_ON:
2192                 break;
2193         default:
2194                 return -ERANGE;
2195         }
2196
2197         /* RX HW timestamp */
2198         switch (config.rx_filter) {
2199         case HWTSTAMP_FILTER_NONE:
2200                 break;
2201         case HWTSTAMP_FILTER_ALL:
2202         case HWTSTAMP_FILTER_SOME:
2203         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2204         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2205         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2206         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2207         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2208         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2209         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2210         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2211         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2212         case HWTSTAMP_FILTER_PTP_V2_EVENT:
2213         case HWTSTAMP_FILTER_PTP_V2_SYNC:
2214         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2215                 config.rx_filter = HWTSTAMP_FILTER_ALL;
2216                 break;
2217         default:
2218                 return -ERANGE;
2219         }
2220
2221         if (mlx4_en_reset_config(dev, config, dev->feat)) {
2222                 config.tx_type = HWTSTAMP_TX_OFF;
2223                 config.rx_filter = HWTSTAMP_FILTER_NONE;
2224         }
2225
2226         return memcpy_to_user(current, ifr->ifr_data, &config, sizeof(config)) ? -EFAULT : 0;
2227 }
2228
2229 static int mlx4_en_hwtstamp_get(struct ether *dev, struct ifreq *ifr)
2230 {
2231         struct mlx4_en_priv *priv = netdev_priv(dev);
2232
2233         return memcpy_to_user(current, ifr->ifr_data, &priv->hwtstamp_config,
2234                               sizeof(priv->hwtstamp_config)) ? -EFAULT : 0;
2235 }
2236
2237 static int mlx4_en_ioctl(struct ether *dev, struct ifreq *ifr, int cmd)
2238 {
2239         switch (cmd) {
2240         case SIOCSHWTSTAMP:
2241                 return mlx4_en_hwtstamp_set(dev, ifr);
2242         case SIOCGHWTSTAMP:
2243                 return mlx4_en_hwtstamp_get(dev, ifr);
2244         default:
2245                 return -EOPNOTSUPP;
2246         }
2247 }
2248
2249 static int mlx4_en_set_features(struct ether *netdev,
2250                                 netdev_features_t features)
2251 {
2252         struct mlx4_en_priv *priv = netdev_priv(netdev);
2253         bool reset = false;
2254         int ret = 0;
2255
2256         if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXFCS)) {
2257                 en_info(priv, "Turn %s RX-FCS\n",
2258                         (features & NETIF_F_RXFCS) ? "ON" : "OFF");
2259                 reset = true;
2260         }
2261
2262         if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXALL)) {
2263                 uint8_t ignore_fcs_value = (features & NETIF_F_RXALL) ? 1 : 0;
2264
2265                 en_info(priv, "Turn %s RX-ALL\n",
2266                         ignore_fcs_value ? "ON" : "OFF");
2267                 ret = mlx4_SET_PORT_fcs_check(priv->mdev->dev,
2268                                               priv->port, ignore_fcs_value);
2269                 if (ret)
2270                         return ret;
2271         }
2272
2273         if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
2274                 en_info(priv, "Turn %s RX vlan strip offload\n",
2275                         (features & NETIF_F_HW_VLAN_CTAG_RX) ? "ON" : "OFF");
2276                 reset = true;
2277         }
2278
2279         if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_TX))
2280                 en_info(priv, "Turn %s TX vlan strip offload\n",
2281                         (features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF");
2282
2283         if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_LOOPBACK)) {
2284                 en_info(priv, "Turn %s loopback\n",
2285                         (features & NETIF_F_LOOPBACK) ? "ON" : "OFF");
2286                 mlx4_en_update_loopback_state(netdev, features);
2287         }
2288
2289         if (reset) {
2290                 ret = mlx4_en_reset_config(netdev, priv->hwtstamp_config,
2291                                            features);
2292                 if (ret)
2293                         return ret;
2294         }
2295
2296         return 0;
2297 }
2298
2299 static int mlx4_en_set_vf_mac(struct ether *dev, int queue, uint8_t *mac)
2300 {
2301         struct mlx4_en_priv *en_priv = netdev_priv(dev);
2302         struct mlx4_en_dev *mdev = en_priv->mdev;
2303         uint64_t mac_u64 = mlx4_mac_to_u64(mac);
2304
2305         if (!is_valid_ether_addr(mac))
2306                 return -EINVAL;
2307
2308         return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64);
2309 }
2310
2311 static int mlx4_en_set_vf_vlan(struct ether *dev, int vf, uint16_t vlan,
2312                                uint8_t qos)
2313 {
2314         struct mlx4_en_priv *en_priv = netdev_priv(dev);
2315         struct mlx4_en_dev *mdev = en_priv->mdev;
2316
2317         return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos);
2318 }
2319
2320 static int mlx4_en_set_vf_rate(struct ether *dev, int vf, int min_tx_rate,
2321                                int max_tx_rate)
2322 {
2323         struct mlx4_en_priv *en_priv = netdev_priv(dev);
2324         struct mlx4_en_dev *mdev = en_priv->mdev;
2325
2326         return mlx4_set_vf_rate(mdev->dev, en_priv->port, vf, min_tx_rate,
2327                                 max_tx_rate);
2328 }
2329
2330 static int mlx4_en_set_vf_spoofchk(struct ether *dev, int vf, bool setting)
2331 {
2332         struct mlx4_en_priv *en_priv = netdev_priv(dev);
2333         struct mlx4_en_dev *mdev = en_priv->mdev;
2334
2335         return mlx4_set_vf_spoofchk(mdev->dev, en_priv->port, vf, setting);
2336 }
2337
2338 static int mlx4_en_get_vf_config(struct ether *dev, int vf,
2339                                  struct ifla_vf_info *ivf)
2340 {
2341         struct mlx4_en_priv *en_priv = netdev_priv(dev);
2342         struct mlx4_en_dev *mdev = en_priv->mdev;
2343
2344         return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf);
2345 }
2346
2347 static int mlx4_en_set_vf_link_state(struct ether *dev, int vf,
2348                                      int link_state)
2349 {
2350         struct mlx4_en_priv *en_priv = netdev_priv(dev);
2351         struct mlx4_en_dev *mdev = en_priv->mdev;
2352
2353         return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state);
2354 }
2355
2356 #define PORT_ID_BYTE_LEN 8
2357 static int mlx4_en_get_phys_port_id(struct ether *dev,
2358                                     struct netdev_phys_item_id *ppid)
2359 {
2360         struct mlx4_en_priv *priv = netdev_priv(dev);
2361         struct mlx4_dev *mdev = priv->mdev->dev;
2362         int i;
2363         uint64_t phys_port_id = mdev->caps.phys_port_id[priv->port];
2364
2365         if (!phys_port_id)
2366                 return -EOPNOTSUPP;
2367
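             /* The 64-bit id is serialized big-endian into ppid->id; e.g. an
              * (illustrative) phys_port_id of 0x0102030405060708 becomes
              * id[0] = 0x01 ... id[7] = 0x08. */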
2368         ppid->id_len = sizeof(phys_port_id);
2369         for (i = PORT_ID_BYTE_LEN - 1; i >= 0; --i) {
2370                 ppid->id[i] =  phys_port_id & 0xff;
2371                 phys_port_id >>= 8;
2372         }
2373         return 0;
2374 }
2375
2376 #ifdef CONFIG_MLX4_EN_VXLAN
2377 static void mlx4_en_add_vxlan_offloads(struct work_struct *work)
2378 {
2379         int ret;
2380         struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
2381                                                  vxlan_add_task);
2382
2383         ret = mlx4_config_vxlan_port(priv->mdev->dev, priv->vxlan_port);
2384         if (ret)
2385                 goto out;
2386
2387         ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
2388                                   VXLAN_STEER_BY_OUTER_MAC, 1);
2389 out:
2390         if (ret) {
2391                 en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
2392                 return;
2393         }
2394
2395         /* set offloads */
2396         priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
2397                                       NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
2398         priv->dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
2399         priv->dev->feat    |= NETIF_F_GSO_UDP_TUNNEL;
2400 }
2401
2402 static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
2403 {
2404         int ret;
2405         struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
2406                                                  vxlan_del_task);
2407         /* unset offloads */
2408         priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
2409                                       NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL);
2410         priv->dev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL;
2411         priv->dev->feat    &= ~NETIF_F_GSO_UDP_TUNNEL;
2412
2413         ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
2414                                   VXLAN_STEER_BY_OUTER_MAC, 0);
2415         if (ret)
2416                 en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
2417
2418         priv->vxlan_port = 0;
2419 }
2420
2421 static void mlx4_en_add_vxlan_port(struct  ether *dev,
2422                                    sa_family_t sa_family, __be16 port)
2423 {
2424         struct mlx4_en_priv *priv = netdev_priv(dev);
2425         __be16 current_port;
2426
2427         if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
2428                 return;
2429
2430         if (sa_family == AF_INET6)
2431                 return;
2432
2433         current_port = priv->vxlan_port;
2434         if (current_port && current_port != port) {
2435                 en_warn(priv, "vxlan port %d configured, can't add port %d\n",
2436                         be16_to_cpu(current_port), be16_to_cpu(port));
2437                 return;
2438         }
2439
2440         priv->vxlan_port = port;
2441         queue_work(priv->mdev->workqueue, &priv->vxlan_add_task);
2442 }
2443
2444 static void mlx4_en_del_vxlan_port(struct  ether *dev,
2445                                    sa_family_t sa_family, __be16 port)
2446 {
2447         struct mlx4_en_priv *priv = netdev_priv(dev);
2448         __be16 current_port;
2449
2450         if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
2451                 return;
2452
2453         if (sa_family == AF_INET6)
2454                 return;
2455
2456         current_port = priv->vxlan_port;
2457         if (current_port != port) {
2458                 en_dbg(DRV, priv, "vxlan port %d isn't configured, ignoring\n",
2459                        be16_to_cpu(port));
2460                 return;
2461         }
2462
2463         queue_work(priv->mdev->workqueue, &priv->vxlan_del_task);
2464 }
2465
2466 static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
2467                                                 struct ether *dev,
2468                                                 netdev_features_t features)
2469 {
2470         features = vlan_features_check(skb, features);
2471         return vxlan_features_check(skb, features);
2472 }
2473 #endif
2474
2475 static int mlx4_en_set_tx_maxrate(struct ether *dev, int queue_index,
2476                                   uint32_t maxrate)
2477 {
2478         struct mlx4_en_priv *priv = netdev_priv(dev);
2479         struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[queue_index];
2480         struct mlx4_update_qp_params params;
2481         int err;
2482
2483         if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT))
2484                 return -EOPNOTSUPP;
2485
2486         /* rate provided to us in Mb/s; check if it fits into 12 bits, if not use Gb/s */
2487         if (maxrate >> 12) {
2488                 params.rate_unit = MLX4_QP_RATE_LIMIT_GBS;
2489                 params.rate_val  = maxrate / 1000;
2490         } else if (maxrate) {
2491                 params.rate_unit = MLX4_QP_RATE_LIMIT_MBS;
2492                 params.rate_val  = maxrate;
2493         } else { /* zero serves to revoke the QP rate limit */
2494                 params.rate_unit = 0;
2495                 params.rate_val  = 0;
2496         }
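             /* Example of the unit selection above, with illustrative values:
              * maxrate = 900 Mb/s fits in 12 bits (900 < 4096) and is
              * programmed as 900 in Mb/s units; maxrate = 10000 Mb/s does not
              * (10000 >> 12 != 0) and is programmed as 10 in Gb/s units. */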
2497
2498         err = mlx4_update_qp(priv->mdev->dev, tx_ring->qpn, MLX4_UPDATE_QP_RATE_LIMIT,
2499                              &params);
2500         return err;
2501 }
2502
2503 static const struct net_device_ops mlx4_netdev_ops = {
2504         .ndo_open               = mlx4_en_open,
2505         .ndo_stop               = mlx4_en_close,
2506         .ndo_start_xmit         = mlx4_en_xmit,
2507         .ndo_select_queue       = mlx4_en_select_queue,
2508         .ndo_get_stats          = mlx4_en_get_stats,
2509         .ndo_set_rx_mode        = mlx4_en_set_rx_mode,
2510         .ndo_set_mac_address    = mlx4_en_set_mac,
2511         .ndo_validate_addr      = eth_validate_addr,
2512         .ndo_change_mtu         = mlx4_en_change_mtu,
2513         .ndo_do_ioctl           = mlx4_en_ioctl,
2514         .ndo_tx_timeout         = mlx4_en_tx_timeout,
2515         .ndo_vlan_rx_add_vid    = mlx4_en_vlan_rx_add_vid,
2516         .ndo_vlan_rx_kill_vid   = mlx4_en_vlan_rx_kill_vid,
2517 #ifdef CONFIG_NET_POLL_CONTROLLER
2518         .ndo_poll_controller    = mlx4_en_netpoll,
2519 #endif
2520         .ndo_set_features       = mlx4_en_set_features,
2521         .ndo_setup_tc           = mlx4_en_setup_tc,
2522 #ifdef CONFIG_RFS_ACCEL
2523         .ndo_rx_flow_steer      = mlx4_en_filter_rfs,
2524 #endif
2525 #ifdef CONFIG_NET_RX_BUSY_POLL
2526         .ndo_busy_poll          = mlx4_en_low_latency_recv,
2527 #endif
2528         .ndo_get_phys_port_id   = mlx4_en_get_phys_port_id,
2529 #ifdef CONFIG_MLX4_EN_VXLAN
2530         .ndo_add_vxlan_port     = mlx4_en_add_vxlan_port,
2531         .ndo_del_vxlan_port     = mlx4_en_del_vxlan_port,
2532         .ndo_features_check     = mlx4_en_features_check,
2533 #endif
2534         .ndo_set_tx_maxrate     = mlx4_en_set_tx_maxrate,
2535 };
2536
2537 static const struct net_device_ops mlx4_netdev_ops_master = {
2538         .ndo_open               = mlx4_en_open,
2539         .ndo_stop               = mlx4_en_close,
2540         .ndo_start_xmit         = mlx4_en_xmit,
2541         .ndo_select_queue       = mlx4_en_select_queue,
2542         .ndo_get_stats          = mlx4_en_get_stats,
2543         .ndo_set_rx_mode        = mlx4_en_set_rx_mode,
2544         .ndo_set_mac_address    = mlx4_en_set_mac,
2545         .ndo_validate_addr      = eth_validate_addr,
2546         .ndo_change_mtu         = mlx4_en_change_mtu,
2547         .ndo_tx_timeout         = mlx4_en_tx_timeout,
2548         .ndo_vlan_rx_add_vid    = mlx4_en_vlan_rx_add_vid,
2549         .ndo_vlan_rx_kill_vid   = mlx4_en_vlan_rx_kill_vid,
2550         .ndo_set_vf_mac         = mlx4_en_set_vf_mac,
2551         .ndo_set_vf_vlan        = mlx4_en_set_vf_vlan,
2552         .ndo_set_vf_rate        = mlx4_en_set_vf_rate,
2553         .ndo_set_vf_spoofchk    = mlx4_en_set_vf_spoofchk,
2554         .ndo_set_vf_link_state  = mlx4_en_set_vf_link_state,
2555         .ndo_get_vf_config      = mlx4_en_get_vf_config,
2556 #ifdef CONFIG_NET_POLL_CONTROLLER
2557         .ndo_poll_controller    = mlx4_en_netpoll,
2558 #endif
2559         .ndo_set_features       = mlx4_en_set_features,
2560         .ndo_setup_tc           = mlx4_en_setup_tc,
2561 #ifdef CONFIG_RFS_ACCEL
2562         .ndo_rx_flow_steer      = mlx4_en_filter_rfs,
2563 #endif
2564         .ndo_get_phys_port_id   = mlx4_en_get_phys_port_id,
2565 #ifdef CONFIG_MLX4_EN_VXLAN
2566         .ndo_add_vxlan_port     = mlx4_en_add_vxlan_port,
2567         .ndo_del_vxlan_port     = mlx4_en_del_vxlan_port,
2568         .ndo_features_check     = mlx4_en_features_check,
2569 #endif
2570         .ndo_set_tx_maxrate     = mlx4_en_set_tx_maxrate,
2571 };
2572 #endif
2573
2574 struct mlx4_en_bond {
2575         struct work_struct work;
2576         struct mlx4_en_priv *priv;
2577         int is_bonded;
2578         struct mlx4_port_map port_map;
2579 };
2580
2581 static void mlx4_en_bond_work(struct work_struct *work)
2582 {
2583         panic("Disabled");
2584 #if 0 // AKAROS_PORT
2585         struct mlx4_en_bond *bond = container_of(work,
2586                                                      struct mlx4_en_bond,
2587                                                      work);
2588         int err = 0;
2589         struct mlx4_dev *dev = bond->priv->mdev->dev;
2590
2591         if (bond->is_bonded) {
2592                 if (!mlx4_is_bonded(dev)) {
2593                         err = mlx4_bond(dev);
2594                         if (err)
2595                                 en_err(bond->priv, "Fail to bond device\n");
2596                 }
2597                 if (!err) {
2598                         err = mlx4_port_map_set(dev, &bond->port_map);
2599                         if (err)
2600                                 en_err(bond->priv, "Fail to set port map [%d][%d]: %d\n",
2601                                        bond->port_map.port1,
2602                                        bond->port_map.port2,
2603                                        err);
2604                 }
2605         } else if (mlx4_is_bonded(dev)) {
2606                 err = mlx4_unbond(dev);
2607                 if (err)
2608                         en_err(bond->priv, "Fail to unbond device\n");
2609         }
2610         dev_put(bond->priv->dev);
2611         kfree(bond);
2612 #endif
2613 }
2614
2615 static int mlx4_en_queue_bond_work(struct mlx4_en_priv *priv, int is_bonded,
2616                                    uint8_t v2p_p1, uint8_t v2p_p2)
2617 {
2618         panic("Disabled");
2619 #if 0 // AKAROS_PORT
2620         struct mlx4_en_bond *bond = NULL;
2621
2622         bond = kzmalloc(sizeof(*bond), 0);
2623         if (!bond)
2624                 return -ENOMEM;
2625
2626         INIT_WORK(&bond->work, mlx4_en_bond_work);
2627         bond->priv = priv;
2628         bond->is_bonded = is_bonded;
2629         bond->port_map.port1 = v2p_p1;
2630         bond->port_map.port2 = v2p_p2;
2631         dev_hold(priv->dev);
2632         queue_work(priv->mdev->workqueue, &bond->work);
2633         return 0;
2634 #endif
2635 }
2636
2637 int mlx4_en_netdev_event(struct notifier_block *this,
2638                          unsigned long event, void *ptr)
2639 {
2640         panic("Disabled");
2641 #if 0 // AKAROS_PORT
2642         struct ether *ndev = netdev_notifier_info_to_dev(ptr);
2643         uint8_t port = 0;
2644         struct mlx4_en_dev *mdev;
2645         struct mlx4_dev *dev;
2646         int i, num_eth_ports = 0;
2647         bool do_bond = true;
2648         struct mlx4_en_priv *priv;
2649         uint8_t v2p_port1 = 0;
2650         uint8_t v2p_port2 = 0;
2651
2652         if (!net_eq(dev_net(ndev), &init_net))
2653                 return NOTIFY_DONE;
2654
2655         mdev = container_of(this, struct mlx4_en_dev, nb);
2656         dev = mdev->dev;
2657
2658         /* Go into this mode only when two network devices sitting on two
2659          * ports of the same mlx4 device are slaves of the same bonding master
2660          */
2661         mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
2662                 ++num_eth_ports;
2663                 if (!port && (mdev->pndev[i] == ndev))
2664                         port = i;
2665                 mdev->upper[i] = mdev->pndev[i] ?
2666                         netdev_master_upper_dev_get(mdev->pndev[i]) : NULL;
2667                 /* condition not met: network device is a slave */
2668                 if (!mdev->upper[i])
2669                         do_bond = false;
2670                 if (num_eth_ports < 2)
2671                         continue;
2672                 /* condition not met: same master */
2673                 if (mdev->upper[i] != mdev->upper[i-1])
2674                         do_bond = false;
2675         }
2676         /* condition not met: 2 slaves */
2677         do_bond = (num_eth_ports ==  2) ? do_bond : false;
2678
2679         /* handle only events that come with enough info */
2680         if ((do_bond && (event != NETDEV_BONDING_INFO)) || !port)
2681                 return NOTIFY_DONE;
2682
2683         priv = netdev_priv(ndev);
2684         if (do_bond) {
2685                 struct netdev_notifier_bonding_info *notifier_info = ptr;
2686                 struct netdev_bonding_info *bonding_info =
2687                         &notifier_info->bonding_info;
2688
2689                 /* required mode 1, 2 or 4 */
2690                 if ((bonding_info->master.bond_mode != BOND_MODE_ACTIVEBACKUP) &&
2691                     (bonding_info->master.bond_mode != BOND_MODE_XOR) &&
2692                     (bonding_info->master.bond_mode != BOND_MODE_8023AD))
2693                         do_bond = false;
2694
2695                 /* require exactly 2 slaves */
2696                 if (bonding_info->master.num_slaves != 2)
2697                         do_bond = false;
2698
2699                 /* calc v2p */
2700                 if (do_bond) {
2701                         if (bonding_info->master.bond_mode ==
2702                             BOND_MODE_ACTIVEBACKUP) {
2703                                 /* in active-backup mode virtual ports are
2704                                  * mapped to the physical port of the active
2705                                  * slave */
2706                                 if (bonding_info->slave.state ==
2707                                     BOND_STATE_BACKUP) {
2708                                         if (port == 1) {
2709                                                 v2p_port1 = 2;
2710                                                 v2p_port2 = 2;
2711                                         } else {
2712                                                 v2p_port1 = 1;
2713                                                 v2p_port2 = 1;
2714                                         }
2715                                 } else { /* BOND_STATE_ACTIVE */
2716                                         if (port == 1) {
2717                                                 v2p_port1 = 1;
2718                                                 v2p_port2 = 1;
2719                                         } else {
2720                                                 v2p_port1 = 2;
2721                                                 v2p_port2 = 2;
2722                                         }
2723                                 }
2724                         } else { /* Active-Active */
2725                                 /* in active-active mode a virtual port is
2726                                  * mapped to the native physical port if and only
2727                                  * if the physical port is up */
2728                                 __s8 link = bonding_info->slave.link;
2729
2730                                 if (port == 1)
2731                                         v2p_port2 = 2;
2732                                 else
2733                                         v2p_port1 = 1;
2734                                 if ((link == BOND_LINK_UP) ||
2735                                     (link == BOND_LINK_FAIL)) {
2736                                         if (port == 1)
2737                                                 v2p_port1 = 1;
2738                                         else
2739                                                 v2p_port2 = 2;
2740                                 } else { /* BOND_LINK_DOWN || BOND_LINK_BACK */
2741                                         if (port == 1)
2742                                                 v2p_port1 = 2;
2743                                         else
2744                                                 v2p_port2 = 1;
2745                                 }
2746                         }
2747                 }
2748         }
2749
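        /* Hand the computed virtual-to-physical port map to the bond work
         * queue (or tear bonding down if a condition above failed).
         * Worked example: in active-backup mode with port 1 as the backup
         * slave, both virtual ports map to physical port 2, so all traffic
         * egresses through the active slave.
         */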
2750         mlx4_en_queue_bond_work(priv, do_bond,
2751                                 v2p_port1, v2p_port2);
2752 #endif
2753
2754         return NOTIFY_DONE;
2755 }
2756
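/* Flow-control counters occupy a fixed window of the stats bitmap right
 * after the main and port stats, laid out as: RX per-priority (PFC),
 * RX global pause, TX per-priority (PFC), TX global pause.  Per
 * direction, the per-priority and global pause counters are mutually
 * exclusive: global pause stats are exposed only when pause is enabled
 * and PFC is not.
 */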
2757 void mlx4_en_update_pfc_stats_bitmap(struct mlx4_dev *dev,
2758                                      struct mlx4_en_stats_bitmap *stats_bitmap,
2759                                      uint8_t rx_ppp, uint8_t rx_pause,
2760                                      uint8_t tx_ppp, uint8_t tx_pause)
2761 {
2762         int last_i = NUM_MAIN_STATS + NUM_PORT_STATS;
2763
2764         if (!mlx4_is_slave(dev) &&
2765             (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN)) {
2766                 qlock(&stats_bitmap->mutex);
2767                 bitmap_clear(stats_bitmap->bitmap, last_i, NUM_FLOW_STATS);
2768
2769                 if (rx_ppp)
2770                         bitmap_set(stats_bitmap->bitmap, last_i,
2771                                    NUM_FLOW_PRIORITY_STATS_RX);
2772                 last_i += NUM_FLOW_PRIORITY_STATS_RX;
2773
2774                 if (rx_pause && !(rx_ppp))
2775                         bitmap_set(stats_bitmap->bitmap, last_i,
2776                                    NUM_FLOW_STATS_RX);
2777                 last_i += NUM_FLOW_STATS_RX;
2778
2779                 if (tx_ppp)
2780                         bitmap_set(stats_bitmap->bitmap, last_i,
2781                                    NUM_FLOW_PRIORITY_STATS_TX);
2782                 last_i += NUM_FLOW_PRIORITY_STATS_TX;
2783
2784                 if (tx_pause && !(tx_ppp))
2785                         bitmap_set(stats_bitmap->bitmap, last_i,
2786                                    NUM_FLOW_STATS_TX);
2787                 last_i += NUM_FLOW_STATS_TX;
2788
2789                 qunlock(&stats_bitmap->mutex);
2790         }
2791 }
2792
2793 void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
2794                               struct mlx4_en_stats_bitmap *stats_bitmap,
2795                               uint8_t rx_ppp, uint8_t rx_pause,
2796                               uint8_t tx_ppp, uint8_t tx_pause)
2797 {
2798         panic("Disabled");
2799 #if 0 // AKAROS_PORT
2800         int last_i = 0;
2801
2802         qlock_init(&stats_bitmap->mutex);
2803         bitmap_zero(stats_bitmap->bitmap, NUM_ALL_STATS);
2804
2805         if (mlx4_is_slave(dev)) {
2806                 bitmap_set(stats_bitmap->bitmap, last_i +
2807                                          MLX4_FIND_NETDEV_STAT(rx_packets), 1);
2808                 bitmap_set(stats_bitmap->bitmap, last_i +
2809                                          MLX4_FIND_NETDEV_STAT(tx_packets), 1);
2810                 bitmap_set(stats_bitmap->bitmap, last_i +
2811                                          MLX4_FIND_NETDEV_STAT(rx_bytes), 1);
2812                 bitmap_set(stats_bitmap->bitmap, last_i +
2813                                          MLX4_FIND_NETDEV_STAT(tx_bytes), 1);
2814                 bitmap_set(stats_bitmap->bitmap, last_i +
2815                                          MLX4_FIND_NETDEV_STAT(rx_dropped), 1);
2816                 bitmap_set(stats_bitmap->bitmap, last_i +
2817                                          MLX4_FIND_NETDEV_STAT(tx_dropped), 1);
2818         } else {
2819                 bitmap_set(stats_bitmap->bitmap, last_i, NUM_MAIN_STATS);
2820         }
2821         last_i += NUM_MAIN_STATS;
2822
2823         bitmap_set(stats_bitmap->bitmap, last_i, NUM_PORT_STATS);
2824         last_i += NUM_PORT_STATS;
2825
2826         mlx4_en_update_pfc_stats_bitmap(dev, stats_bitmap,
2827                                         rx_ppp, rx_pause,
2828                                         tx_ppp, tx_pause);
2829         last_i += NUM_FLOW_STATS;
2830
2831         if (!mlx4_is_slave(dev))
2832                 bitmap_set(stats_bitmap->bitmap, last_i, NUM_PKT_STATS);
2833 #endif
2834 }
2835
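/* Set up the per-port Ethernet device: allocate the driver-private state,
 * rings and CQs, program the default MAC and general port settings, and
 * bring the port to INIT.
 */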
2836 int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2837                         struct ether *dev,
2838                         struct mlx4_en_port_profile *prof)
2839 {
2840         struct mlx4_en_priv *priv;
2841         int i;
2842         int err;
2843         uint64_t mac_u64;
2844
2845 #if 0 // AKAROS_PORT
2846         dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
2847                                  MAX_TX_RINGS, MAX_RX_RINGS);
2848         if (dev == NULL)
2849                 return -ENOMEM;
2850
2851         netif_set_real_num_tx_queues(dev, prof->tx_ring_num);
2852         netif_set_real_num_rx_queues(dev, prof->rx_ring_num);
2853
2854         SET_NETDEV_DEV(dev, &mdev->dev->persist->pdev->dev);
2855 #else
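        /* AKAROS: the driver-private state hangs off the ether's ctlr
         * pointer; netdev_priv() below is assumed to resolve to it via
         * the compat layer.
         */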
2856         dev->ctlr = kzmalloc(sizeof(struct mlx4_en_priv), MEM_WAIT);
2857 #endif
2858
2859         /*
2860          * Initialize driver private data
2861          */
2862
2863         priv = netdev_priv(dev);
2864         memset(priv, 0, sizeof(struct mlx4_en_priv));
2865         spinlock_init(&priv->stats_lock);
2866         INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
2867         INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
2868         INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
2869         INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
2870         INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
2871 #ifdef CONFIG_MLX4_EN_VXLAN
2872         INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads);
2873         INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads);
2874 #endif
2875 #ifdef CONFIG_RFS_ACCEL
2876         INIT_LIST_HEAD(&priv->filters);
2877         spinlock_init(&priv->filters_lock);
2878 #endif
2879
2880         priv->dev = dev;
2881         priv->mdev = mdev;
2882 #if 0 // AKAROS_PORT
2883         priv->ddev = &mdev->pdev->dev;
2884 #endif
2885         priv->prof = prof;
2886         priv->port = port;
2887         priv->port_up = false;
2888         priv->flags = prof->flags;
2889         priv->pflags = MLX4_EN_PRIV_FLAGS_BLUEFLAME;
2890         priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
2891                         MLX4_WQE_CTRL_SOLICITED);
2892         priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
2893         priv->tx_ring_num = prof->tx_ring_num;
2894         priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK;
2895         netdev_rss_key_fill(priv->rss_key, sizeof(priv->rss_key));
2896
2897         priv->tx_ring = kzmalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
2898                                  MEM_WAIT);
2899         if (!priv->tx_ring) {
2900                 err = -ENOMEM;
2901                 goto out;
2902         }
2903         priv->tx_cq = kzmalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS,
2904                                MEM_WAIT);
2905         if (!priv->tx_cq) {
2906                 err = -ENOMEM;
2907                 goto out;
2908         }
2909         priv->rx_ring_num = prof->rx_ring_num;
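        /* cqe_factor distinguishes 64-byte CQEs (1) from the default
         * 32-byte ones (0).
         */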
2910         priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
2911         priv->cqe_size = mdev->dev->caps.cqe_size;
2912         priv->mac_index = -1;
2913         priv->msg_enable = MLX4_EN_MSG_LEVEL;
2914 #ifdef CONFIG_MLX4_EN_DCB
2915         if (!mlx4_is_slave(priv->mdev->dev)) {
2916                 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) {
2917                         dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
2918                 } else {
2919                         en_info(priv, "enabling only PFC DCB ops\n");
2920                         dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops;
2921                 }
2922         }
2923 #endif
2924
2925         for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i)
2926                 INIT_HLIST_HEAD(&priv->mac_hash[i]);
2927
2928         /* Query for default mac and max mtu */
2929         priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
2930
2931         if (mdev->dev->caps.rx_checksum_flags_port[priv->port] &
2932             MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP)
2933                 priv->flags |= MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP;
2934
2935         /* Set default MAC */
2936         mlx4_en_u64_to_mac(dev->ea, mdev->dev->caps.def_mac[priv->port]);
2937         if (!is_valid_ether_addr(dev->ea)) {
2938                 if (mlx4_is_slave(priv->mdev->dev)) {
2939                         eth_hw_addr_random(dev);
2940                         en_warn(priv, "Assigned random MAC address %pM\n",
2941                                 dev->ea);
2942                         mac_u64 = mlx4_mac_to_u64(dev->ea);
2943                         mdev->dev->caps.def_mac[priv->port] = mac_u64;
2944                 } else {
2945                         en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n",
2946                                priv->port, dev->ea);
2947                         err = -EINVAL;
2948                         goto out;
2949                 }
2950         }
2951
2952         memcpy(priv->current_mac, dev->ea, sizeof(priv->current_mac));
2953
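        /* RX WQE stride: one descriptor plus a scatter entry per fragment,
         * rounded up to a power of two (the stride is programmed to the HW
         * as a log2 value).
         */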
2954         priv->stride = ROUNDUPPWR2(sizeof(struct mlx4_en_rx_desc) + DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
2955         err = mlx4_en_alloc_resources(priv);
2956         if (err)
2957                 goto out;
2958
2959         /* Initialize time stamping config */
2960         priv->hwtstamp_config.flags = 0;
2961         priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
2962         priv->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
2963
2964         /* Allocate page for receive rings */
2965         err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
2966                                 MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
2967         if (err) {
2968                 en_err(priv, "Failed to allocate page for rx qps\n");
2969                 goto out;
2970         }
2971         priv->allocated = 1;
2972
2973 #if 0 // AKAROS_PORT
2974         /*
2975          * Initialize netdev entry points
2976          */
2977         if (mlx4_is_master(priv->mdev->dev))
2978                 {}
2979         else
2980                 {}
2981         netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
2982         netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
2983 #endif
2984
2985         /*
2986          * Set driver features
2987          */
2988         dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2989         if (mdev->LSO_support)
2990                 dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
2991
2992         dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
2993 #if 0 // AKAROS_PORT
2994         dev->feat = dev->hw_features | NETIF_F_HIGHDMA |
2995                         NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
2996                         NETIF_F_HW_VLAN_CTAG_FILTER;
2997 #else
2998         dev->feat = dev->hw_features | NETIF_F_SG | NETIF_F_IP_CSUM | NETF_PADMIN;
2999 #endif
3000         dev->hw_features |= NETIF_F_LOOPBACK |
3001                         NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
3002
3003         if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
3004                 dev->hw_features |= NETIF_F_RXFCS;
3005
3006         if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS)
3007                 dev->hw_features |= NETIF_F_RXALL;
3008
3009         if (mdev->dev->caps.steering_mode ==
3010             MLX4_STEERING_MODE_DEVICE_MANAGED &&
3011             mdev->dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC)
3012                 dev->hw_features |= NETIF_F_NTUPLE;
3013
3014 #if 0 // AKAROS_PORT
3015         if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
3016                 dev->priv_flags |= IFF_UNICAST_FLT;
3017 #endif
3018
3019         /* Setting a default hash function value */
3020         if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP) {
3021                 priv->rss_hash_fn = ETH_RSS_HASH_TOP;
3022         } else if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR) {
3023                 priv->rss_hash_fn = ETH_RSS_HASH_XOR;
3024         } else {
3025                 en_warn(priv,
3026                         "No RSS hash capabilities exposed, using Toeplitz\n");
3027                 priv->rss_hash_fn = ETH_RSS_HASH_TOP;
3028         }
3029
3030         mdev->pndev[port] = dev;
3031         mdev->upper[port] = NULL;
3032
3033         netif_carrier_off(dev);
3034         mlx4_en_set_default_moderation(priv);
3035
3036         en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
3037         en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
3038
3039         mlx4_en_update_loopback_state(priv->dev, priv->dev->feat);
3040
3041         /* Configure port */
3042         mlx4_en_calc_rx_buf(dev);
3043         err = mlx4_SET_PORT_general(mdev->dev, priv->port,
3044                                     priv->rx_skb_size + ETH_FCS_LEN,
3045                                     prof->tx_pause, prof->tx_ppp,
3046                                     prof->rx_pause, prof->rx_ppp);
3047         if (err) {
3048                 en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
3049                        priv->port, err);
3050                 goto out;
3051         }
3052
3053         if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
3054                 err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1);
3055                 if (err) {
3056                         en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
3057                                err);
3058                         goto out;
3059                 }
3060         }
3061
3062         /* Init port */
3063         en_warn(priv, "Initializing port\n");
3064         err = mlx4_INIT_PORT(mdev->dev, priv->port);
3065         if (err) {
3066                 en_err(priv, "Failed initializing port\n");
3067                 goto out;
3068         }
3069 #if 0 // AKAROS_PORT
3070         queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
3071 #endif
3072
3073         if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
3074                 queue_delayed_work(mdev->workqueue, &priv->service_task,
3075                                    SERVICE_TASK_DELAY);
3076
3077 #if 0 // AKAROS_PORT
3078         mlx4_en_set_stats_bitmap(mdev->dev, &priv->stats_bitmap,
3079                                  mdev->profile.prof[priv->port].rx_ppp,
3080                                  mdev->profile.prof[priv->port].rx_pause,
3081                                  mdev->profile.prof[priv->port].tx_ppp,
3082                                  mdev->profile.prof[priv->port].tx_pause);
3083
3084         err = register_netdev(dev);
3085         if (err) {
3086                 en_err(priv, "Netdev registration failed for port %d\n", port);
3087                 goto out;
3088         }
3089 #endif
3090
3091         priv->registered = 1;
3092
3093         return 0;
3094
3095 out:
3096         mlx4_en_destroy_netdev(dev);
3097         return err;
3098 }
3099
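/* Apply a new time-stamping/VLAN/FCS configuration.  If nothing changed
 * this is a no-op; otherwise the port is stopped (if it was up), its
 * resources are freed and reallocated under the new settings, and the
 * port is restarted.
 */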
3100 int mlx4_en_reset_config(struct ether *dev,
3101                          struct hwtstamp_config ts_config,
3102                          netdev_features_t features)
3103 {
3104         struct mlx4_en_priv *priv = netdev_priv(dev);
3105         struct mlx4_en_dev *mdev = priv->mdev;
3106         int port_up = 0;
3107         int err = 0;
3108
3109         if (priv->hwtstamp_config.tx_type == ts_config.tx_type &&
3110             priv->hwtstamp_config.rx_filter == ts_config.rx_filter &&
3111             !DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
3112             !DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS))
3113                 return 0; /* Nothing to change */
3114
3115         if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
3116             (features & NETIF_F_HW_VLAN_CTAG_RX) &&
3117             (priv->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE)) {
3118                 en_warn(priv, "Can't turn ON rx vlan offload while time-stamping rx filter is ON\n");
3119                 return -EINVAL;
3120         }
3121
3122         qlock(&mdev->state_lock);
3123         if (priv->port_up) {
3124                 port_up = 1;
3125                 mlx4_en_stop_port(dev, 1);
3126         }
3127
3128         mlx4_en_free_resources(priv);
3129
3130         en_warn(priv, "Changing device configuration rx filter(%x) rx vlan(%x)\n",
3131                 ts_config.rx_filter, !!(features & NETIF_F_HW_VLAN_CTAG_RX));
3132
3133         priv->hwtstamp_config.tx_type = ts_config.tx_type;
3134         priv->hwtstamp_config.rx_filter = ts_config.rx_filter;
3135
3136         if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
3137                 if (features & NETIF_F_HW_VLAN_CTAG_RX)
3138                         dev->feat |= NETIF_F_HW_VLAN_CTAG_RX;
3139                 else
3140                         dev->feat &= ~NETIF_F_HW_VLAN_CTAG_RX;
3141         } else if (ts_config.rx_filter == HWTSTAMP_FILTER_NONE) {
3142                 /* RX time-stamping is OFF, update the RX vlan offload
3143                  * to the latest wanted state
3144                  */
3145 #if 0 // AKAROS_PORT
3146                 if (dev->wanted_features & NETIF_F_HW_VLAN_CTAG_RX)
3147                         dev->feat |= NETIF_F_HW_VLAN_CTAG_RX;
3148                 else
3149 #endif
3150                         dev->feat &= ~NETIF_F_HW_VLAN_CTAG_RX;
3151         }
3152
3153         if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS)) {
3154                 if (features & NETIF_F_RXFCS)
3155                         dev->feat |= NETIF_F_RXFCS;
3156                 else
3157                         dev->feat &= ~NETIF_F_RXFCS;
3158         }
3159
3160         /* RX vlan offload and RX time-stamping can't co-exist!
3161          * Regardless of the caller's choice,
3162          * turn off RX vlan offload when time-stamping is ON.
3163          */
3164         if (ts_config.rx_filter != HWTSTAMP_FILTER_NONE) {
3165                 if (dev->feat & NETIF_F_HW_VLAN_CTAG_RX)
3166                         en_warn(priv, "Turning off RX vlan offload since RX time-stamping is ON\n");
3167                 dev->feat &= ~NETIF_F_HW_VLAN_CTAG_RX;
3168         }
3169
3170         err = mlx4_en_alloc_resources(priv);
3171         if (err) {
3172                 en_err(priv, "Failed reallocating port resources\n");
3173                 goto out;
3174         }
3175         if (port_up) {
3176                 err = mlx4_en_start_port(dev);
3177                 if (err)
3178                         en_err(priv, "Failed starting port\n");
3179         }
3180
3181 out:
3182         qunlock(&mdev->state_lock);
3183 #if 0 // AKAROS_PORT
3184         netdev_features_change(dev);
3185 #endif
3186         return err;
3187 }