/*
 * linux/drivers/net/ehea/ehea_main.c
 *
 * eHEA ethernet device driver for IBM eServer System p
 *
 * (C) Copyright IBM Corp. 2006
 *
 * Authors:
 *	Christoph Raisch <raisch@de.ibm.com>
 *	Jan-Bernd Themann <themann@de.ibm.com>
 *	Thomas Klein <tklein@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/if_ether.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/memory.h>
#include <asm/kexec.h>
#include <linux/mutex.h>
#include <linux/prefetch.h>
#include <net/ip.h>

#include "ehea.h"
#include "ehea_phyp.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
MODULE_DESCRIPTION("IBM eServer HEA Driver");
MODULE_VERSION(DRV_VERSION);
static int msg_level = -1;
static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
static int sq_entries = EHEA_DEF_ENTRIES_SQ;
static int use_mcs = 1;
static int use_lro;
static int lro_max_aggr = EHEA_LRO_MAX_AGGR;
static int prop_carrier_state;

module_param(msg_level, int, 0);
module_param(rq1_entries, int, 0);
module_param(rq2_entries, int, 0);
module_param(rq3_entries, int, 0);
module_param(sq_entries, int, 0);
module_param(prop_carrier_state, int, 0);
module_param(use_mcs, int, 0);
module_param(use_lro, int, 0);
module_param(lro_max_aggr, int, 0);
MODULE_PARM_DESC(msg_level, "Debug message level");
MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical "
		 "port to stack. 1:yes, 0:no. Default = 0");
MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3));
MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2));
MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1));
MODULE_PARM_DESC(sq_entries, "Number of entries for the Send Queue "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ));
MODULE_PARM_DESC(use_mcs, "Multiple receive queues, 1: enable, 0: disable, "
		 "Default = 1");
MODULE_PARM_DESC(lro_max_aggr, "LRO: Max packets to be aggregated. Default = "
		 __MODULE_STRING(EHEA_LRO_MAX_AGGR));
MODULE_PARM_DESC(use_lro, "Large Receive Offload, 1: enable, 0: disable, "
		 "Default = 0");
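
/*
 * The queue sizes above must be of the form 2^x - 1: the skb arrays are
 * sized one larger (a power of two), and the ring index arithmetic below
 * wraps with "index &= (len - 1)", which only works for power-of-two
 * lengths.  E.g. x = 14 gives rq1_entries = 16383 and a 16384-slot array.
 */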
static int port_name_cnt;
static LIST_HEAD(adapter_list);
static unsigned long ehea_driver_flags;
static DEFINE_MUTEX(dlpar_mem_lock);
struct ehea_fw_handle_array ehea_fw_handles;
struct ehea_bcmc_reg_array ehea_bcmc_regs;
static int __devinit ehea_probe_adapter(struct platform_device *dev,
					const struct of_device_id *id);

static int __devexit ehea_remove(struct platform_device *dev);

static struct of_device_id ehea_device_table[] = {
	{
		.name = "lhea",
		.compatible = "IBM,lhea",
	},
	{},
};
MODULE_DEVICE_TABLE(of, ehea_device_table);

static struct of_platform_driver ehea_driver = {
	.driver = {
		.name = "ehea",
		.owner = THIS_MODULE,
		.of_match_table = ehea_device_table,
	},
	.probe = ehea_probe_adapter,
	.remove = ehea_remove,
};
void ehea_dump(void *adr, int len, char *msg)
{
	int x;
	unsigned char *deb = adr;

	for (x = 0; x < len; x += 16) {
		pr_info("%s adr=%p ofs=%04x %016llx %016llx\n",
			msg, deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
		deb += 16;
	}
}
void ehea_schedule_port_reset(struct ehea_port *port)
{
	if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags))
		schedule_work(&port->reset_task);
}
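
/*
 * ehea_update_firmware_handles - rebuild the global snapshot of all
 * firmware handles (QPs, CQs, EQs, MRs) currently owned by the driver.
 * The snapshot in ehea_fw_handles is presumably consumed by the
 * reboot/kexec notifiers (see the reboot.h/kexec.h includes above) so
 * the handles can be freed even when ports are not cleanly shut down.
 */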
static void ehea_update_firmware_handles(void)
{
	struct ehea_fw_handle_entry *arr = NULL;
	struct ehea_adapter *adapter;
	int num_adapters = 0;
	int num_ports = 0;
	int num_portres = 0;
	int i = 0;
	int num_fw_handles, k, l;

	/* Determine number of handles */
	mutex_lock(&ehea_fw_handles.lock);

	list_for_each_entry(adapter, &adapter_list, list) {
		num_adapters++;

		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			num_ports++;
			num_portres += port->num_def_qps;
		}
	}

	num_fw_handles = num_adapters * EHEA_NUM_ADAPTER_FW_HANDLES +
			 num_ports * EHEA_NUM_PORT_FW_HANDLES +
			 num_portres * EHEA_NUM_PORTRES_FW_HANDLES;

	if (num_fw_handles) {
		arr = kcalloc(num_fw_handles, sizeof(*arr), GFP_KERNEL);
		if (!arr)
			goto out;	/* Keep the existing array */
	}

	list_for_each_entry(adapter, &adapter_list, list) {
		if (num_adapters == 0)
			break;

		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP) ||
			    (num_ports == 0))
				continue;

			for (l = 0; l < port->num_def_qps; l++) {
				struct ehea_port_res *pr = &port->port_res[l];

				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->qp->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->send_cq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->recv_cq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->eq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->send_mr.handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->recv_mr.handle;
			}
			arr[i].adh = adapter->handle;
			arr[i++].fwh = port->qp_eq->fw_handle;
			num_ports--;
		}

		arr[i].adh = adapter->handle;
		arr[i++].fwh = adapter->neq->fw_handle;

		if (adapter->mr.handle) {
			arr[i].adh = adapter->handle;
			arr[i++].fwh = adapter->mr.handle;
		}
		num_adapters--;
	}

	kfree(ehea_fw_handles.arr);
	ehea_fw_handles.arr = arr;
	ehea_fw_handles.num_entries = i;
out:
	mutex_unlock(&ehea_fw_handles.lock);
}
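
/*
 * ehea_update_bcmc_registrations - same idea as the firmware-handle
 * snapshot above, but for broadcast/multicast (BCMC) registrations: two
 * entries per port (untagged + VLANID_ALL broadcast) plus two per
 * multicast list entry.  Runs under a spinlock and allocates with
 * GFP_ATOMIC because it can be reached from non-sleeping context.
 */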
static void ehea_update_bcmc_registrations(void)
{
	unsigned long flags;
	struct ehea_bcmc_reg_entry *arr = NULL;
	struct ehea_adapter *adapter;
	struct ehea_mc_list *mc_entry;
	int num_registrations = 0;
	int i = 0;
	int k;

	spin_lock_irqsave(&ehea_bcmc_regs.lock, flags);

	/* Determine number of registrations */
	list_for_each_entry(adapter, &adapter_list, list)
		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			num_registrations += 2;	/* Broadcast registrations */

			list_for_each_entry(mc_entry, &port->mc_list->list, list)
				num_registrations += 2;
		}

	if (num_registrations) {
		arr = kcalloc(num_registrations, sizeof(*arr), GFP_ATOMIC);
		if (!arr)
			goto out;	/* Keep the existing array */
	}

	list_for_each_entry(adapter, &adapter_list, list) {
		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			if (num_registrations == 0)
				goto out_update;

			arr[i].adh = adapter->handle;
			arr[i].port_id = port->logical_port_id;
			arr[i].reg_type = EHEA_BCMC_BROADCAST |
					  EHEA_BCMC_UNTAGGED;
			arr[i++].macaddr = port->mac_addr;

			arr[i].adh = adapter->handle;
			arr[i].port_id = port->logical_port_id;
			arr[i].reg_type = EHEA_BCMC_BROADCAST |
					  EHEA_BCMC_VLANID_ALL;
			arr[i++].macaddr = port->mac_addr;
			num_registrations -= 2;

			list_for_each_entry(mc_entry,
					    &port->mc_list->list, list) {
				if (num_registrations == 0)
					goto out_update;

				arr[i].adh = adapter->handle;
				arr[i].port_id = port->logical_port_id;
				arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
						  EHEA_BCMC_MULTICAST |
						  EHEA_BCMC_UNTAGGED;
				arr[i++].macaddr = mc_entry->macaddr;

				arr[i].adh = adapter->handle;
				arr[i].port_id = port->logical_port_id;
				arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
						  EHEA_BCMC_MULTICAST |
						  EHEA_BCMC_VLANID_ALL;
				arr[i++].macaddr = mc_entry->macaddr;
				num_registrations -= 2;
			}
		}
	}

out_update:
	kfree(ehea_bcmc_regs.arr);
	ehea_bcmc_regs.arr = arr;
	ehea_bcmc_regs.num_entries = i;
out:
	spin_unlock_irqrestore(&ehea_bcmc_regs.lock, flags);
}
static struct rtnl_link_stats64 *ehea_get_stats64(struct net_device *dev,
						  struct rtnl_link_stats64 *stats)
{
	struct ehea_port *port = netdev_priv(dev);
	u64 rx_packets = 0, tx_packets = 0, rx_bytes = 0, tx_bytes = 0;
	int i;

	for (i = 0; i < port->num_def_qps; i++) {
		rx_packets += port->port_res[i].rx_packets;
		rx_bytes += port->port_res[i].rx_bytes;
	}

	for (i = 0; i < port->num_def_qps; i++) {
		tx_packets += port->port_res[i].tx_packets;
		tx_bytes += port->port_res[i].tx_bytes;
	}

	stats->tx_packets = tx_packets;
	stats->rx_bytes = rx_bytes;
	stats->tx_bytes = tx_bytes;
	stats->rx_packets = rx_packets;

	return &port->stats;
}
static void ehea_update_stats(struct work_struct *work)
{
	struct ehea_port *port =
		container_of(work, struct ehea_port, stats_work.work);
	struct net_device *dev = port->netdev;
	struct rtnl_link_stats64 *stats = &port->stats;
	struct hcp_ehea_port_cb2 *cb2;
	u64 hret;

	cb2 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb2) {
		netdev_err(dev, "No mem for cb2. Some interface statistics were not updated\n");
		goto resched;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id,
				      H_PORT_CB2, H_PORT_CB2_ALL, cb2);
	if (hret != H_SUCCESS) {
		netdev_err(dev, "query_ehea_port failed\n");
		goto out_herr;
	}

	if (netif_msg_hw(port))
		ehea_dump(cb2, sizeof(*cb2), "net_device_stats");

	stats->multicast = cb2->rxmcp;
	stats->rx_errors = cb2->rxuerr;

out_herr:
	free_page((unsigned long)cb2);
resched:
	schedule_delayed_work(&port->stats_work, msecs_to_jiffies(1000));
}
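
/*
 * RQ1 refill: RQ1 is the low-latency receive queue; its skbs stay in
 * rq1_skba and only the doorbell (ehea_update_rq1a) tells the HEA how
 * many WQEs became available again.  os_skbs accumulates WQEs that could
 * not be refilled (allocation failure, stopped transfer) so a later call
 * can retry them.
 */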
static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
{
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	int max_index_mask = pr->rq1_skba.len - 1;
	int fill_wqes = pr->rq1_skba.os_skbs + nr_of_wqes;
	int adder = 0;
	int i;

	pr->rq1_skba.os_skbs = 0;

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		pr->rq1_skba.index = index;
		pr->rq1_skba.os_skbs = fill_wqes;
		return;
	}

	for (i = 0; i < fill_wqes; i++) {
		if (!skb_arr_rq1[index]) {
			skb_arr_rq1[index] = netdev_alloc_skb(dev,
							      EHEA_L_PKT_SIZE);
			if (!skb_arr_rq1[index]) {
				netdev_info(dev, "Unable to allocate enough skb in the array\n");
				pr->rq1_skba.os_skbs = fill_wqes - i;
				break;
			}
		}
		index--;
		index &= max_index_mask;
		adder++;
	}

	if (adder == 0)
		return;

	/* Ring doorbell */
	ehea_update_rq1a(pr->qp, adder);
}
static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
{
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	int i;

	if (nr_rq1a > pr->rq1_skba.len) {
		netdev_err(dev, "NR_RQ1A bigger than skb array len\n");
		return;
	}

	for (i = 0; i < nr_rq1a; i++) {
		skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
		if (!skb_arr_rq1[i]) {
			netdev_info(dev, "Not enough memory to allocate skb array\n");
			break;
		}
	}
	/* Ring doorbell */
	ehea_update_rq1a(pr->qp, i - 1);
}
static int ehea_refill_rq_def(struct ehea_port_res *pr,
			      struct ehea_q_skb_arr *q_skba, int rq_nr,
			      int num_wqes, int wqe_type, int packet_size)
{
	struct net_device *dev = pr->port->netdev;
	struct ehea_qp *qp = pr->qp;
	struct sk_buff **skb_arr = q_skba->arr;
	struct ehea_rwqe *rwqe;
	int i, index, max_index_mask, fill_wqes;
	int adder = 0;
	int ret = 0;

	fill_wqes = q_skba->os_skbs + num_wqes;
	q_skba->os_skbs = 0;

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		q_skba->os_skbs = fill_wqes;
		return ret;
	}

	index = q_skba->index;
	max_index_mask = q_skba->len - 1;
	for (i = 0; i < fill_wqes; i++) {
		u64 tmp_addr;
		struct sk_buff *skb;

		skb = netdev_alloc_skb_ip_align(dev, packet_size);
		if (!skb) {
			q_skba->os_skbs = fill_wqes - i;
			if (q_skba->os_skbs == q_skba->len - 2) {
				netdev_info(pr->port->netdev,
					    "rq%i ran dry - no mem for skb\n",
					    rq_nr);
				ret = -ENOMEM;
			}
			break;
		}

		skb_arr[index] = skb;
		tmp_addr = ehea_map_vaddr(skb->data);
		if (tmp_addr == -1) {
			dev_kfree_skb(skb);
			q_skba->os_skbs = fill_wqes - i;
			ret = 0;
			break;
		}

		rwqe = ehea_get_next_rwqe(qp, rq_nr);
		rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
			    | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
		rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
		rwqe->sg_list[0].vaddr = tmp_addr;
		rwqe->sg_list[0].len = packet_size;
		rwqe->data_segments = 1;

		index++;
		index &= max_index_mask;
		adder++;
	}

	q_skba->index = index;
	if (adder == 0)
		goto out;

	/* Ring doorbell */
	if (rq_nr == 2)
		ehea_update_rq2a(pr->qp, adder);
	else
		ehea_update_rq3a(pr->qp, adder);
out:
	return ret;
}
static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes)
{
	return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
				  nr_of_wqes, EHEA_RWQE2_TYPE,
				  EHEA_RQ2_PKT_SIZE);
}

static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes)
{
	return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
				  nr_of_wqes, EHEA_RWQE3_TYPE,
				  EHEA_MAX_PACKET_SIZE);
}
static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
{
	*rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5;
	if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0)
		return 0;
	if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) &&
	    (cqe->header_length == 0))
		return 0;
	return -EINVAL;
}
static inline void ehea_fill_skb(struct net_device *dev,
				 struct sk_buff *skb, struct ehea_cqe *cqe,
				 struct ehea_port_res *pr)
{
	int length = cqe->num_bytes_transfered - 4;	/* remove CRC */

	skb_put(skb, length);
	skb->protocol = eth_type_trans(skb, dev);

	/* The packet was not an IPV4 packet so a complemented checksum was
	   calculated. The value is found in the Internet Checksum field. */
	if (cqe->status & EHEA_CQE_BLIND_CKSUM) {
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = csum_unfold(~cqe->inet_checksum_value);
	} else
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	skb_record_rx_queue(skb, pr - &pr->port->port_res[0]);
}
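
/*
 * The two lookup helpers below prefetch the *next* array slot and the
 * start of its packet data while returning the skb for the current
 * completion, hiding cache-miss latency on the hot receive path.
 */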
static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
					       int arr_len,
					       struct ehea_cqe *cqe)
{
	int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
	struct sk_buff *skb;
	void *pref;
	int x;

	x = skb_index + 1;
	x &= (arr_len - 1);

	pref = skb_array[x];
	prefetchw(pref);
	prefetchw(pref + EHEA_CACHE_LINE);

	pref = (skb_array[x]->data);
	prefetch(pref);
	prefetch(pref + EHEA_CACHE_LINE);
	prefetch(pref + EHEA_CACHE_LINE * 2);
	prefetch(pref + EHEA_CACHE_LINE * 3);

	skb = skb_array[skb_index];
	skb_array[skb_index] = NULL;
	return skb;
}
static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array,
						  int arr_len, int wqe_index)
{
	struct sk_buff *skb;
	void *pref;
	int x;

	x = wqe_index + 1;
	x &= (arr_len - 1);

	pref = skb_array[x];
	prefetchw(pref);
	prefetchw(pref + EHEA_CACHE_LINE);

	pref = (skb_array[x]->data);
	prefetchw(pref);
	prefetchw(pref + EHEA_CACHE_LINE);

	skb = skb_array[wqe_index];
	skb_array[wqe_index] = NULL;
	return skb;
}
static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
				 struct ehea_cqe *cqe, int *processed_rq2,
				 int *processed_rq3)
{
	struct sk_buff *skb;

	if (cqe->status & EHEA_CQE_STAT_ERR_TCP)
		pr->p_stats.err_tcp_cksum++;
	if (cqe->status & EHEA_CQE_STAT_ERR_IP)
		pr->p_stats.err_ip_cksum++;
	if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
		pr->p_stats.err_frame_crc++;

	if (rq == 2) {
		*processed_rq2 += 1;
		skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe);
		dev_kfree_skb(skb);
	} else if (rq == 3) {
		*processed_rq3 += 1;
		skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe);
		dev_kfree_skb(skb);
	}

	if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
		if (netif_msg_rx_err(pr->port)) {
			pr_err("Critical receive error for QP %d. Resetting port.\n",
			       pr->qp->init_attr.qp_nr);
			ehea_dump(cqe, sizeof(*cqe), "CQE");
		}
		ehea_schedule_port_reset(pr->port);
		return 1;
	}

	return 0;
}
static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
		       void **tcph, u64 *hdr_flags, void *priv)
{
	struct ehea_cqe *cqe = priv;
	unsigned int ip_len;
	struct iphdr *iph;

	/* non tcp/udp packets */
	if (!cqe->header_length)
		return -1;

	/* non tcp packets */
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	if (iph->protocol != IPPROTO_TCP)
		return -1;

	ip_len = ip_hdrlen(skb);
	skb_set_transport_header(skb, ip_len);
	*tcph = tcp_hdr(skb);

	/* check if ip header and tcp header are complete */
	if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
		return -1;

	*hdr_flags = LRO_IPV4 | LRO_TCP;
	*iphdr = iph;

	return 0;
}
static void ehea_proc_skb(struct ehea_port_res *pr, struct ehea_cqe *cqe,
			  struct sk_buff *skb)
{
	if (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT)
		__vlan_hwaccel_put_tag(skb, cqe->vlan_tag);

	if (skb->dev->features & NETIF_F_LRO)
		lro_receive_skb(&pr->lro_mgr, skb, cqe);
	else
		netif_receive_skb(skb);
}
static int ehea_proc_rwqes(struct net_device *dev,
			   struct ehea_port_res *pr,
			   int budget)
{
	struct ehea_port *port = pr->port;
	struct ehea_qp *qp = pr->qp;
	struct ehea_cqe *cqe;
	struct sk_buff *skb;
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr;
	struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr;
	int skb_arr_rq1_len = pr->rq1_skba.len;
	int skb_arr_rq2_len = pr->rq2_skba.len;
	int skb_arr_rq3_len = pr->rq3_skba.len;
	int processed, processed_rq1, processed_rq2, processed_rq3;
	u64 processed_bytes = 0;
	int wqe_index, last_wqe_index, rq, port_reset;

	processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
	last_wqe_index = 0;

	cqe = ehea_poll_rq1(qp, &wqe_index);
	while ((processed < budget) && cqe) {
		ehea_inc_rq1(qp);
		processed_rq1++;
		processed++;
		if (netif_msg_rx_status(port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		last_wqe_index = wqe_index;
		if (!ehea_check_cqe(cqe, &rq)) {
			if (rq == 1) {
				/* LL RQ1 */
				skb = get_skb_by_index_ll(skb_arr_rq1,
							  skb_arr_rq1_len,
							  wqe_index);
				if (unlikely(!skb)) {
					netif_info(port, rx_err, dev,
						   "LL rq1: skb=NULL\n");

					skb = netdev_alloc_skb(dev,
							       EHEA_L_PKT_SIZE);
					if (!skb) {
						netdev_err(dev, "Not enough memory to allocate skb\n");
						break;
					}
				}
				skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
							cqe->num_bytes_transfered - 4);
				ehea_fill_skb(dev, skb, cqe, pr);
			} else if (rq == 2) {
				/* RQ2 */
				skb = get_skb_by_index(skb_arr_rq2,
						       skb_arr_rq2_len, cqe);
				if (unlikely(!skb)) {
					netif_err(port, rx_err, dev,
						  "rq2: skb=NULL\n");
					break;
				}
				ehea_fill_skb(dev, skb, cqe, pr);
			} else {
				/* RQ3 */
				skb = get_skb_by_index(skb_arr_rq3,
						       skb_arr_rq3_len, cqe);
				if (unlikely(!skb)) {
					netif_err(port, rx_err, dev,
						  "rq3: skb=NULL\n");
					break;
				}
				ehea_fill_skb(dev, skb, cqe, pr);
			}

			processed_bytes += skb->len;
			ehea_proc_skb(pr, cqe, skb);
		} else {
			pr->p_stats.poll_receive_errors++;
			port_reset = ehea_treat_poll_error(pr, rq, cqe,
							   &processed_rq2,
							   &processed_rq3);
			if (port_reset)
				break;
		}
		cqe = ehea_poll_rq1(qp, &wqe_index);
	}
	if (dev->features & NETIF_F_LRO)
		lro_flush_all(&pr->lro_mgr);

	pr->rx_packets += processed;
	pr->rx_bytes += processed_bytes;

	ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
	ehea_refill_rq2(pr, processed_rq2);
	ehea_refill_rq3(pr, processed_rq3);

	return processed;
}
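
/*
 * HW/SW queue synchronization check: check_sqs() posts one dummy send
 * WQE per queue, tagged with SWQE_RESTART_CHECK and marked PURGE +
 * SIGNALLED_COMPLETION.  When its completion shows up in
 * ehea_proc_cqes(), sq_restart_flag is set and the waiter on restart_wq
 * is released; a timeout means the queues are out of sync and the port
 * is reset.
 */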
#define SWQE_RESTART_CHECK 0xdeadbeaff00d0000ull

static void reset_sq_restart_flag(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps; i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		pr->sq_restart_flag = 0;
	}
	wake_up(&port->restart_wq);
}
static void check_sqs(struct ehea_port *port)
{
	struct ehea_swqe *swqe;
	int swqe_index;
	int i;

	for (i = 0; i < port->num_def_qps; i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		int ret;

		swqe = ehea_get_swqe(pr->qp, &swqe_index);
		memset(swqe, 0, SWQE_HEADER_SIZE);
		atomic_dec(&pr->swqe_avail);

		swqe->tx_control |= EHEA_SWQE_PURGE;
		swqe->wr_id = SWQE_RESTART_CHECK;
		swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
		swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT;
		swqe->immediate_data_length = 80;

		ehea_post_swqe(pr->qp, swqe);

		ret = wait_event_timeout(port->restart_wq,
					 pr->sq_restart_flag == 0,
					 msecs_to_jiffies(100));

		if (!ret) {
			pr_err("HW/SW queues out of sync\n");
			ehea_schedule_port_reset(pr->port);
			return;
		}
	}
}
static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
{
	struct sk_buff *skb;
	struct ehea_cq *send_cq = pr->send_cq;
	struct ehea_cqe *cqe;
	int quota = my_quota;
	int cqe_counter = 0;
	int swqe_av = 0;
	int index;
	struct netdev_queue *txq = netdev_get_tx_queue(pr->port->netdev,
						       pr - &pr->port->port_res[0]);

	cqe = ehea_poll_cq(send_cq);
	while (cqe && (quota > 0)) {
		ehea_inc_cq(send_cq);
		quota--;
		cqe_counter++;

		if (cqe->wr_id == SWQE_RESTART_CHECK) {
			pr->sq_restart_flag = 1;
			swqe_av++;
			break;
		}

		if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
			pr_err("Bad send completion status=0x%04X\n",
			       cqe->status);

			if (netif_msg_tx_err(pr->port))
				ehea_dump(cqe, sizeof(*cqe), "Send CQE");

			if (cqe->status & EHEA_CQE_STAT_RESET_MASK) {
				pr_err("Resetting port\n");
				ehea_schedule_port_reset(pr->port);
				break;
			}
		}

		if (netif_msg_tx_done(pr->port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id)
			   == EHEA_SWQE2_TYPE)) {

			index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
			skb = pr->sq_skba.arr[index];
			dev_kfree_skb(skb);
			pr->sq_skba.arr[index] = NULL;
		}

		swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id);
		cqe = ehea_poll_cq(send_cq);
	}

	ehea_update_feca(send_cq, cqe_counter);
	atomic_add(swqe_av, &pr->swqe_avail);

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	wake_up(&pr->port->swqe_avail_wq);

	return cqe;
}
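
/*
 * NAPI poll: when the budget is not exhausted, completion interrupts are
 * re-armed (ehea_reset_cq_ep/_n1) and both CQs are checked once more; if
 * new completions raced in, napi_reschedule() resumes polling instead of
 * returning, closing the lost-event window.
 */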
#define EHEA_POLL_MAX_CQES 65535

static int ehea_poll(struct napi_struct *napi, int budget)
{
	struct ehea_port_res *pr = container_of(napi, struct ehea_port_res,
						napi);
	struct net_device *dev = pr->port->netdev;
	struct ehea_cqe *cqe;
	struct ehea_cqe *cqe_skb = NULL;
	int wqe_index;
	int rx = 0;

	cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
	rx += ehea_proc_rwqes(dev, pr, budget - rx);

	while (rx != budget) {
		napi_complete(napi);
		ehea_reset_cq_ep(pr->recv_cq);
		ehea_reset_cq_ep(pr->send_cq);
		ehea_reset_cq_n1(pr->recv_cq);
		ehea_reset_cq_n1(pr->send_cq);

		cqe = ehea_poll_rq1(pr->qp, &wqe_index);
		cqe_skb = ehea_poll_cq(pr->send_cq);

		if (!cqe && !cqe_skb)
			return rx;

		if (!napi_reschedule(napi))
			return rx;

		cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
		rx += ehea_proc_rwqes(dev, pr, budget - rx);
	}

	return rx;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void ehea_netpoll(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	int i;

	for (i = 0; i < port->num_def_qps; i++)
		napi_schedule(&port->port_res[i].napi);
}
#endif

static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
{
	struct ehea_port_res *pr = param;

	napi_schedule(&pr->napi);

	return IRQ_HANDLED;
}
static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
{
	struct ehea_port *port = param;
	struct ehea_eqe *eqe;
	struct ehea_qp *qp;
	u32 qp_token;
	u64 resource_type, aer, aerr;
	int reset_port = 0;

	eqe = ehea_poll_eq(port->qp_eq);

	while (eqe) {
		qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
		pr_err("QP aff_err: entry=0x%llx, token=0x%x\n",
		       eqe->entry, qp_token);

		qp = port->port_res[qp_token].qp;

		resource_type = ehea_error_data(port->adapter, qp->fw_handle,
						&aer, &aerr);

		if (resource_type == EHEA_AER_RESTYPE_QP) {
			if ((aer & EHEA_AER_RESET_MASK) ||
			    (aerr & EHEA_AERR_RESET_MASK))
				reset_port = 1;
		} else
			reset_port = 1;	/* Reset in case of CQ or EQ error */

		eqe = ehea_poll_eq(port->qp_eq);
	}

	if (reset_port) {
		pr_err("Resetting port\n");
		ehea_schedule_port_reset(port);
	}

	return IRQ_HANDLED;
}
static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter,
				       u32 logical_port)
{
	int i;

	for (i = 0; i < EHEA_MAX_PORTS; i++)
		if (adapter->port[i])
			if (adapter->port[i]->logical_port_id == logical_port)
				return adapter->port[i];
	return NULL;
}
int ehea_sense_port_attr(struct ehea_port *port)
{
	int ret;
	u64 hret;
	struct hcp_ehea_port_cb0 *cb0;

	/* may be called via ehea_neq_tasklet() */
	cb0 = (void *)get_zeroed_page(GFP_ATOMIC);
	if (!cb0) {
		pr_err("no mem for cb0\n");
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id, H_PORT_CB0,
				      EHEA_BMASK_SET(H_PORT_CB0_ALL, 0xFFFF),
				      cb0);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_free;
	}

	/* MAC address */
	port->mac_addr = cb0->port_mac_addr << 16;

	if (!is_valid_ether_addr((u8 *)&port->mac_addr)) {
		ret = -EADDRNOTAVAIL;
		goto out_free;
	}

	/* Port speed */
	switch (cb0->port_speed) {
	case H_SPEED_10M_H:
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 0;
		break;
	case H_SPEED_10M_F:
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 1;
		break;
	case H_SPEED_100M_H:
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 0;
		break;
	case H_SPEED_100M_F:
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 1;
		break;
	case H_SPEED_1G_F:
		port->port_speed = EHEA_SPEED_1G;
		port->full_duplex = 1;
		break;
	case H_SPEED_10G_F:
		port->port_speed = EHEA_SPEED_10G;
		port->full_duplex = 1;
		break;
	default:
		port->port_speed = 0;
		port->full_duplex = 0;
		break;
	}

	port->num_mcs = cb0->num_default_qps;

	/* Number of default QPs */
	if (use_mcs)
		port->num_def_qps = cb0->num_default_qps;
	else
		port->num_def_qps = 1;

	if (!port->num_def_qps) {
		ret = -EINVAL;
		goto out_free;
	}

	ret = 0;
out_free:
	if (ret || netif_msg_probe(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr");
	free_page((unsigned long)cb0);
out:
	return ret;
}
int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
{
	struct hcp_ehea_port_cb4 *cb4;
	u64 hret;
	int ret = 0;

	cb4 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb4) {
		pr_err("no mem for cb4\n");
		ret = -ENOMEM;
		goto out;
	}

	cb4->port_speed = port_speed;

	netif_carrier_off(port->netdev);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB4, H_PORT_CB4_SPEED, cb4);
	if (hret == H_SUCCESS) {
		port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 1 : 0;

		hret = ehea_h_query_ehea_port(port->adapter->handle,
					      port->logical_port_id,
					      H_PORT_CB4, H_PORT_CB4_SPEED,
					      cb4);
		if (hret == H_SUCCESS) {
			switch (cb4->port_speed) {
			case H_SPEED_10M_H:
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 0;
				break;
			case H_SPEED_10M_F:
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 1;
				break;
			case H_SPEED_100M_H:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 0;
				break;
			case H_SPEED_100M_F:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 1;
				break;
			case H_SPEED_1G_F:
				port->port_speed = EHEA_SPEED_1G;
				port->full_duplex = 1;
				break;
			case H_SPEED_10G_F:
				port->port_speed = EHEA_SPEED_10G;
				port->full_duplex = 1;
				break;
			default:
				port->port_speed = 0;
				port->full_duplex = 0;
				break;
			}
		} else {
			pr_err("Failed sensing port speed\n");
			ret = -EIO;
		}
	} else {
		if (hret == H_AUTHORITY) {
			pr_info("Hypervisor denied setting port speed\n");
			ret = -EPERM;
		} else {
			pr_err("Failed setting port speed\n");
			ret = -EIO;
		}
	}
	if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP))
		netif_carrier_on(port->netdev);

	free_page((unsigned long)cb4);
out:
	return ret;
}
static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
{
	int ret;
	u8 ec;
	u8 portnum;
	struct ehea_port *port;
	struct net_device *dev;

	ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
	portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
	port = ehea_get_port(adapter, portnum);
	dev = port->netdev;

	switch (ec) {
	case EHEA_EC_PORTSTATE_CHG:	/* port state change */

		if (!port) {
			netdev_err(dev, "unknown portnum %x\n", portnum);
			break;
		}

		if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
			if (!netif_carrier_ok(dev)) {
				ret = ehea_sense_port_attr(port);
				if (ret) {
					netdev_err(dev, "failed resensing port attributes\n");
					break;
				}

				netif_info(port, link, dev,
					   "Logical port up: %dMbps %s Duplex\n",
					   port->port_speed,
					   port->full_duplex == 1 ?
					   "Full" : "Half");

				netif_carrier_on(dev);
				netif_wake_queue(dev);
			}
		} else
			if (netif_carrier_ok(dev)) {
				netif_info(port, link, dev,
					   "Logical port down\n");
				netif_carrier_off(dev);
				netif_tx_disable(dev);
			}

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
			port->phy_link = EHEA_PHY_LINK_UP;
			netif_info(port, link, dev,
				   "Physical port up\n");
			if (prop_carrier_state)
				netif_carrier_on(dev);
		} else {
			port->phy_link = EHEA_PHY_LINK_DOWN;
			netif_info(port, link, dev,
				   "Physical port down\n");
			if (prop_carrier_state)
				netif_carrier_off(dev);
		}

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
			netdev_info(dev,
				    "External switch port is primary port\n");
		else
			netdev_info(dev,
				    "External switch port is backup port\n");

		break;
	case EHEA_EC_ADAPTER_MALFUNC:
		netdev_err(dev, "Adapter malfunction\n");
		break;
	case EHEA_EC_PORT_MALFUNC:
		netdev_info(dev, "Port malfunction\n");
		netif_carrier_off(dev);
		netif_tx_disable(dev);
		break;
	default:
		netdev_err(dev, "unknown event code %x, eqe=0x%llX\n", ec, eqe);
		break;
	}
}
static void ehea_neq_tasklet(unsigned long data)
{
	struct ehea_adapter *adapter = (struct ehea_adapter *)data;
	struct ehea_eqe *eqe;
	u64 event_mask;

	eqe = ehea_poll_eq(adapter->neq);
	pr_debug("eqe=%p\n", eqe);

	while (eqe) {
		pr_debug("*eqe=%lx\n", (unsigned long) eqe->entry);
		ehea_parse_eqe(adapter, eqe->entry);
		eqe = ehea_poll_eq(adapter->neq);
		pr_debug("next eqe=%p\n", eqe);
	}

	event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
		   | EHEA_BMASK_SET(NELR_ADAPTER_MALFUNC, 1)
		   | EHEA_BMASK_SET(NELR_PORT_MALFUNC, 1);

	ehea_h_reset_events(adapter->handle,
			    adapter->neq->fw_handle, event_mask);
}
static irqreturn_t ehea_interrupt_neq(int irq, void *param)
{
	struct ehea_adapter *adapter = param;
	tasklet_hi_schedule(&adapter->neq_tasklet);
	return IRQ_HANDLED;
}
static int ehea_fill_port_res(struct ehea_port_res *pr)
{
	int ret;
	struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;

	ehea_init_fill_rq1(pr, pr->rq1_skba.len);

	ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);

	ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1);

	return ret;
}
static int ehea_reg_interrupts(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;
	int i, ret;

	snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff",
		 dev->name);

	ret = ibmebus_request_irq(port->qp_eq->attr.ist1,
				  ehea_qp_aff_irq_handler,
				  IRQF_DISABLED, port->int_aff_name, port);
	if (ret) {
		netdev_err(dev, "failed registering irq for qp_aff_irq_handler:ist=%X\n",
			   port->qp_eq->attr.ist1);
		goto out_free_qpeq;
	}

	netif_info(port, ifup, dev,
		   "irq_handle 0x%X for function qp_aff_irq_handler registered\n",
		   port->qp_eq->attr.ist1);

	for (i = 0; i < port->num_def_qps; i++) {
		pr = &port->port_res[i];
		snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
			 "%s-queue%d", dev->name, i);
		ret = ibmebus_request_irq(pr->eq->attr.ist1,
					  ehea_recv_irq_handler,
					  IRQF_DISABLED, pr->int_send_name,
					  pr);
		if (ret) {
			netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n",
				   i, pr->eq->attr.ist1);
			goto out_free_req;
		}
		netif_info(port, ifup, dev,
			   "irq_handle 0x%X for function ehea_queue_int %d registered\n",
			   pr->eq->attr.ist1, i);
	}
out:
	return ret;

out_free_req:
	while (--i >= 0) {
		u32 ist = port->port_res[i].eq->attr.ist1;
		ibmebus_free_irq(ist, &port->port_res[i]);
	}

out_free_qpeq:
	ibmebus_free_irq(port->qp_eq->attr.ist1, port);
	i = port->num_def_qps;

	goto out;
}
static void ehea_free_interrupts(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;
	int i;

	/* send */
	for (i = 0; i < port->num_def_qps; i++) {
		pr = &port->port_res[i];
		ibmebus_free_irq(pr->eq->attr.ist1, pr);
		netif_info(port, intr, dev,
			   "free send irq for res %d with handle 0x%X\n",
			   i, pr->eq->attr.ist1);
	}

	/* associated events */
	ibmebus_free_irq(port->qp_eq->attr.ist1, port);
	netif_info(port, intr, dev,
		   "associated event interrupt for handle 0x%X freed\n",
		   port->qp_eq->attr.ist1);
}
static int ehea_configure_port(struct ehea_port *port)
{
	int ret, i;
	u64 hret, mask;
	struct hcp_ehea_port_cb0 *cb0;

	ret = -ENOMEM;
	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0)
		goto out;

	cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1)
		     | EHEA_BMASK_SET(PXLY_RC_IP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_TCP_UDP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_XTRACT, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_TAG_FILTER,
				      PXLY_RC_VLAN_FILTER)
		     | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1);

	for (i = 0; i < port->num_mcs; i++)
		if (use_mcs)
			cb0->default_qpn_arr[i] =
				port->port_res[i].qp->init_attr.qp_nr;
		else
			cb0->default_qpn_arr[i] =
				port->port_res[0].qp->init_attr.qp_nr;

	if (netif_msg_ifup(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port");

	mask = EHEA_BMASK_SET(H_PORT_CB0_PRC, 1)
	     | EHEA_BMASK_SET(H_PORT_CB0_DEFQPNARRAY, 1);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB0, mask, cb0);
	ret = -EIO;
	if (hret != H_SUCCESS)
		goto out_free;

	ret = 0;

out_free:
	free_page((unsigned long)cb0);
out:
	return ret;
}
int ehea_gen_smrs(struct ehea_port_res *pr)
{
	int ret;
	struct ehea_adapter *adapter = pr->port->adapter;

	ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr);
	if (ret)
		goto out;

	ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr);
	if (ret)
		goto out_free;

	return 0;

out_free:
	ehea_rem_mr(&pr->send_mr);
out:
	pr_err("Generating SMRS failed\n");
	return -EIO;
}

int ehea_rem_smrs(struct ehea_port_res *pr)
{
	if ((ehea_rem_mr(&pr->send_mr)) ||
	    (ehea_rem_mr(&pr->recv_mr)))
		return -EIO;
	else
		return 0;
}
static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
{
	int arr_size = sizeof(void *) * max_q_entries;

	q_skba->arr = vzalloc(arr_size);
	if (!q_skba->arr)
		return -ENOMEM;

	q_skba->len = max_q_entries;
	q_skba->index = 0;
	q_skba->os_skbs = 0;

	return 0;
}
static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
			      struct port_res_cfg *pr_cfg, int queue_token)
{
	struct ehea_adapter *adapter = port->adapter;
	enum ehea_eq_type eq_type = EHEA_EQ;
	struct ehea_qp_init_attr *init_attr = NULL;
	int ret = -EIO;
	u64 tx_bytes, rx_bytes, tx_packets, rx_packets;

	tx_bytes = pr->tx_bytes;
	tx_packets = pr->tx_packets;
	rx_bytes = pr->rx_bytes;
	rx_packets = pr->rx_packets;

	memset(pr, 0, sizeof(struct ehea_port_res));

	pr->tx_bytes = tx_bytes;
	pr->tx_packets = tx_packets;
	pr->rx_bytes = rx_bytes;
	pr->rx_packets = rx_packets;

	pr->port = port;

	pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
	if (!pr->eq) {
		pr_err("create_eq failed (eq)\n");
		goto out_free;
	}

	pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq,
				     pr->eq->fw_handle,
				     port->logical_port_id);
	if (!pr->recv_cq) {
		pr_err("create_cq failed (cq_recv)\n");
		goto out_free;
	}

	pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq,
				     pr->eq->fw_handle,
				     port->logical_port_id);
	if (!pr->send_cq) {
		pr_err("create_cq failed (cq_send)\n");
		goto out_free;
	}

	if (netif_msg_ifup(port))
		pr_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d\n",
			pr->send_cq->attr.act_nr_of_cqes,
			pr->recv_cq->attr.act_nr_of_cqes);

	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
	if (!init_attr) {
		ret = -ENOMEM;
		pr_err("no mem for ehea_qp_init_attr\n");
		goto out_free;
	}

	init_attr->low_lat_rq1 = 1;
	init_attr->signalingtype = 1;	/* generate CQE if specified in WQE */
	init_attr->rq_count = 3;
	init_attr->qp_token = queue_token;
	init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq;
	init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1;
	init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2;
	init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3;
	init_attr->wqe_size_enc_sq = EHEA_SG_SQ;
	init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1;
	init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2;
	init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3;
	init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD;
	init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD;
	init_attr->port_nr = port->logical_port_id;
	init_attr->send_cq_handle = pr->send_cq->fw_handle;
	init_attr->recv_cq_handle = pr->recv_cq->fw_handle;
	init_attr->aff_eq_handle = port->qp_eq->fw_handle;

	pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
	if (!pr->qp) {
		pr_err("create_qp failed\n");
		ret = -EIO;
		goto out_free;
	}

	if (netif_msg_ifup(port))
		pr_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d\n",
			init_attr->qp_nr,
			init_attr->act_nr_send_wqes,
			init_attr->act_nr_rwqes_rq1,
			init_attr->act_nr_rwqes_rq2,
			init_attr->act_nr_rwqes_rq3);

	pr->sq_skba_size = init_attr->act_nr_send_wqes + 1;

	ret = ehea_init_q_skba(&pr->sq_skba, pr->sq_skba_size);
	ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1);
	ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1);
	ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1);
	if (ret)
		goto out_free;

	pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10;
	if (ehea_gen_smrs(pr) != 0) {
		ret = -EIO;
		goto out_free;
	}

	atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1);

	kfree(init_attr);

	netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64);

	pr->lro_mgr.max_aggr = pr->port->lro_max_aggr;
	pr->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS;
	pr->lro_mgr.lro_arr = pr->lro_desc;
	pr->lro_mgr.get_skb_header = get_skb_hdr;
	pr->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
	pr->lro_mgr.dev = port->netdev;
	pr->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
	pr->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;

	ret = 0;
	goto out;

out_free:
	kfree(init_attr);
	vfree(pr->sq_skba.arr);
	vfree(pr->rq1_skba.arr);
	vfree(pr->rq2_skba.arr);
	vfree(pr->rq3_skba.arr);
	ehea_destroy_qp(pr->qp);
	ehea_destroy_cq(pr->send_cq);
	ehea_destroy_cq(pr->recv_cq);
	ehea_destroy_eq(pr->eq);
out:
	return ret;
}
static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
{
	int ret, i;

	if (pr->qp)
		netif_napi_del(&pr->napi);

	ret = ehea_destroy_qp(pr->qp);

	if (!ret) {
		ehea_destroy_cq(pr->send_cq);
		ehea_destroy_cq(pr->recv_cq);
		ehea_destroy_eq(pr->eq);

		for (i = 0; i < pr->rq1_skba.len; i++)
			if (pr->rq1_skba.arr[i])
				dev_kfree_skb(pr->rq1_skba.arr[i]);

		for (i = 0; i < pr->rq2_skba.len; i++)
			if (pr->rq2_skba.arr[i])
				dev_kfree_skb(pr->rq2_skba.arr[i]);

		for (i = 0; i < pr->rq3_skba.len; i++)
			if (pr->rq3_skba.arr[i])
				dev_kfree_skb(pr->rq3_skba.arr[i]);

		for (i = 0; i < pr->sq_skba.len; i++)
			if (pr->sq_skba.arr[i])
				dev_kfree_skb(pr->sq_skba.arr[i]);

		vfree(pr->rq1_skba.arr);
		vfree(pr->rq2_skba.arr);
		vfree(pr->rq3_skba.arr);
		vfree(pr->sq_skba.arr);
		ret = ehea_rem_smrs(pr);
	}
	return ret;
}
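
/*
 * SWQE2 TX layout: up to SWQE2_MAX_IMM bytes of linear data are copied
 * into the immediate area (for TSO only the headers); the remaining
 * linear data and all page fragments are described by sg entries that
 * the HEA fetches through the registered memory region (lkey).
 */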
static void write_swqe2_immediate(struct sk_buff *skb, struct ehea_swqe *swqe,
				  u32 lkey)
{
	int skb_data_size = skb_headlen(skb);
	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
	unsigned int immediate_len = SWQE2_MAX_IMM;

	swqe->descriptors = 0;

	if (skb_is_gso(skb)) {
		swqe->tx_control |= EHEA_SWQE_TSO;
		swqe->mss = skb_shinfo(skb)->gso_size;
		/*
		 * For TSO packets we only copy the headers into the
		 * immediate area.
		 */
		immediate_len = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
	}

	if (skb_is_gso(skb) || skb_data_size >= SWQE2_MAX_IMM) {
		skb_copy_from_linear_data(skb, imm_data, immediate_len);
		swqe->immediate_data_length = immediate_len;

		if (skb_data_size > immediate_len) {
			sg1entry->l_key = lkey;
			sg1entry->len = skb_data_size - immediate_len;
			sg1entry->vaddr =
				ehea_map_vaddr(skb->data + immediate_len);
			swqe->descriptors++;
		}
	} else {
		skb_copy_from_linear_data(skb, imm_data, skb_data_size);
		swqe->immediate_data_length = skb_data_size;
	}
}
static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
				    struct ehea_swqe *swqe, u32 lkey)
{
	struct ehea_vsgentry *sg_list, *sg1entry, *sgentry;
	skb_frag_t *frag;
	int nfrags, sg1entry_contains_frag_data, i;

	nfrags = skb_shinfo(skb)->nr_frags;
	sg1entry = &swqe->u.immdata_desc.sg_entry;
	sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list;
	sg1entry_contains_frag_data = 0;

	write_swqe2_immediate(skb, swqe, lkey);

	/* write descriptors */
	if (nfrags > 0) {
		if (swqe->descriptors == 0) {
			/* sg1entry not yet used */
			frag = &skb_shinfo(skb)->frags[0];

			/* copy sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = frag->size;
			sg1entry->vaddr =
				ehea_map_vaddr(skb_frag_address(frag));
			swqe->descriptors++;
			sg1entry_contains_frag_data = 1;
		}

		for (i = sg1entry_contains_frag_data; i < nfrags; i++) {
			/* copy sg entry data */
			frag = &skb_shinfo(skb)->frags[i];
			sgentry = &sg_list[i - sg1entry_contains_frag_data];

			sgentry->l_key = lkey;
			sgentry->len = frag->size;
			sgentry->vaddr = ehea_map_vaddr(skb_frag_address(frag));
			swqe->descriptors++;
		}
	}
}
static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
{
	int ret = 0;
	u64 hret;
	u8 reg_type;

	/* De/Register untagged packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		pr_err("%sregistering bc address failed (untagged)\n",
		       hcallid == H_REG_BCMC ? "" : "de");
		ret = -EIO;
		goto out_herr;
	}

	/* De/Register VLAN packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_VLANID_ALL;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		pr_err("%sregistering bc address failed (vlan)\n",
		       hcallid == H_REG_BCMC ? "" : "de");
		ret = -EIO;
	}
out_herr:
	return ret;
}
static int ehea_set_mac_addr(struct net_device *dev, void *sa)
{
	struct ehea_port *port = netdev_priv(dev);
	struct sockaddr *mac_addr = sa;
	struct hcp_ehea_port_cb0 *cb0;
	int ret;
	u64 hret;

	if (!is_valid_ether_addr(mac_addr->sa_data)) {
		ret = -EADDRNOTAVAIL;
		goto out;
	}

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		pr_err("no mem for cb0\n");
		ret = -ENOMEM;
		goto out;
	}

	memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN);

	cb0->port_mac_addr = cb0->port_mac_addr >> 16;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id, H_PORT_CB0,
				       EHEA_BMASK_SET(H_PORT_CB0_MAC, 1), cb0);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_free;
	}

	memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);

	/* Deregister old MAC in pHYP */
	if (port->state == EHEA_PORT_UP) {
		ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
		if (ret)
			goto out_upregs;
	}

	port->mac_addr = cb0->port_mac_addr << 16;

	/* Register new MAC in pHYP */
	if (port->state == EHEA_PORT_UP) {
		ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
		if (ret)
			goto out_upregs;
	}

	ret = 0;

out_upregs:
	ehea_update_bcmc_registrations();
out_free:
	free_page((unsigned long)cb0);
out:
	return ret;
}
static void ehea_promiscuous_error(u64 hret, int enable)
{
	if (hret == H_AUTHORITY)
		pr_info("Hypervisor denied %sabling promiscuous mode\n",
			enable == 1 ? "en" : "dis");
	else
		pr_err("failed %sabling promiscuous mode\n",
		       enable == 1 ? "en" : "dis");
}
static void ehea_promiscuous(struct net_device *dev, int enable)
{
	struct ehea_port *port = netdev_priv(dev);
	struct hcp_ehea_port_cb7 *cb7;
	u64 hret;

	if (enable == port->promisc)
		return;

	cb7 = (void *)get_zeroed_page(GFP_ATOMIC);
	if (!cb7) {
		pr_err("no mem for cb7\n");
		goto out;
	}

	/* Modify Pxs_DUCQPN in CB7 */
	cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB7, H_PORT_CB7_DUCQPN, cb7);
	if (hret) {
		ehea_promiscuous_error(hret, enable);
		goto out;
	}

	port->promisc = enable;
out:
	free_page((unsigned long)cb7);
}
static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
				     u32 hcallid)
{
	u64 hret;
	u8 reg_type;

	reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
		 | EHEA_BCMC_UNTAGGED;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);
	if (hret)
		goto out;

	reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
		 | EHEA_BCMC_VLANID_ALL;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);
out:
	return hret;
}
static int ehea_drop_multicast_list(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_mc_list *mc_entry = port->mc_list;
	struct list_head *pos;
	struct list_head *temp;
	int ret = 0;
	u64 hret;

	list_for_each_safe(pos, temp, &(port->mc_list->list)) {
		mc_entry = list_entry(pos, struct ehea_mc_list, list);

		hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
						 H_DEREG_BCMC);
		if (hret) {
			pr_err("failed deregistering mcast MAC\n");
			ret = -EIO;
		}

		list_del(pos);
		kfree(mc_entry);
	}
	return ret;
}
static void ehea_allmulti(struct net_device *dev, int enable)
{
	struct ehea_port *port = netdev_priv(dev);
	u64 hret;

	if (!port->allmulti) {
		if (enable) {
			/* Enable ALLMULTI */
			ehea_drop_multicast_list(dev);
			hret = ehea_multicast_reg_helper(port, 0, H_REG_BCMC);
			if (!hret)
				port->allmulti = 1;
			else
				netdev_err(dev,
					   "failed enabling IFF_ALLMULTI\n");
		}
	} else
		if (!enable) {
			/* Disable ALLMULTI */
			hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC);
			if (!hret)
				port->allmulti = 0;
			else
				netdev_err(dev,
					   "failed disabling IFF_ALLMULTI\n");
		}
}
static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
{
	struct ehea_mc_list *ehea_mcl_entry;
	u64 hret;

	ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
	if (!ehea_mcl_entry) {
		pr_err("no mem for mcl_entry\n");
		return;
	}

	INIT_LIST_HEAD(&ehea_mcl_entry->list);

	memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN);

	hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr,
					 H_REG_BCMC);
	if (!hret)
		list_add(&ehea_mcl_entry->list, &port->mc_list->list);
	else {
		pr_err("failed registering mcast MAC\n");
		kfree(ehea_mcl_entry);
	}
}
static void ehea_set_multicast_list(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int ret;

	if (port->promisc) {
		ehea_promiscuous(dev, 1);
		return;
	}
	ehea_promiscuous(dev, 0);

	if (dev->flags & IFF_ALLMULTI) {
		ehea_allmulti(dev, 1);
		goto out;
	}
	ehea_allmulti(dev, 0);

	if (!netdev_mc_empty(dev)) {
		ret = ehea_drop_multicast_list(dev);
		if (ret) {
			/* Dropping the current multicast list failed.
			 * Enabling ALL_MULTI is the best we can do.
			 */
			ehea_allmulti(dev, 1);
		}

		if (netdev_mc_count(dev) > port->adapter->max_mc_mac) {
			pr_info("Mcast registration limit reached (0x%llx). Use ALLMULTI!\n",
				port->adapter->max_mc_mac);
			goto out;
		}

		netdev_for_each_mc_addr(ha, dev)
			ehea_add_multicast_entry(port, ha->addr);

	}
out:
	ehea_update_bcmc_registrations();
}
static int ehea_change_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu < 68) || (new_mtu > EHEA_MAX_PACKET_SIZE))
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}
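
/*
 * TX checksum offload: for IPv4 the SWQE carries the IP header span
 * (ip_start/ip_end) and the offset of the L4 checksum field
 * (tcp_offset), so the HEA can insert the IP and TCP/UDP checksums when
 * the skb is CHECKSUM_PARTIAL.
 */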
static void xmit_common(struct sk_buff *skb, struct ehea_swqe *swqe)
{
	swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT | EHEA_SWQE_CRC;

	if (skb->protocol != htons(ETH_P_IP))
		return;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		swqe->tx_control |= EHEA_SWQE_IP_CHECKSUM;

	swqe->ip_start = skb_network_offset(skb);
	swqe->ip_end = swqe->ip_start + ip_hdrlen(skb) - 1;

	switch (ip_hdr(skb)->protocol) {
	case IPPROTO_UDP:
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;

		swqe->tcp_offset = swqe->ip_end + 1 +
				   offsetof(struct udphdr, check);
		swqe->tcp_end = skb->len - 1;
		break;

	case IPPROTO_TCP:
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;

		swqe->tcp_offset = swqe->ip_end + 1 +
				   offsetof(struct tcphdr, check);
		swqe->tcp_end = skb->len - 1;
		break;
	}
}
static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
		       struct ehea_swqe *swqe, u32 lkey)
{
	swqe->tx_control |= EHEA_SWQE_DESCRIPTORS_PRESENT;

	xmit_common(skb, swqe);

	write_swqe2_data(skb, dev, swqe, lkey);
}

static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
		       struct ehea_swqe *swqe)
{
	u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0];

	xmit_common(skb, swqe);

	if (!skb->data_len)
		skb_copy_from_linear_data(skb, imm_data, skb->len);
	else
		skb_copy_bits(skb, 0, imm_data, skb->len);

	swqe->immediate_data_length = skb->len;
	dev_kfree_skb(skb);
}
static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_swqe *swqe;
	u32 lkey;
	int swqe_index;
	struct ehea_port_res *pr;
	struct netdev_queue *txq;

	pr = &port->port_res[skb_get_queue_mapping(skb)];
	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	swqe = ehea_get_swqe(pr->qp, &swqe_index);
	memset(swqe, 0, SWQE_HEADER_SIZE);
	atomic_dec(&pr->swqe_avail);

	if (vlan_tx_tag_present(skb)) {
		swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
		swqe->vlan_tag = vlan_tx_tag_get(skb);
	}

	pr->tx_packets++;
	pr->tx_bytes += skb->len;

	if (skb->len <= SWQE3_MAX_IMM) {
		u32 sig_iv = port->sig_comp_iv;
		u32 swqe_num = pr->swqe_id_counter;
		ehea_xmit3(skb, dev, swqe);
		swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE)
			    | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, swqe_num);
		if (pr->swqe_ll_count >= (sig_iv - 1)) {
			swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
						      sig_iv);
			swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
			pr->swqe_ll_count = 0;
		} else
			pr->swqe_ll_count += 1;
	} else {
		swqe->wr_id =
			EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
		      | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter)
		      | EHEA_BMASK_SET(EHEA_WR_ID_REFILL, 1)
		      | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index);
		pr->sq_skba.arr[pr->sq_skba.index] = skb;

		pr->sq_skba.index++;
		pr->sq_skba.index &= (pr->sq_skba.len - 1);

		lkey = pr->send_mr.lkey;
		ehea_xmit2(skb, dev, swqe, lkey);
		swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
	}
	pr->swqe_id_counter += 1;

	netif_info(port, tx_queued, dev,
		   "post swqe on QP %d\n", pr->qp->init_attr.qp_nr);
	if (netif_msg_tx_queued(port))
		ehea_dump(swqe, 512, "swqe");

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		netif_tx_stop_queue(txq);
		swqe->tx_control |= EHEA_SWQE_PURGE;
	}

	ehea_post_swqe(pr->qp, swqe);

	if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
		pr->p_stats.queue_stopped++;
		netif_tx_stop_queue(txq);
	}

	return NETDEV_TX_OK;
}
static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	int index;
	u64 hret;

	cb1 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb1) {
		pr_err("no mem for cb1\n");
		goto out;
	}

	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_port failed\n");
		goto out;
	}

	index = (vid / 64);
	cb1->vlan_filter[index] |= ((u64)(0x8000000000000000 >> (vid & 0x3F)));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS)
		pr_err("modify_ehea_port failed\n");
out:
	free_page((unsigned long)cb1);
	return;
}
static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	int index;
	u64 hret;

	cb1 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb1) {
		pr_err("no mem for cb1\n");
		goto out;
	}

	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_port failed\n");
		goto out;
	}

	index = (vid / 64);
	cb1->vlan_filter[index] &= ~((u64)(0x8000000000000000 >> (vid & 0x3F)));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS)
		pr_err("modify_ehea_port failed\n");
out:
	free_page((unsigned long)cb1);
}
int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
{
	int ret = -EIO;
	u64 hret;
	u16 dummy16 = 0;
	u64 dummy64 = 0;
	struct hcp_modify_qp_cb0 *cb0;

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_qp failed (1)\n");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		pr_err("modify_ehea_qp failed (1)\n");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_qp failed (2)\n");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		pr_err("modify_ehea_qp failed (2)\n");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_qp failed (3)\n");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		pr_err("modify_ehea_qp failed (3)\n");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_qp failed (4)\n");
		goto out;
	}

	ret = 0;
out:
	free_page((unsigned long)cb0);
	return ret;
}
static int ehea_port_res_setup(struct ehea_port *port, int def_qps)
{
	int ret, i;
	struct port_res_cfg pr_cfg;
	enum ehea_eq_type eq_type = EHEA_EQ;

	port->qp_eq = ehea_create_eq(port->adapter, eq_type,
				     EHEA_MAX_ENTRIES_EQ, 1);
	if (!port->qp_eq) {
		ret = -EINVAL;
		pr_err("ehea_create_eq failed (qp_eq)\n");
		goto out_kill_eq;
	}

	pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries;
	pr_cfg.max_entries_scq = sq_entries * 2;
	pr_cfg.max_entries_sq = sq_entries;
	pr_cfg.max_entries_rq1 = rq1_entries;
	pr_cfg.max_entries_rq2 = rq2_entries;
	pr_cfg.max_entries_rq3 = rq3_entries;

	for (i = 0; i < def_qps; i++) {
		ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i);
		if (ret)
			goto out_clean_pr;
	}

	return 0;

out_clean_pr:
	while (--i >= 0)
		ehea_clean_portres(port, &port->port_res[i]);

out_kill_eq:
	ehea_destroy_eq(port->qp_eq);
	return ret;
}
static int ehea_clean_all_portres(struct ehea_port *port)
{
	int ret = 0;
	int i;

	for (i = 0; i < port->num_def_qps; i++)
		ret |= ehea_clean_portres(port, &port->port_res[i]);

	ret |= ehea_destroy_eq(port->qp_eq);

	return ret;
}
static void ehea_remove_adapter_mr(struct ehea_adapter *adapter)
{
	if (adapter->active_ports)
		return;

	ehea_rem_mr(&adapter->mr);
}

static int ehea_add_adapter_mr(struct ehea_adapter *adapter)
{
	if (adapter->active_ports)
		return 0;

	return ehea_reg_kernel_mr(adapter, &adapter->mr);
}
static int ehea_up(struct net_device *dev)
{
	int ret, i;
	struct ehea_port *port = netdev_priv(dev);

	if (port->state == EHEA_PORT_UP)
		return 0;

	ret = ehea_port_res_setup(port, port->num_def_qps);
	if (ret) {
		netdev_err(dev, "port_res_failed\n");
		goto out;
	}

	/* Set default QP for this port */
	ret = ehea_configure_port(port);
	if (ret) {
		netdev_err(dev, "ehea_configure_port failed. ret:%d\n", ret);
		goto out_clean_pr;
	}

	ret = ehea_reg_interrupts(dev);
	if (ret) {
		netdev_err(dev, "reg_interrupts failed. ret:%d\n", ret);
		goto out_clean_pr;
	}

	for (i = 0; i < port->num_def_qps; i++) {
		ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
		if (ret) {
			netdev_err(dev, "activate_qp failed\n");
			goto out_free_irqs;
		}
	}

	for (i = 0; i < port->num_def_qps; i++) {
		ret = ehea_fill_port_res(&port->port_res[i]);
		if (ret) {
			netdev_err(dev, "out_free_irqs\n");
			goto out_free_irqs;
		}
	}

	ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
	if (ret) {
		ret = -EIO;
		goto out_free_irqs;
	}

	port->state = EHEA_PORT_UP;

	ret = 0;
	goto out;

out_free_irqs:
	ehea_free_interrupts(dev);

out_clean_pr:
	ehea_clean_all_portres(port);
out:
	if (ret)
		netdev_info(dev, "Failed starting. ret=%i\n", ret);

	ehea_update_bcmc_registrations();
	ehea_update_firmware_handles();

	return ret;
}
static void port_napi_disable(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps; i++)
		napi_disable(&port->port_res[i].napi);
}

static void port_napi_enable(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps; i++)
		napi_enable(&port->port_res[i].napi);
}
static int ehea_open(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	mutex_lock(&port->port_lock);

	netif_info(port, ifup, dev, "enabling port\n");

	ret = ehea_up(dev);
	if (!ret) {
		port_napi_enable(port);
		netif_tx_start_all_queues(dev);
	}

	mutex_unlock(&port->port_lock);
	schedule_delayed_work(&port->stats_work, msecs_to_jiffies(1000));

	return ret;
}
static int ehea_down(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	if (port->state == EHEA_PORT_DOWN)
		return 0;

	ehea_drop_multicast_list(dev);
	ehea_broadcast_reg_helper(port, H_DEREG_BCMC);

	ehea_free_interrupts(dev);

	port->state = EHEA_PORT_DOWN;

	ehea_update_bcmc_registrations();

	ret = ehea_clean_all_portres(port);
	if (ret)
		netdev_info(dev, "Failed freeing resources. ret=%i\n", ret);

	ehea_update_firmware_handles();

	return ret;
}
static int ehea_stop(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	netif_info(port, ifdown, dev, "disabling port\n");

	set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
	cancel_work_sync(&port->reset_task);
	cancel_delayed_work_sync(&port->stats_work);
	mutex_lock(&port->port_lock);
	netif_tx_stop_all_queues(dev);
	port_napi_disable(port);
	ret = ehea_down(dev);
	mutex_unlock(&port->port_lock);
	clear_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
	return ret;
}
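
/*
 * The helpers below support memory DLPAR (see __EHEA_STOP_XFER): the
 * send queues are purged and flushed, then the QPs are disabled and
 * their shared memory regions deregistered while memory is removed or
 * added; ehea_restart_qps() later undoes all of this.
 */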
static void ehea_purge_sq(struct ehea_qp *orig_qp)
{
	struct ehea_qp qp = *orig_qp;
	struct ehea_qp_init_attr *init_attr = &qp.init_attr;
	struct ehea_swqe *swqe;
	int wqe_index;
	int i;

	for (i = 0; i < init_attr->act_nr_send_wqes; i++) {
		swqe = ehea_get_swqe(&qp, &wqe_index);
		swqe->tx_control |= EHEA_SWQE_PURGE;
	}
}
static void ehea_flush_sq(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps; i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count;
		int ret;

		ret = wait_event_timeout(port->swqe_avail_wq,
					 atomic_read(&pr->swqe_avail) >= swqe_max,
					 msecs_to_jiffies(100));

		if (!ret) {
			pr_err("WARNING: sq not flushed completely\n");
			break;
		}
	}
}
int ehea_stop_qps(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_modify_qp_cb0 *cb0;
	int ret = -EIO;
	int dret;
	int i;
	u64 hret;
	u64 dummy64 = 0;
	u16 dummy16 = 0;

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < (port->num_def_qps); i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		struct ehea_qp *qp = pr->qp;

		/* Purge send queue */
		ehea_purge_sq(qp);

		/* Disable queue pair */
		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			pr_err("query_ehea_qp failed (1)\n");
			goto out;
		}

		cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
		cb0->qp_ctl_reg &= ~H_QP_CR_ENABLED;

		hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
					     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
							    1), cb0, &dummy64,
					     &dummy64, &dummy16, &dummy16);
		if (hret != H_SUCCESS) {
			pr_err("modify_ehea_qp failed (1)\n");
			goto out;
		}

		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			pr_err("query_ehea_qp failed (2)\n");
			goto out;
		}

		/* deregister shared memory regions */
		dret = ehea_rem_smrs(pr);
		if (dret) {
			pr_err("unreg shared memory region failed\n");
			goto out;
		}
	}

	ret = 0;
out:
	free_page((unsigned long)cb0);

	return ret;
}
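/*
 * The disable sequence above follows a query/modify/query pattern:
 * ehea_h_query_ehea_qp() reads the current QP control register, the
 * resultant state bits are shifted into the request field, H_QP_CR_ENABLED
 * is cleared, and ehea_h_modify_ehea_qp() pushes the change before a final
 * query confirms the transition.  ehea_restart_qps() below runs the same
 * sequence with H_QP_CR_ENABLED set instead of cleared.
 */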
void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
{
	struct ehea_qp qp = *orig_qp;
	struct ehea_qp_init_attr *init_attr = &qp.init_attr;
	struct ehea_rwqe *rwqe;
	struct sk_buff **skba_rq2 = pr->rq2_skba.arr;
	struct sk_buff **skba_rq3 = pr->rq3_skba.arr;
	struct sk_buff *skb;
	u32 lkey = pr->recv_mr.lkey;
	int i;
	int index;

	for (i = 0; i < init_attr->act_nr_rwqes_rq2 + 1; i++) {
		rwqe = ehea_get_next_rwqe(&qp, 2);
		rwqe->sg_list[0].l_key = lkey;
		index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
		skb = skba_rq2[index];
		if (skb)
			rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
	}

	for (i = 0; i < init_attr->act_nr_rwqes_rq3 + 1; i++) {
		rwqe = ehea_get_next_rwqe(&qp, 3);
		rwqe->sg_list[0].l_key = lkey;
		index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
		skb = skba_rq3[index];
		if (skb)
			rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
	}
}
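/*
 * Rationale: after a DLPAR memory change the kernel memory region is torn
 * down and registered again (see ehea_rereg_mrs() below), which yields a
 * new lkey.  Receive WQEs still queued in RQ2/RQ3 carry the stale lkey and
 * buffer addresses, so they are patched in place here before the QP is
 * re-enabled; RQ1 is refilled separately in ehea_restart_qps().
 */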
int ehea_restart_qps(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	int ret = 0;
	int i;

	struct hcp_modify_qp_cb0 *cb0;
	u64 hret;
	u64 dummy64 = 0;
	u16 dummy16 = 0;

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < (port->num_def_qps); i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		struct ehea_qp *qp = pr->qp;

		ret = ehea_gen_smrs(pr);
		if (ret) {
			netdev_err(dev, "creation of shared memory regions failed\n");
			goto out;
		}

		ehea_update_rqs(qp, pr);

		/* Enable queue pair */
		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			netdev_err(dev, "query_ehea_qp failed (1)\n");
			goto out;
		}

		cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
		cb0->qp_ctl_reg |= H_QP_CR_ENABLED;

		hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
					     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
							    1), cb0, &dummy64,
					     &dummy64, &dummy16, &dummy16);
		if (hret != H_SUCCESS) {
			netdev_err(dev, "modify_ehea_qp failed (1)\n");
			goto out;
		}

		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			netdev_err(dev, "query_ehea_qp failed (2)\n");
			goto out;
		}

		/* refill entire queue */
		ehea_refill_rq1(pr, pr->rq1_skba.index, 0);
		ehea_refill_rq2(pr, 0);
		ehea_refill_rq3(pr, 0);
	}
out:
	free_page((unsigned long)cb0);

	return ret;
}
static void ehea_reset_port(struct work_struct *work)
{
	int ret;
	struct ehea_port *port =
		container_of(work, struct ehea_port, reset_task);
	struct net_device *dev = port->netdev;

	mutex_lock(&dlpar_mem_lock);
	port->resets++;
	mutex_lock(&port->port_lock);
	netif_tx_disable(dev);

	port_napi_disable(port);

	ehea_down(dev);

	ret = ehea_up(dev);
	if (ret)
		goto out;

	ehea_set_multicast_list(dev);

	netif_info(port, timer, dev, "reset successful\n");

	port_napi_enable(port);

	netif_tx_wake_all_queues(dev);
out:
	mutex_unlock(&port->port_lock);
	mutex_unlock(&dlpar_mem_lock);
}
static void ehea_rereg_mrs(void)
{
	int ret, i;
	struct ehea_adapter *adapter;

	pr_info("LPAR memory changed - re-initializing driver\n");

	list_for_each_entry(adapter, &adapter_list, list)
		if (adapter->active_ports) {
			/* Shutdown all ports */
			for (i = 0; i < EHEA_MAX_PORTS; i++) {
				struct ehea_port *port = adapter->port[i];
				struct net_device *dev;

				if (!port)
					continue;

				dev = port->netdev;

				if (dev->flags & IFF_UP) {
					mutex_lock(&port->port_lock);
					netif_tx_disable(dev);
					ehea_flush_sq(port);
					ret = ehea_stop_qps(dev);
					if (ret) {
						mutex_unlock(&port->port_lock);
						goto out;
					}
					port_napi_disable(port);
					mutex_unlock(&port->port_lock);
				}
				reset_sq_restart_flag(port);
			}

			/* Unregister old memory region */
			ret = ehea_rem_mr(&adapter->mr);
			if (ret) {
				pr_err("unregister MR failed - driver inoperable!\n");
				goto out;
			}
		}

	clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags);

	list_for_each_entry(adapter, &adapter_list, list)
		if (adapter->active_ports) {
			/* Register new memory region */
			ret = ehea_reg_kernel_mr(adapter, &adapter->mr);
			if (ret) {
				pr_err("register MR failed - driver inoperable!\n");
				goto out;
			}

			/* Restart all ports */
			for (i = 0; i < EHEA_MAX_PORTS; i++) {
				struct ehea_port *port = adapter->port[i];

				if (port) {
					struct net_device *dev = port->netdev;

					if (dev->flags & IFF_UP) {
						mutex_lock(&port->port_lock);
						ret = ehea_restart_qps(dev);
						if (!ret) {
							check_sqs(port);
							port_napi_enable(port);
							netif_tx_wake_all_queues(dev);
						} else {
							netdev_err(dev, "Unable to restart QPS\n");
						}
						mutex_unlock(&port->port_lock);
					}
				}
			}
		}
	pr_info("re-initializing driver complete\n");
out:
	return;
}
static void ehea_tx_watchdog(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);

	if (netif_carrier_ok(dev) &&
	    !test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
		ehea_schedule_port_reset(port);
}
int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
{
	struct hcp_query_ehea *cb;
	u64 hret;
	int ret;

	cb = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb) {
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea(adapter->handle, cb);

	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_herr;
	}

	adapter->max_mc_mac = cb->max_mc_mac - 1;
	ret = 0;

out_herr:
	free_page((unsigned long)cb);
out:
	return ret;
}
int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
{
	struct hcp_ehea_port_cb4 *cb4;
	u64 hret;
	int ret = 0;

	*jumbo = 0;

	/* (Try to) enable jumbo frames */
	cb4 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb4) {
		pr_err("no mem for cb4\n");
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id,
				      H_PORT_CB4,
				      H_PORT_CB4_JUMBO, cb4);
	if (hret == H_SUCCESS) {
		if (cb4->jumbo_frame)
			*jumbo = 1;
		else {
			cb4->jumbo_frame = 1;
			hret = ehea_h_modify_ehea_port(port->adapter->handle,
						       port->logical_port_id,
						       H_PORT_CB4,
						       H_PORT_CB4_JUMBO,
						       cb4);
			if (hret == H_SUCCESS)
				*jumbo = 1;
		}
	} else
		ret = -EINVAL;

	free_page((unsigned long)cb4);
out:
	return ret;
}
static ssize_t ehea_show_port_id(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
	return sprintf(buf, "%d", port->logical_port_id);
}

static DEVICE_ATTR(log_port_id, S_IRUSR | S_IRGRP | S_IROTH, ehea_show_port_id,
		   NULL);
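/*
 * log_port_id is a read-only attribute on the logical port device.  With
 * the usual ibmebus sysfs layout, the value can be read along the lines of
 * (illustrative path; the parent directory depends on the device tree):
 *
 *	cat /sys/bus/ibmebus/devices/<adapter>/port0/log_port_id
 */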
static void __devinit logical_port_release(struct device *dev)
{
	struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
	of_node_put(port->ofdev.dev.of_node);
}
static struct device *ehea_register_port(struct ehea_port *port,
					 struct device_node *dn)
{
	int ret;

	port->ofdev.dev.of_node = of_node_get(dn);
	port->ofdev.dev.parent = &port->adapter->ofdev->dev;
	port->ofdev.dev.bus = &ibmebus_bus_type;

	dev_set_name(&port->ofdev.dev, "port%d", port_name_cnt++);
	port->ofdev.dev.release = logical_port_release;

	ret = of_device_register(&port->ofdev);
	if (ret) {
		pr_err("failed to register device. ret=%d\n", ret);
		goto out;
	}

	ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
	if (ret) {
		pr_err("failed to register attributes, ret=%d\n", ret);
		goto out_unreg_of_dev;
	}

	return &port->ofdev.dev;

out_unreg_of_dev:
	of_device_unregister(&port->ofdev);
out:
	return NULL;
}

static void ehea_unregister_port(struct ehea_port *port)
{
	device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
	of_device_unregister(&port->ofdev);
}
static const struct net_device_ops ehea_netdev_ops = {
	.ndo_open		= ehea_open,
	.ndo_stop		= ehea_stop,
	.ndo_start_xmit		= ehea_start_xmit,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ehea_netpoll,
#endif
	.ndo_get_stats64	= ehea_get_stats64,
	.ndo_set_mac_address	= ehea_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= ehea_set_multicast_list,
	.ndo_change_mtu		= ehea_change_mtu,
	.ndo_vlan_rx_add_vid	= ehea_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ehea_vlan_rx_kill_vid,
	.ndo_tx_timeout		= ehea_tx_watchdog,
};
struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
					 u32 logical_port_id,
					 struct device_node *dn)
{
	int ret;
	struct net_device *dev;
	struct ehea_port *port;
	struct device *port_dev;
	int jumbo;

	/* allocate memory for the port structures */
	dev = alloc_etherdev_mq(sizeof(struct ehea_port), EHEA_MAX_PORT_RES);

	if (!dev) {
		pr_err("no mem for net_device\n");
		ret = -ENOMEM;
		goto out_err;
	}

	port = netdev_priv(dev);

	mutex_init(&port->port_lock);
	port->state = EHEA_PORT_DOWN;
	port->sig_comp_iv = sq_entries / 10;

	port->adapter = adapter;
	port->netdev = dev;
	port->logical_port_id = logical_port_id;

	port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT);

	port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL);
	if (!port->mc_list) {
		ret = -ENOMEM;
		goto out_free_ethdev;
	}

	INIT_LIST_HEAD(&port->mc_list->list);

	ret = ehea_sense_port_attr(port);
	if (ret)
		goto out_free_mc_list;

	netif_set_real_num_rx_queues(dev, port->num_def_qps);
	netif_set_real_num_tx_queues(dev, port->num_def_qps);

	port_dev = ehea_register_port(port, dn);
	if (!port_dev)
		goto out_free_mc_list;

	SET_NETDEV_DEV(dev, port_dev);

	/* initialize net_device structure */
	memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);

	dev->netdev_ops = &ehea_netdev_ops;
	ehea_set_ethtool_ops(dev);

	dev->hw_features = NETIF_F_SG | NETIF_F_TSO
		      | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX | NETIF_F_LRO;
	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
		      | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX
		      | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
		      | NETIF_F_RXCSUM;
	dev->vlan_features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HIGHDMA |
			NETIF_F_IP_CSUM;
	dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;

	if (use_lro)
		dev->features |= NETIF_F_LRO;

	INIT_WORK(&port->reset_task, ehea_reset_port);
	INIT_DELAYED_WORK(&port->stats_work, ehea_update_stats);

	init_waitqueue_head(&port->swqe_avail_wq);
	init_waitqueue_head(&port->restart_wq);

	memset(&port->stats, 0, sizeof(struct net_device_stats));
	ret = register_netdev(dev);
	if (ret) {
		pr_err("register_netdev failed. ret=%d\n", ret);
		goto out_unreg_port;
	}

	port->lro_max_aggr = lro_max_aggr;

	ret = ehea_get_jumboframe_status(port, &jumbo);
	if (ret)
		netdev_err(dev, "failed determining jumbo frame status\n");

	netdev_info(dev, "Jumbo frames are %sabled\n",
		    jumbo == 1 ? "en" : "dis");

	adapter->active_ports++;

	return port;

out_unreg_port:
	ehea_unregister_port(port);

out_free_mc_list:
	kfree(port->mc_list);

out_free_ethdev:
	free_netdev(dev);

out_err:
	pr_err("setting up logical port with id=%d failed, ret=%d\n",
	       logical_port_id, ret);
	return NULL;
}
static void ehea_shutdown_single_port(struct ehea_port *port)
{
	struct ehea_adapter *adapter = port->adapter;

	cancel_work_sync(&port->reset_task);
	cancel_delayed_work_sync(&port->stats_work);
	unregister_netdev(port->netdev);
	ehea_unregister_port(port);
	kfree(port->mc_list);
	free_netdev(port->netdev);
	adapter->active_ports--;
}
static int ehea_setup_ports(struct ehea_adapter *adapter)
{
	struct device_node *lhea_dn;
	struct device_node *eth_dn = NULL;

	const u32 *dn_log_port_id;
	int i = 0;

	lhea_dn = adapter->ofdev->dev.of_node;
	while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {

		dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
						 NULL);
		if (!dn_log_port_id) {
			pr_err("bad device node: eth_dn name=%s\n",
			       eth_dn->full_name);
			continue;
		}

		if (ehea_add_adapter_mr(adapter)) {
			pr_err("creating MR failed\n");
			of_node_put(eth_dn);
			return -EIO;
		}

		adapter->port[i] = ehea_setup_single_port(adapter,
							  *dn_log_port_id,
							  eth_dn);
		if (adapter->port[i])
			netdev_info(adapter->port[i]->netdev,
				    "logical port id #%d\n", *dn_log_port_id);
		else
			ehea_remove_adapter_mr(adapter);

		i++;
	}
	return 0;
}
static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
					   u32 logical_port_id)
{
	struct device_node *lhea_dn;
	struct device_node *eth_dn = NULL;
	const u32 *dn_log_port_id;

	lhea_dn = adapter->ofdev->dev.of_node;
	while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {

		dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
						 NULL);
		if (dn_log_port_id)
			if (*dn_log_port_id == logical_port_id)
				return eth_dn;
	}

	return NULL;
}
static ssize_t ehea_probe_port(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct ehea_adapter *adapter = dev_get_drvdata(dev);
	struct ehea_port *port;
	struct device_node *eth_dn = NULL;
	int i;

	u32 logical_port_id;

	sscanf(buf, "%d", &logical_port_id);

	port = ehea_get_port(adapter, logical_port_id);

	if (port) {
		netdev_info(port->netdev, "adding port with logical port id=%d failed: port already configured\n",
			    logical_port_id);
		return -EINVAL;
	}

	eth_dn = ehea_get_eth_dn(adapter, logical_port_id);

	if (!eth_dn) {
		pr_info("no logical port with id %d found\n", logical_port_id);
		return -EINVAL;
	}

	if (ehea_add_adapter_mr(adapter)) {
		pr_err("creating MR failed\n");
		return -EIO;
	}

	port = ehea_setup_single_port(adapter, logical_port_id, eth_dn);

	of_node_put(eth_dn);

	if (port) {
		for (i = 0; i < EHEA_MAX_PORTS; i++)
			if (!adapter->port[i]) {
				adapter->port[i] = port;
				break;
			}

		netdev_info(port->netdev, "added: (logical port id=%d)\n",
			    logical_port_id);
	} else {
		ehea_remove_adapter_mr(adapter);
		return -EIO;
	}

	return (ssize_t) count;
}
static ssize_t ehea_remove_port(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct ehea_adapter *adapter = dev_get_drvdata(dev);
	struct ehea_port *port;
	int i;
	u32 logical_port_id;

	sscanf(buf, "%d", &logical_port_id);

	port = ehea_get_port(adapter, logical_port_id);

	if (port) {
		netdev_info(port->netdev, "removed: (logical port id=%d)\n",
			    logical_port_id);

		ehea_shutdown_single_port(port);

		for (i = 0; i < EHEA_MAX_PORTS; i++)
			if (adapter->port[i] == port) {
				adapter->port[i] = NULL;
				break;
			}
	} else {
		pr_err("removing port with logical port id=%d failed. port not configured.\n",
		       logical_port_id);
		return -EINVAL;
	}

	ehea_remove_adapter_mr(adapter);

	return (ssize_t) count;
}
static DEVICE_ATTR(probe_port, S_IWUSR, NULL, ehea_probe_port);
static DEVICE_ATTR(remove_port, S_IWUSR, NULL, ehea_remove_port);

int ehea_create_device_sysfs(struct platform_device *dev)
{
	int ret = device_create_file(&dev->dev, &dev_attr_probe_port);
	if (ret)
		goto out;

	ret = device_create_file(&dev->dev, &dev_attr_remove_port);
out:
	return ret;
}

void ehea_remove_device_sysfs(struct platform_device *dev)
{
	device_remove_file(&dev->dev, &dev_attr_probe_port);
	device_remove_file(&dev->dev, &dev_attr_remove_port);
}
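/*
 * probe_port and remove_port are the DLPAR hooks for adding and removing
 * logical ports at runtime: writing a decimal logical port id (parsed via
 * sscanf() above) triggers the corresponding action.  Illustrative usage,
 * assuming the adapter's ibmebus sysfs directory:
 *
 *	echo 2 > /sys/bus/ibmebus/devices/<adapter>/probe_port
 *	echo 2 > /sys/bus/ibmebus/devices/<adapter>/remove_port
 */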
static int __devinit ehea_probe_adapter(struct platform_device *dev,
					const struct of_device_id *id)
{
	struct ehea_adapter *adapter;
	const u64 *adapter_handle;
	int ret;

	if (!dev || !dev->dev.of_node) {
		pr_err("Invalid ibmebus device probed\n");
		return -EINVAL;
	}

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		ret = -ENOMEM;
		dev_err(&dev->dev, "no mem for ehea_adapter\n");
		goto out;
	}

	list_add(&adapter->list, &adapter_list);

	adapter->ofdev = dev;

	adapter_handle = of_get_property(dev->dev.of_node, "ibm,hea-handle",
					 NULL);
	if (adapter_handle)
		adapter->handle = *adapter_handle;

	if (!adapter->handle) {
		dev_err(&dev->dev, "failed getting handle for adapter"
			" '%s'\n", dev->dev.of_node->full_name);
		ret = -ENODEV;
		goto out_free_ad;
	}

	adapter->pd = EHEA_PD_ID;

	dev_set_drvdata(&dev->dev, adapter);


	/* initialize adapter and ports */
	/* get adapter properties */
	ret = ehea_sense_adapter_attr(adapter);
	if (ret) {
		dev_err(&dev->dev, "sense_adapter_attr failed: %d\n", ret);
		goto out_free_ad;
	}

	adapter->neq = ehea_create_eq(adapter,
				      EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1);
	if (!adapter->neq) {
		ret = -EIO;
		dev_err(&dev->dev, "NEQ creation failed\n");
		goto out_free_ad;
	}

	tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet,
		     (unsigned long)adapter);

	ret = ibmebus_request_irq(adapter->neq->attr.ist1,
				  ehea_interrupt_neq, IRQF_DISABLED,
				  "ehea_neq", adapter);
	if (ret) {
		dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
		goto out_kill_eq;
	}

	ret = ehea_create_device_sysfs(dev);
	if (ret)
		goto out_free_irq;

	ret = ehea_setup_ports(adapter);
	if (ret) {
		dev_err(&dev->dev, "setup_ports failed\n");
		goto out_rem_dev_sysfs;
	}

	ret = 0;
	goto out;

out_rem_dev_sysfs:
	ehea_remove_device_sysfs(dev);

out_free_irq:
	ibmebus_free_irq(adapter->neq->attr.ist1, adapter);

out_kill_eq:
	ehea_destroy_eq(adapter->neq);

out_free_ad:
	list_del(&adapter->list);
	kfree(adapter);

out:
	ehea_update_firmware_handles();

	return ret;
}
static int __devexit ehea_remove(struct platform_device *dev)
{
	struct ehea_adapter *adapter = dev_get_drvdata(&dev->dev);
	int i;

	for (i = 0; i < EHEA_MAX_PORTS; i++)
		if (adapter->port[i]) {
			ehea_shutdown_single_port(adapter->port[i]);
			adapter->port[i] = NULL;
		}

	ehea_remove_device_sysfs(dev);

	ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
	tasklet_kill(&adapter->neq_tasklet);

	ehea_destroy_eq(adapter->neq);
	ehea_remove_adapter_mr(adapter);
	list_del(&adapter->list);
	kfree(adapter);

	ehea_update_firmware_handles();

	return 0;
}
void ehea_crash_handler(void)
{
	int i;

	if (ehea_fw_handles.arr)
		for (i = 0; i < ehea_fw_handles.num_entries; i++)
			ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
					     ehea_fw_handles.arr[i].fwh,
					     FORCE_FREE);

	if (ehea_bcmc_regs.arr)
		for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
			ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
					      ehea_bcmc_regs.arr[i].port_id,
					      ehea_bcmc_regs.arr[i].reg_type,
					      ehea_bcmc_regs.arr[i].macaddr,
					      0, H_DEREG_BCMC);
}
static int ehea_mem_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	int ret = NOTIFY_BAD;
	struct memory_notify *arg = data;

	mutex_lock(&dlpar_mem_lock);

	switch (action) {
	case MEM_CANCEL_OFFLINE:
		pr_info("memory offlining canceled\n");
		/* Readd canceled memory block */
	case MEM_GOING_ONLINE:
		pr_info("memory is going online\n");
		set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
		if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
			goto out_unlock;
		ehea_rereg_mrs();
		break;
	case MEM_GOING_OFFLINE:
		pr_info("memory is going offline\n");
		set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
		if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
			goto out_unlock;
		ehea_rereg_mrs();
		break;
	default:
		break;
	}

	ehea_update_firmware_handles();
	ret = NOTIFY_OK;

out_unlock:
	mutex_unlock(&dlpar_mem_lock);
	return ret;
}

static struct notifier_block ehea_mem_nb = {
	.notifier_call = ehea_mem_notifier,
};
static int ehea_reboot_notifier(struct notifier_block *nb,
				unsigned long action, void *unused)
{
	if (action == SYS_RESTART) {
		pr_info("Reboot: freeing all eHEA resources\n");
		ibmebus_unregister_driver(&ehea_driver);
	}
	return NOTIFY_DONE;
}

static struct notifier_block ehea_reboot_nb = {
	.notifier_call = ehea_reboot_notifier,
};
static int check_module_parm(void)
{
	int ret = 0;

	if ((rq1_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) {
		pr_info("Bad parameter: rq1_entries\n");
		ret = -EINVAL;
	}
	if ((rq2_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) {
		pr_info("Bad parameter: rq2_entries\n");
		ret = -EINVAL;
	}
	if ((rq3_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) {
		pr_info("Bad parameter: rq3_entries\n");
		ret = -EINVAL;
	}
	if ((sq_entries < EHEA_MIN_ENTRIES_QP) ||
	    (sq_entries > EHEA_MAX_ENTRIES_SQ)) {
		pr_info("Bad parameter: sq_entries\n");
		ret = -EINVAL;
	}

	return ret;
}
static ssize_t ehea_show_capabilities(struct device_driver *drv,
				      char *buf)
{
	return sprintf(buf, "%d", EHEA_CAPABILITIES);
}

static DRIVER_ATTR(capabilities, S_IRUSR | S_IRGRP | S_IROTH,
		   ehea_show_capabilities, NULL);
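/*
 * capabilities is a driver-level (not per-device) attribute, so it appears
 * once under the driver's sysfs directory.  Userspace tools can query it
 * along the lines of (illustrative path):
 *
 *	cat /sys/bus/ibmebus/drivers/ehea/capabilities
 */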
int __init ehea_module_init(void)
{
	int ret;

	pr_info("IBM eHEA ethernet device driver (Release %s)\n", DRV_VERSION);

	memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles));
	memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs));

	mutex_init(&ehea_fw_handles.lock);
	spin_lock_init(&ehea_bcmc_regs.lock);

	ret = check_module_parm();
	if (ret)
		goto out;

	ret = ehea_create_busmap();
	if (ret)
		goto out;

	ret = register_reboot_notifier(&ehea_reboot_nb);
	if (ret)
		pr_info("failed registering reboot notifier\n");

	ret = register_memory_notifier(&ehea_mem_nb);
	if (ret)
		pr_info("failed registering memory remove notifier\n");

	ret = crash_shutdown_register(ehea_crash_handler);
	if (ret)
		pr_info("failed registering crash handler\n");

	ret = ibmebus_register_driver(&ehea_driver);
	if (ret) {
		pr_err("failed registering eHEA device driver on ebus\n");
		goto out2;
	}

	ret = driver_create_file(&ehea_driver.driver,
				 &driver_attr_capabilities);
	if (ret) {
		pr_err("failed to register capabilities attribute, ret=%d\n",
		       ret);
		goto out3;
	}

	return ret;

out3:
	ibmebus_unregister_driver(&ehea_driver);
out2:
	unregister_memory_notifier(&ehea_mem_nb);
	unregister_reboot_notifier(&ehea_reboot_nb);
	crash_shutdown_unregister(ehea_crash_handler);
out:
	return ret;
}
static void __exit ehea_module_exit(void)
{
	int ret;

	driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
	ibmebus_unregister_driver(&ehea_driver);
	unregister_reboot_notifier(&ehea_reboot_nb);
	ret = crash_shutdown_unregister(ehea_crash_handler);
	if (ret)
		pr_info("failed unregistering crash handler\n");
	unregister_memory_notifier(&ehea_mem_nb);
	kfree(ehea_fw_handles.arr);
	kfree(ehea_bcmc_regs.arr);
	ehea_destroy_busmap();
}

module_init(ehea_module_init);
module_exit(ehea_module_exit);