// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  linux/drivers/net/ethernet/ibm/ehea/ehea_main.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *	 Christoph Raisch <raisch@de.ibm.com>
 *	 Jan-Bernd Themann <themann@de.ibm.com>
 *	 Thomas Klein <tklein@de.ibm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/if_ether.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/memory.h>
#include <asm/kexec.h>
#include <linux/mutex.h>
#include <linux/prefetch.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <net/ip.h>

#include "ehea.h"
#include "ehea_phyp.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
MODULE_DESCRIPTION("IBM eServer HEA Driver");
MODULE_VERSION(DRV_VERSION);

static int msg_level = -1;
static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
static int sq_entries = EHEA_DEF_ENTRIES_SQ;
static int use_mcs = 1;
static int prop_carrier_state;

module_param(msg_level, int, 0);
module_param(rq1_entries, int, 0);
module_param(rq2_entries, int, 0);
module_param(rq3_entries, int, 0);
module_param(sq_entries, int, 0);
module_param(prop_carrier_state, int, 0);
module_param(use_mcs, int, 0);

MODULE_PARM_DESC(msg_level, "msg_level");
MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical "
		 "port to stack. 1:yes, 0:no. Default = 0");
MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
		 "[2^x - 1], x = [7..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3));
MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
		 "[2^x - 1], x = [7..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2));
MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
		 "[2^x - 1], x = [7..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1));
MODULE_PARM_DESC(sq_entries, "Number of entries for the Send Queue "
		 "[2^x - 1], x = [7..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ));
MODULE_PARM_DESC(use_mcs, "Multiple receive queues, 1: enable, 0: disable, "
		 "Default = 1");

static int port_name_cnt;
static LIST_HEAD(adapter_list);
static unsigned long ehea_driver_flags;
static DEFINE_MUTEX(dlpar_mem_lock);
static struct ehea_fw_handle_array ehea_fw_handles;
static struct ehea_bcmc_reg_array ehea_bcmc_regs;

static int ehea_probe_adapter(struct platform_device *dev);

static int ehea_remove(struct platform_device *dev);

static const struct of_device_id ehea_module_device_table[] = {
	{
		.name = "lhea",
		.compatible = "IBM,lhea",
	},
	{
		.name = "ehea",
		.compatible = "IBM,lhea-ethernet",
	},
	{},
};
MODULE_DEVICE_TABLE(of, ehea_module_device_table);

static const struct of_device_id ehea_device_table[] = {
	{
		.name = "lhea",
		.compatible = "IBM,lhea",
	},
	{},
};
MODULE_DEVICE_TABLE(of, ehea_device_table);

static struct platform_driver ehea_driver = {
	.driver = {
		.name = "ehea",
		.owner = THIS_MODULE,
		.of_match_table = ehea_device_table,
	},
	.probe = ehea_probe_adapter,
	.remove = ehea_remove,
};

void ehea_dump(void *adr, int len, char *msg)
{
	int x;
	unsigned char *deb = adr;

	for (x = 0; x < len; x += 16) {
		pr_info("%s adr=%p ofs=%04x %016llx %016llx\n",
			msg, deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
		deb += 16;
	}
}

static void ehea_schedule_port_reset(struct ehea_port *port)
{
	if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags))
		schedule_work(&port->reset_task);
}

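/*
 * Rebuild the global table of all firmware handles (QPs, CQs, EQs and
 * MRs) currently owned by the driver.  Judging by the reboot/kexec
 * includes above, this snapshot presumably lets the shutdown and crash
 * paths hand every handle back to the hypervisor even when the normal
 * per-port teardown cannot run.
 */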
static void ehea_update_firmware_handles(void)
{
	struct ehea_fw_handle_entry *arr = NULL;
	struct ehea_adapter *adapter;
	int num_adapters = 0;
	int num_ports = 0;
	int num_portres = 0;
	int i = 0;
	int num_fw_handles, k, l;

	/* Determine number of handles */
	mutex_lock(&ehea_fw_handles.lock);

	list_for_each_entry(adapter, &adapter_list, list) {
		num_adapters++;

		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			num_ports++;
			num_portres += port->num_def_qps;
		}
	}

	num_fw_handles = num_adapters * EHEA_NUM_ADAPTER_FW_HANDLES +
			 num_ports * EHEA_NUM_PORT_FW_HANDLES +
			 num_portres * EHEA_NUM_PORTRES_FW_HANDLES;

	if (num_fw_handles) {
		arr = kcalloc(num_fw_handles, sizeof(*arr), GFP_KERNEL);
		if (!arr)
			goto out;	/* Keep the existing array */
	}

	list_for_each_entry(adapter, &adapter_list, list) {
		if (num_adapters == 0)
			break;

		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP) ||
			    (num_ports == 0))
				continue;

			for (l = 0; l < port->num_def_qps; l++) {
				struct ehea_port_res *pr = &port->port_res[l];

				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->qp->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->send_cq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->recv_cq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->eq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->send_mr.handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->recv_mr.handle;
			}
			arr[i].adh = adapter->handle;
			arr[i++].fwh = port->qp_eq->fw_handle;
			num_ports--;
		}

		arr[i].adh = adapter->handle;
		arr[i++].fwh = adapter->neq->fw_handle;

		if (adapter->mr.handle) {
			arr[i].adh = adapter->handle;
			arr[i++].fwh = adapter->mr.handle;
		}
		num_adapters--;
	}

	kfree(ehea_fw_handles.arr);
	ehea_fw_handles.arr = arr;
	ehea_fw_handles.num_entries = i;
out:
	mutex_unlock(&ehea_fw_handles.lock);
}

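/*
 * Mirror all active broadcast/multicast (BCMC) registrations into a
 * flat array, two entries per registration (untagged + VLANID_ALL),
 * apparently for the same emergency-teardown purpose as the
 * firmware-handle table above.
 */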
static void ehea_update_bcmc_registrations(void)
{
	unsigned long flags;
	struct ehea_bcmc_reg_entry *arr = NULL;
	struct ehea_adapter *adapter;
	struct ehea_mc_list *mc_entry;
	int num_registrations = 0;
	int i = 0;
	int k;

	spin_lock_irqsave(&ehea_bcmc_regs.lock, flags);

	/* Determine number of registrations */
	list_for_each_entry(adapter, &adapter_list, list)
		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			num_registrations += 2;	/* Broadcast registrations */

			list_for_each_entry(mc_entry, &port->mc_list->list, list)
				num_registrations += 2;
		}

	if (num_registrations) {
		arr = kcalloc(num_registrations, sizeof(*arr), GFP_ATOMIC);
		if (!arr)
			goto out;	/* Keep the existing array */
	}

	list_for_each_entry(adapter, &adapter_list, list) {
		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			if (num_registrations == 0)
				goto out_update;

			arr[i].adh = adapter->handle;
			arr[i].port_id = port->logical_port_id;
			arr[i].reg_type = EHEA_BCMC_BROADCAST |
					  EHEA_BCMC_UNTAGGED;
			arr[i++].macaddr = port->mac_addr;

			arr[i].adh = adapter->handle;
			arr[i].port_id = port->logical_port_id;
			arr[i].reg_type = EHEA_BCMC_BROADCAST |
					  EHEA_BCMC_VLANID_ALL;
			arr[i++].macaddr = port->mac_addr;
			num_registrations -= 2;

			list_for_each_entry(mc_entry,
					    &port->mc_list->list, list) {
				if (num_registrations == 0)
					goto out_update;

				arr[i].adh = adapter->handle;
				arr[i].port_id = port->logical_port_id;
				arr[i].reg_type = EHEA_BCMC_MULTICAST |
						  EHEA_BCMC_UNTAGGED;
				if (mc_entry->macaddr == 0)
					arr[i].reg_type |= EHEA_BCMC_SCOPE_ALL;
				arr[i++].macaddr = mc_entry->macaddr;

				arr[i].adh = adapter->handle;
				arr[i].port_id = port->logical_port_id;
				arr[i].reg_type = EHEA_BCMC_MULTICAST |
						  EHEA_BCMC_VLANID_ALL;
				if (mc_entry->macaddr == 0)
					arr[i].reg_type |= EHEA_BCMC_SCOPE_ALL;
				arr[i++].macaddr = mc_entry->macaddr;
				num_registrations -= 2;
			}
		}
	}

out_update:
	kfree(ehea_bcmc_regs.arr);
	ehea_bcmc_regs.arr = arr;
	ehea_bcmc_regs.num_entries = i;
out:
	spin_unlock_irqrestore(&ehea_bcmc_regs.lock, flags);
}

static void ehea_get_stats64(struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct ehea_port *port = netdev_priv(dev);
	u64 rx_packets = 0, tx_packets = 0, rx_bytes = 0, tx_bytes = 0;
	int i;

	for (i = 0; i < port->num_def_qps; i++) {
		rx_packets += port->port_res[i].rx_packets;
		rx_bytes += port->port_res[i].rx_bytes;
	}

	for (i = 0; i < port->num_def_qps; i++) {
		tx_packets += port->port_res[i].tx_packets;
		tx_bytes += port->port_res[i].tx_bytes;
	}

	stats->tx_packets = tx_packets;
	stats->rx_bytes = rx_bytes;
	stats->tx_bytes = tx_bytes;
	stats->rx_packets = rx_packets;

	stats->multicast = port->stats.multicast;
	stats->rx_errors = port->stats.rx_errors;
}

static void ehea_update_stats(struct work_struct *work)
{
	struct ehea_port *port =
		container_of(work, struct ehea_port, stats_work.work);
	struct net_device *dev = port->netdev;
	struct rtnl_link_stats64 *stats = &port->stats;
	struct hcp_ehea_port_cb2 *cb2;
	u64 hret;

	cb2 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb2) {
		netdev_err(dev, "No mem for cb2. Some interface statistics were not updated\n");
		goto resched;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id,
				      H_PORT_CB2, H_PORT_CB2_ALL, cb2);
	if (hret != H_SUCCESS) {
		netdev_err(dev, "query_ehea_port failed\n");
		goto out_herr;
	}

	if (netif_msg_hw(port))
		ehea_dump(cb2, sizeof(*cb2), "net_device_stats");

	stats->multicast = cb2->rxmcp;
	stats->rx_errors = cb2->rxuerr;

out_herr:
	free_page((unsigned long)cb2);
resched:
	schedule_delayed_work(&port->stats_work,
			      round_jiffies_relative(msecs_to_jiffies(1000)));
}

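/*
 * Re-post receive WQEs on RQ1 (the low-latency queue whose packet data
 * arrives inline in the CQE).  While a DLPAR memory operation has
 * transfers stopped (__EHEA_STOP_XFER), the refill is recorded in
 * os_skbs and replayed on the next call instead of touching the queue.
 */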
static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
{
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	int max_index_mask = pr->rq1_skba.len - 1;
	int fill_wqes = pr->rq1_skba.os_skbs + nr_of_wqes;
	int adder = 0;
	int i;

	pr->rq1_skba.os_skbs = 0;

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		if (nr_of_wqes > 0)
			pr->rq1_skba.index = index;
		pr->rq1_skba.os_skbs = fill_wqes;
		return;
	}

	for (i = 0; i < fill_wqes; i++) {
		if (!skb_arr_rq1[index]) {
			skb_arr_rq1[index] = netdev_alloc_skb(dev,
							      EHEA_L_PKT_SIZE);
			if (!skb_arr_rq1[index]) {
				pr->rq1_skba.os_skbs = fill_wqes - i;
				break;
			}
		}
		index--;
		index &= max_index_mask;
		adder++;
	}

	if (adder == 0)
		return;

	/* Ring doorbell */
	ehea_update_rq1a(pr->qp, adder);
}

static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
{
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	int i;

	if (nr_rq1a > pr->rq1_skba.len) {
		netdev_err(dev, "NR_RQ1A bigger than skb array len\n");
		return;
	}

	for (i = 0; i < nr_rq1a; i++) {
		skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
		if (!skb_arr_rq1[i])
			break;
	}
	/* Ring doorbell */
	ehea_update_rq1a(pr->qp, i - 1);
}

static int ehea_refill_rq_def(struct ehea_port_res *pr,
			      struct ehea_q_skb_arr *q_skba, int rq_nr,
			      int num_wqes, int wqe_type, int packet_size)
{
	struct net_device *dev = pr->port->netdev;
	struct ehea_qp *qp = pr->qp;
	struct sk_buff **skb_arr = q_skba->arr;
	struct ehea_rwqe *rwqe;
	int i, index, max_index_mask, fill_wqes;
	int adder = 0;
	int ret = 0;

	fill_wqes = q_skba->os_skbs + num_wqes;
	q_skba->os_skbs = 0;

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		q_skba->os_skbs = fill_wqes;
		return ret;
	}

	index = q_skba->index;
	max_index_mask = q_skba->len - 1;
	for (i = 0; i < fill_wqes; i++) {
		u64 tmp_addr;
		struct sk_buff *skb;

		skb = netdev_alloc_skb_ip_align(dev, packet_size);
		if (!skb) {
			q_skba->os_skbs = fill_wqes - i;
			if (q_skba->os_skbs == q_skba->len - 2) {
				netdev_info(pr->port->netdev,
					    "rq%i ran dry - no mem for skb\n",
					    rq_nr);
				ret = -ENOMEM;
			}
			break;
		}

		skb_arr[index] = skb;
		tmp_addr = ehea_map_vaddr(skb->data);
		if (tmp_addr == -1) {
			dev_consume_skb_any(skb);
			q_skba->os_skbs = fill_wqes - i;
			break;
		}

		rwqe = ehea_get_next_rwqe(qp, rq_nr);
		rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
			    | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
		rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
		rwqe->sg_list[0].vaddr = tmp_addr;
		rwqe->sg_list[0].len = packet_size;
		rwqe->data_segments = 1;

		index++;
		index &= max_index_mask;
		adder++;
	}

	q_skba->index = index;
	if (adder == 0)
		goto out;

	/* Ring doorbell */
	if (rq_nr == 2)
		ehea_update_rq2a(pr->qp, adder);
	else
		ehea_update_rq3a(pr->qp, adder);
out:
	return ret;
}

static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes)
{
	return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
				  nr_of_wqes, EHEA_RWQE2_TYPE,
				  EHEA_RQ2_PKT_SIZE);
}

static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes)
{
	return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
				  nr_of_wqes, EHEA_RWQE3_TYPE,
				  EHEA_MAX_PACKET_SIZE);
}

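/*
 * Classify a receive CQE: *rq_num gets the RQ the completion belongs
 * to, and the return value says whether the frame must go down the
 * error path.  A TCP checksum error with header_length == 0 is
 * deliberately not treated as an error here; it apparently flags
 * frames that are still deliverable, so they are passed up normally.
 */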
static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
{
	*rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5;
	if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0)
		return 0;
	if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) &&
	    (cqe->header_length == 0))
		return 0;
	return -EINVAL;
}

static inline void ehea_fill_skb(struct net_device *dev,
				 struct sk_buff *skb, struct ehea_cqe *cqe,
				 struct ehea_port_res *pr)
{
	int length = cqe->num_bytes_transfered - 4;	/* remove CRC */

	skb_put(skb, length);
	skb->protocol = eth_type_trans(skb, dev);

	/* The packet was not an IPV4 packet so a complemented checksum was
	   calculated. The value is found in the Internet Checksum field. */
	if (cqe->status & EHEA_CQE_BLIND_CKSUM) {
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = csum_unfold(~cqe->inet_checksum_value);
	} else
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	skb_record_rx_queue(skb, pr - &pr->port->port_res[0]);
}

static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
					       int arr_len,
					       struct ehea_cqe *cqe)
{
	int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
	struct sk_buff *skb;
	void *pref;
	int x;

	x = skb_index + 1;
	x &= (arr_len - 1);

	pref = skb_array[x];
	if (pref) {
		prefetchw(pref);
		prefetchw(pref + EHEA_CACHE_LINE);

		pref = (skb_array[x]->data);
		prefetch(pref);
		prefetch(pref + EHEA_CACHE_LINE);
		prefetch(pref + EHEA_CACHE_LINE * 2);
		prefetch(pref + EHEA_CACHE_LINE * 3);
	}

	skb = skb_array[skb_index];
	skb_array[skb_index] = NULL;
	return skb;
}

static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array,
						  int arr_len, int wqe_index)
{
	struct sk_buff *skb;
	void *pref;
	int x;

	x = wqe_index + 1;
	x &= (arr_len - 1);

	pref = skb_array[x];
	if (pref) {
		prefetchw(pref);
		prefetchw(pref + EHEA_CACHE_LINE);

		pref = (skb_array[x]->data);
		prefetchw(pref);
		prefetchw(pref + EHEA_CACHE_LINE);
	}

	skb = skb_array[wqe_index];
	skb_array[wqe_index] = NULL;
	return skb;
}

static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
				 struct ehea_cqe *cqe, int *processed_rq2,
				 int *processed_rq3)
{
	struct sk_buff *skb;

	if (cqe->status & EHEA_CQE_STAT_ERR_TCP)
		pr->p_stats.err_tcp_cksum++;
	if (cqe->status & EHEA_CQE_STAT_ERR_IP)
		pr->p_stats.err_ip_cksum++;
	if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
		pr->p_stats.err_frame_crc++;

	if (rq == 2) {
		*processed_rq2 += 1;
		skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe);
		dev_kfree_skb(skb);
	} else if (rq == 3) {
		*processed_rq3 += 1;
		skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe);
		dev_kfree_skb(skb);
	}

	if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
		if (netif_msg_rx_err(pr->port)) {
			pr_err("Critical receive error for QP %d. Resetting port.\n",
			       pr->qp->init_attr.qp_nr);
			ehea_dump(cqe, sizeof(*cqe), "CQE");
		}
		ehea_schedule_port_reset(pr->port);
		return 1;
	}

	return 0;
}

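/*
 * NAPI receive path.  RQ1 completions carry the packet inline in the
 * CQE (copied out below), while RQ2/RQ3 completions point at
 * pre-posted skbs of increasing size.  Consumed slots on each queue
 * are counted so the refill helpers can repost exactly that many WQEs.
 */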
static int ehea_proc_rwqes(struct net_device *dev,
			   struct ehea_port_res *pr,
			   int budget)
{
	struct ehea_port *port = pr->port;
	struct ehea_qp *qp = pr->qp;
	struct ehea_cqe *cqe;
	struct sk_buff *skb;
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr;
	struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr;
	int skb_arr_rq1_len = pr->rq1_skba.len;
	int skb_arr_rq2_len = pr->rq2_skba.len;
	int skb_arr_rq3_len = pr->rq3_skba.len;
	int processed, processed_rq1, processed_rq2, processed_rq3;
	u64 processed_bytes = 0;
	int wqe_index, last_wqe_index, rq, port_reset;

	processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
	last_wqe_index = 0;

	cqe = ehea_poll_rq1(qp, &wqe_index);
	while ((processed < budget) && cqe) {
		ehea_inc_rq1(qp);
		processed_rq1++;
		processed++;
		if (netif_msg_rx_status(port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		last_wqe_index = wqe_index;
		if (!ehea_check_cqe(cqe, &rq)) {
			if (rq == 1) {
				/* LL RQ1 */
				skb = get_skb_by_index_ll(skb_arr_rq1,
							  skb_arr_rq1_len,
							  wqe_index);
				if (unlikely(!skb)) {
					netif_info(port, rx_err, dev,
						   "LL rq1: skb=NULL\n");

					skb = netdev_alloc_skb(dev,
							       EHEA_L_PKT_SIZE);
					if (!skb)
						break;
				}
				skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
						 cqe->num_bytes_transfered - 4);
				ehea_fill_skb(dev, skb, cqe, pr);
			} else if (rq == 2) {
				/* RQ2 */
				skb = get_skb_by_index(skb_arr_rq2,
						       skb_arr_rq2_len, cqe);
				if (unlikely(!skb)) {
					netif_err(port, rx_err, dev,
						  "rq2: skb=NULL\n");
					break;
				}
				ehea_fill_skb(dev, skb, cqe, pr);
				processed_rq2++;
			} else {
				/* RQ3 */
				skb = get_skb_by_index(skb_arr_rq3,
						       skb_arr_rq3_len, cqe);
				if (unlikely(!skb)) {
					netif_err(port, rx_err, dev,
						  "rq3: skb=NULL\n");
					break;
				}
				ehea_fill_skb(dev, skb, cqe, pr);
				processed_rq3++;
			}

			processed_bytes += skb->len;

			if (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT)
				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
						       cqe->vlan_tag);

			napi_gro_receive(&pr->napi, skb);
		} else {
			pr->p_stats.poll_receive_errors++;
			port_reset = ehea_treat_poll_error(pr, rq, cqe,
							   &processed_rq2,
							   &processed_rq3);
			if (port_reset)
				break;
		}
		cqe = ehea_poll_rq1(qp, &wqe_index);
	}

	pr->rx_packets += processed;
	pr->rx_bytes += processed_bytes;

	ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
	ehea_refill_rq2(pr, processed_rq2);
	ehea_refill_rq3(pr, processed_rq3);

	return processed;
}

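/*
 * SW/HW send-queue synchronisation: check_sqs() posts a purged dummy
 * WQE tagged SWQE_RESTART_CHECK on every send queue; ehea_proc_cqes()
 * sets sq_restart_flag when it sees the marker complete, and
 * reset_sq_restart_flag() clears the flags and wakes restart_wq.  A
 * timeout on the wait means HW and SW queues are out of sync and the
 * port gets reset.
 */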
#define SWQE_RESTART_CHECK 0xdeadbeaff00d0000ull

static void reset_sq_restart_flag(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps; i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		pr->sq_restart_flag = 0;
	}
	wake_up(&port->restart_wq);
}

static void check_sqs(struct ehea_port *port)
{
	struct ehea_swqe *swqe;
	int swqe_index;
	int i;

	for (i = 0; i < port->num_def_qps; i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		int ret;

		swqe = ehea_get_swqe(pr->qp, &swqe_index);
		memset(swqe, 0, SWQE_HEADER_SIZE);
		atomic_dec(&pr->swqe_avail);

		swqe->tx_control |= EHEA_SWQE_PURGE;
		swqe->wr_id = SWQE_RESTART_CHECK;
		swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
		swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT;
		swqe->immediate_data_length = 80;

		ehea_post_swqe(pr->qp, swqe);

		ret = wait_event_timeout(port->restart_wq,
					 pr->sq_restart_flag == 0,
					 msecs_to_jiffies(100));

		if (!ret) {
			pr_err("HW/SW queues out of sync\n");
			ehea_schedule_port_reset(pr->port);
			return;
		}
	}
}

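/*
 * Reap send completions: release skbs for SWQE2 transmissions, credit
 * swqe_avail from the REFILL field of each wr_id, and wake the TX
 * queue (under the tx lock to avoid racing the xmit path) once enough
 * slots are free again.
 */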
static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
{
	struct sk_buff *skb;
	struct ehea_cq *send_cq = pr->send_cq;
	struct ehea_cqe *cqe;
	int quota = my_quota;
	int cqe_counter = 0;
	int swqe_av = 0;
	int index;
	struct netdev_queue *txq = netdev_get_tx_queue(pr->port->netdev,
						pr - &pr->port->port_res[0]);

	cqe = ehea_poll_cq(send_cq);
	while (cqe && (quota > 0)) {
		ehea_inc_cq(send_cq);

		cqe_counter++;

		if (cqe->wr_id == SWQE_RESTART_CHECK) {
			pr->sq_restart_flag = 1;
			swqe_av++;
			break;
		}

		if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
			pr_err("Bad send completion status=0x%04X\n",
			       cqe->status);

			if (netif_msg_tx_err(pr->port))
				ehea_dump(cqe, sizeof(*cqe), "Send CQE");

			if (cqe->status & EHEA_CQE_STAT_RESET_MASK) {
				pr_err("Resetting port\n");
				ehea_schedule_port_reset(pr->port);
				break;
			}
		}

		if (netif_msg_tx_done(pr->port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id)
			   == EHEA_SWQE2_TYPE)) {

			index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
			skb = pr->sq_skba.arr[index];
			dev_consume_skb_any(skb);
			pr->sq_skba.arr[index] = NULL;
		}

		swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id);
		quota--;

		cqe = ehea_poll_cq(send_cq);
	}

	ehea_update_feca(send_cq, cqe_counter);
	atomic_add(swqe_av, &pr->swqe_avail);

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	wake_up(&pr->port->swqe_avail_wq);

	return cqe;
}

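/*
 * NAPI poll.  When the budget is not exhausted, polling is completed
 * and completion events are re-armed on both CQs; the queues are then
 * re-checked, and if new work raced in, napi_reschedule() resumes
 * polling so no completion is lost between the final check and
 * interrupt re-enable.
 */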
#define EHEA_POLL_MAX_CQES 65535

static int ehea_poll(struct napi_struct *napi, int budget)
{
	struct ehea_port_res *pr = container_of(napi, struct ehea_port_res,
						napi);
	struct net_device *dev = pr->port->netdev;
	struct ehea_cqe *cqe;
	struct ehea_cqe *cqe_skb = NULL;
	int wqe_index;
	int rx = 0;

	cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
	rx += ehea_proc_rwqes(dev, pr, budget - rx);

	while (rx != budget) {
		napi_complete(napi);
		ehea_reset_cq_ep(pr->recv_cq);
		ehea_reset_cq_ep(pr->send_cq);
		ehea_reset_cq_n1(pr->recv_cq);
		ehea_reset_cq_n1(pr->send_cq);

		cqe = ehea_poll_rq1(pr->qp, &wqe_index);
		cqe_skb = ehea_poll_cq(pr->send_cq);

		if (!cqe && !cqe_skb)
			return rx;

		if (!napi_reschedule(napi))
			return rx;

		cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
		rx += ehea_proc_rwqes(dev, pr, budget - rx);
	}

	return rx;
}

static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
{
	struct ehea_port_res *pr = param;

	napi_schedule(&pr->napi);

	return IRQ_HANDLED;
}

static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
{
	struct ehea_port *port = param;
	struct ehea_eqe *eqe;
	struct ehea_qp *qp;
	u32 qp_token;
	u64 resource_type, aer, aerr;
	int reset_port = 0;

	eqe = ehea_poll_eq(port->qp_eq);

	while (eqe) {
		qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
		pr_err("QP aff_err: entry=0x%llx, token=0x%x\n",
		       eqe->entry, qp_token);

		qp = port->port_res[qp_token].qp;

		resource_type = ehea_error_data(port->adapter, qp->fw_handle,
						&aer, &aerr);

		if (resource_type == EHEA_AER_RESTYPE_QP) {
			if ((aer & EHEA_AER_RESET_MASK) ||
			    (aerr & EHEA_AERR_RESET_MASK))
				reset_port = 1;
		} else
			reset_port = 1;	/* Reset in case of CQ or EQ error */

		eqe = ehea_poll_eq(port->qp_eq);
	}

	if (reset_port) {
		pr_err("Resetting port\n");
		ehea_schedule_port_reset(port);
	}

	return IRQ_HANDLED;
}

static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter,
				       int logical_port)
{
	int i;

	for (i = 0; i < EHEA_MAX_PORTS; i++)
		if (adapter->port[i])
			if (adapter->port[i]->logical_port_id == logical_port)
				return adapter->port[i];
	return NULL;
}

int ehea_sense_port_attr(struct ehea_port *port)
{
	int ret;
	u64 hret;
	struct hcp_ehea_port_cb0 *cb0;

	/* may be called via ehea_neq_tasklet() */
	cb0 = (void *)get_zeroed_page(GFP_ATOMIC);
	if (!cb0) {
		pr_err("no mem for cb0\n");
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id, H_PORT_CB0,
				      EHEA_BMASK_SET(H_PORT_CB0_ALL, 0xFFFF),
				      cb0);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_free;
	}

	/* MAC address */
	port->mac_addr = cb0->port_mac_addr << 16;

	if (!is_valid_ether_addr((u8 *)&port->mac_addr)) {
		ret = -EADDRNOTAVAIL;
		goto out_free;
	}

	/* Port speed */
	switch (cb0->port_speed) {
	case H_SPEED_10M_H:
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 0;
		break;
	case H_SPEED_10M_F:
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 1;
		break;
	case H_SPEED_100M_H:
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 0;
		break;
	case H_SPEED_100M_F:
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 1;
		break;
	case H_SPEED_1G_F:
		port->port_speed = EHEA_SPEED_1G;
		port->full_duplex = 1;
		break;
	case H_SPEED_10G_F:
		port->port_speed = EHEA_SPEED_10G;
		port->full_duplex = 1;
		break;
	default:
		port->port_speed = 0;
		port->full_duplex = 0;
		break;
	}

	port->autoneg = 1;
	port->num_mcs = cb0->num_default_qps;

	/* Number of default QPs */
	if (use_mcs)
		port->num_def_qps = cb0->num_default_qps;
	else
		port->num_def_qps = 1;

	if (!port->num_def_qps) {
		ret = -EINVAL;
		goto out_free;
	}

	ret = 0;
out_free:
	if (ret || netif_msg_probe(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr");
	free_page((unsigned long)cb0);
out:
	return ret;
}

int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
{
	struct hcp_ehea_port_cb4 *cb4;
	u64 hret;
	int ret = 0;

	cb4 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb4) {
		pr_err("no mem for cb4\n");
		ret = -ENOMEM;
		goto out;
	}

	cb4->port_speed = port_speed;

	netif_carrier_off(port->netdev);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB4, H_PORT_CB4_SPEED, cb4);
	if (hret == H_SUCCESS) {
		port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 1 : 0;

		hret = ehea_h_query_ehea_port(port->adapter->handle,
					      port->logical_port_id,
					      H_PORT_CB4, H_PORT_CB4_SPEED,
					      cb4);
		if (hret == H_SUCCESS) {
			switch (cb4->port_speed) {
			case H_SPEED_10M_H:
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 0;
				break;
			case H_SPEED_10M_F:
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 1;
				break;
			case H_SPEED_100M_H:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 0;
				break;
			case H_SPEED_100M_F:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 1;
				break;
			case H_SPEED_1G_F:
				port->port_speed = EHEA_SPEED_1G;
				port->full_duplex = 1;
				break;
			case H_SPEED_10G_F:
				port->port_speed = EHEA_SPEED_10G;
				port->full_duplex = 1;
				break;
			default:
				port->port_speed = 0;
				port->full_duplex = 0;
				break;
			}
		} else {
			pr_err("Failed sensing port speed\n");
			ret = -EIO;
		}
	} else {
		if (hret == H_AUTHORITY) {
			pr_info("Hypervisor denied setting port speed\n");
			ret = -EPERM;
		} else {
			ret = -EIO;
			pr_err("Failed setting port speed\n");
		}
	}
	if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP))
		netif_carrier_on(port->netdev);

	free_page((unsigned long)cb4);
out:
	return ret;
}

static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
{
	int ret;
	u8 ec;
	u8 portnum;
	struct ehea_port *port;
	struct net_device *dev;

	ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
	portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
	port = ehea_get_port(adapter, portnum);
	if (!port) {
		netdev_err(NULL, "unknown portnum %x\n", portnum);
		return;
	}
	dev = port->netdev;

	switch (ec) {
	case EHEA_EC_PORTSTATE_CHG:	/* port state change */
		if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
			if (!netif_carrier_ok(dev)) {
				ret = ehea_sense_port_attr(port);
				if (ret) {
					netdev_err(dev, "failed resensing port attributes\n");
					break;
				}

				netif_info(port, link, dev,
					   "Logical port up: %dMbps %s Duplex\n",
					   port->port_speed,
					   port->full_duplex == 1 ?
					   "Full" : "Half");

				netif_carrier_on(dev);
				netif_wake_queue(dev);
			}
		} else
			if (netif_carrier_ok(dev)) {
				netif_info(port, link, dev,
					   "Logical port down\n");
				netif_carrier_off(dev);
				netif_tx_disable(dev);
			}

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
			port->phy_link = EHEA_PHY_LINK_UP;
			netif_info(port, link, dev,
				   "Physical port up\n");
			if (prop_carrier_state)
				netif_carrier_on(dev);
		} else {
			port->phy_link = EHEA_PHY_LINK_DOWN;
			netif_info(port, link, dev,
				   "Physical port down\n");
			if (prop_carrier_state)
				netif_carrier_off(dev);
		}

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
			netdev_info(dev,
				    "External switch port is primary port\n");
		else
			netdev_info(dev,
				    "External switch port is backup port\n");

		break;
	case EHEA_EC_ADAPTER_MALFUNC:
		netdev_err(dev, "Adapter malfunction\n");
		break;
	case EHEA_EC_PORT_MALFUNC:
		netdev_info(dev, "Port malfunction\n");
		netif_carrier_off(dev);
		netif_tx_disable(dev);
		break;
	default:
		netdev_err(dev, "unknown event code %x, eqe=0x%llX\n", ec, eqe);
		break;
	}
}

static void ehea_neq_tasklet(struct tasklet_struct *t)
{
	struct ehea_adapter *adapter = from_tasklet(adapter, t, neq_tasklet);
	struct ehea_eqe *eqe;
	u64 event_mask;

	eqe = ehea_poll_eq(adapter->neq);
	pr_debug("eqe=%p\n", eqe);

	while (eqe) {
		pr_debug("*eqe=%lx\n", (unsigned long) eqe->entry);
		ehea_parse_eqe(adapter, eqe->entry);
		eqe = ehea_poll_eq(adapter->neq);
		pr_debug("next eqe=%p\n", eqe);
	}

	event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
		   | EHEA_BMASK_SET(NELR_ADAPTER_MALFUNC, 1)
		   | EHEA_BMASK_SET(NELR_PORT_MALFUNC, 1);

	ehea_h_reset_events(adapter->handle,
			    adapter->neq->fw_handle, event_mask);
}

static irqreturn_t ehea_interrupt_neq(int irq, void *param)
{
	struct ehea_adapter *adapter = param;
	tasklet_hi_schedule(&adapter->neq_tasklet);
	return IRQ_HANDLED;
}

static int ehea_fill_port_res(struct ehea_port_res *pr)
{
	int ret;
	struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;

	ehea_init_fill_rq1(pr, pr->rq1_skba.len);

	ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);

	ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1);

	return ret;
}

static int ehea_reg_interrupts(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;
	int i, ret;

	snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff",
		 dev->name);

	ret = ibmebus_request_irq(port->qp_eq->attr.ist1,
				  ehea_qp_aff_irq_handler,
				  0, port->int_aff_name, port);
	if (ret) {
		netdev_err(dev, "failed registering irq for qp_aff_irq_handler:ist=%X\n",
			   port->qp_eq->attr.ist1);
		goto out_free_qpeq;
	}

	netif_info(port, ifup, dev,
		   "irq_handle 0x%X for function qp_aff_irq_handler registered\n",
		   port->qp_eq->attr.ist1);

	for (i = 0; i < port->num_def_qps; i++) {
		pr = &port->port_res[i];
		snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
			 "%s-queue%d", dev->name, i);
		ret = ibmebus_request_irq(pr->eq->attr.ist1,
					  ehea_recv_irq_handler,
					  0, pr->int_send_name, pr);
		if (ret) {
			netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n",
				   i, pr->eq->attr.ist1);
			goto out_free_req;
		}
		netif_info(port, ifup, dev,
			   "irq_handle 0x%X for function ehea_queue_int %d registered\n",
			   pr->eq->attr.ist1, i);
	}
out:
	return ret;

out_free_req:
	while (--i >= 0) {
		u32 ist = port->port_res[i].eq->attr.ist1;
		ibmebus_free_irq(ist, &port->port_res[i]);
	}

out_free_qpeq:
	ibmebus_free_irq(port->qp_eq->attr.ist1, port);
	i = port->num_def_qps;

	goto out;
}

static void ehea_free_interrupts(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;
	int i;

	/* send */
	for (i = 0; i < port->num_def_qps; i++) {
		pr = &port->port_res[i];
		ibmebus_free_irq(pr->eq->attr.ist1, pr);
		netif_info(port, intr, dev,
			   "free send irq for res %d with handle 0x%X\n",
			   i, pr->eq->attr.ist1);
	}

	/* associated events */
	ibmebus_free_irq(port->qp_eq->attr.ist1, port);
	netif_info(port, intr, dev,
		   "associated event interrupt for handle 0x%X freed\n",
		   port->qp_eq->attr.ist1);
}

static int ehea_configure_port(struct ehea_port *port)
{
	int ret, i;
	u64 hret, mask;
	struct hcp_ehea_port_cb0 *cb0;

	ret = -ENOMEM;
	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0)
		goto out;

	cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1)
		     | EHEA_BMASK_SET(PXLY_RC_IP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_TCP_UDP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_XTRACT, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_TAG_FILTER,
				      PXLY_RC_VLAN_FILTER)
		     | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1);

	for (i = 0; i < port->num_mcs; i++)
		if (use_mcs)
			cb0->default_qpn_arr[i] =
				port->port_res[i].qp->init_attr.qp_nr;
		else
			cb0->default_qpn_arr[i] =
				port->port_res[0].qp->init_attr.qp_nr;

	if (netif_msg_ifup(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port");

	mask = EHEA_BMASK_SET(H_PORT_CB0_PRC, 1)
	     | EHEA_BMASK_SET(H_PORT_CB0_DEFQPNARRAY, 1);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB0, mask, cb0);
	ret = -EIO;
	if (hret != H_SUCCESS)
		goto out_free;

	ret = 0;

out_free:
	free_page((unsigned long)cb0);
out:
	return ret;
}

static int ehea_gen_smrs(struct ehea_port_res *pr)
{
	int ret;
	struct ehea_adapter *adapter = pr->port->adapter;

	ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr);
	if (ret)
		goto out;

	ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr);
	if (ret)
		goto out_free;

	return 0;

out_free:
	ehea_rem_mr(&pr->send_mr);
out:
	pr_err("Generating SMRS failed\n");
	return -EIO;
}

static int ehea_rem_smrs(struct ehea_port_res *pr)
{
	if ((ehea_rem_mr(&pr->send_mr)) ||
	    (ehea_rem_mr(&pr->recv_mr)))
		return -EIO;

	return 0;
}

static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
{
	int arr_size = sizeof(void *) * max_q_entries;

	q_skba->arr = vzalloc(arr_size);
	if (!q_skba->arr)
		return -ENOMEM;

	q_skba->len = max_q_entries;
	q_skba->index = 0;
	q_skba->os_skbs = 0;

	return 0;
}

static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
			      struct port_res_cfg *pr_cfg, int queue_token)
{
	struct ehea_adapter *adapter = port->adapter;
	enum ehea_eq_type eq_type = EHEA_EQ;
	struct ehea_qp_init_attr *init_attr = NULL;
	int ret = -EIO;
	u64 tx_bytes, rx_bytes, tx_packets, rx_packets;

	tx_bytes = pr->tx_bytes;
	tx_packets = pr->tx_packets;
	rx_bytes = pr->rx_bytes;
	rx_packets = pr->rx_packets;

	memset(pr, 0, sizeof(struct ehea_port_res));

	pr->tx_bytes = tx_bytes;
	pr->tx_packets = tx_packets;
	pr->rx_bytes = rx_bytes;
	pr->rx_packets = rx_packets;

	pr->port = port;

	pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
	if (!pr->eq) {
		pr_err("create_eq failed (eq)\n");
		goto out_free;
	}

	pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq,
				     pr->eq->fw_handle,
				     port->logical_port_id);
	if (!pr->recv_cq) {
		pr_err("create_cq failed (cq_recv)\n");
		goto out_free;
	}

	pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq,
				     pr->eq->fw_handle,
				     port->logical_port_id);
	if (!pr->send_cq) {
		pr_err("create_cq failed (cq_send)\n");
		goto out_free;
	}

	if (netif_msg_ifup(port))
		pr_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d\n",
			pr->send_cq->attr.act_nr_of_cqes,
			pr->recv_cq->attr.act_nr_of_cqes);

	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
	if (!init_attr) {
		ret = -ENOMEM;
		pr_err("no mem for ehea_qp_init_attr\n");
		goto out_free;
	}

	init_attr->low_lat_rq1 = 1;
	init_attr->signalingtype = 1;	/* generate CQE if specified in WQE */
	init_attr->rq_count = 3;
	init_attr->qp_token = queue_token;
	init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq;
	init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1;
	init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2;
	init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3;
	init_attr->wqe_size_enc_sq = EHEA_SG_SQ;
	init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1;
	init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2;
	init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3;
	init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD;
	init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD;
	init_attr->port_nr = port->logical_port_id;
	init_attr->send_cq_handle = pr->send_cq->fw_handle;
	init_attr->recv_cq_handle = pr->recv_cq->fw_handle;
	init_attr->aff_eq_handle = port->qp_eq->fw_handle;

	pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
	if (!pr->qp) {
		pr_err("create_qp failed\n");
		ret = -EIO;
		goto out_free;
	}

	if (netif_msg_ifup(port))
		pr_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d\n",
			init_attr->qp_nr,
			init_attr->act_nr_send_wqes,
			init_attr->act_nr_rwqes_rq1,
			init_attr->act_nr_rwqes_rq2,
			init_attr->act_nr_rwqes_rq3);

	pr->sq_skba_size = init_attr->act_nr_send_wqes + 1;

	ret = ehea_init_q_skba(&pr->sq_skba, pr->sq_skba_size);
	ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1);
	ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1);
	ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1);
	if (ret)
		goto out_free;

	pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10;
	if (ehea_gen_smrs(pr) != 0) {
		ret = -EIO;
		goto out_free;
	}

	atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1);

	kfree(init_attr);

	netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64);

	ret = 0;
	goto out;

out_free:
	kfree(init_attr);
	vfree(pr->sq_skba.arr);
	vfree(pr->rq1_skba.arr);
	vfree(pr->rq2_skba.arr);
	vfree(pr->rq3_skba.arr);
	ehea_destroy_qp(pr->qp);
	ehea_destroy_cq(pr->send_cq);
	ehea_destroy_cq(pr->recv_cq);
	ehea_destroy_eq(pr->eq);
out:
	return ret;
}

static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
{
	int ret, i;

	netif_napi_del(&pr->napi);

	ret = ehea_destroy_qp(pr->qp);

	if (!ret) {
		ehea_destroy_cq(pr->send_cq);
		ehea_destroy_cq(pr->recv_cq);
		ehea_destroy_eq(pr->eq);

		for (i = 0; i < pr->rq1_skba.len; i++)
			dev_kfree_skb(pr->rq1_skba.arr[i]);

		for (i = 0; i < pr->rq2_skba.len; i++)
			dev_kfree_skb(pr->rq2_skba.arr[i]);

		for (i = 0; i < pr->rq3_skba.len; i++)
			dev_kfree_skb(pr->rq3_skba.arr[i]);

		for (i = 0; i < pr->sq_skba.len; i++)
			dev_kfree_skb(pr->sq_skba.arr[i]);

		vfree(pr->rq1_skba.arr);
		vfree(pr->rq2_skba.arr);
		vfree(pr->rq3_skba.arr);
		vfree(pr->sq_skba.arr);
		ret = ehea_rem_smrs(pr);
	}
	return ret;
}

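/*
 * Build the immediate-data part of a type-2 send WQE.  Up to
 * SWQE2_MAX_IMM bytes of the linear skb data are copied into the WQE
 * itself; for TSO only the protocol headers are copied and the MSS is
 * set, with the rest of the linear data described by sg1entry.
 */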
static void write_swqe2_immediate(struct sk_buff *skb, struct ehea_swqe *swqe,
				  u32 lkey)
{
	int skb_data_size = skb_headlen(skb);
	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
	unsigned int immediate_len = SWQE2_MAX_IMM;

	swqe->descriptors = 0;

	if (skb_is_gso(skb)) {
		swqe->tx_control |= EHEA_SWQE_TSO;
		swqe->mss = skb_shinfo(skb)->gso_size;
		/*
		 * For TSO packets we only copy the headers into the
		 * immediate area.
		 */
		immediate_len = skb_tcp_all_headers(skb);
	}

	if (skb_is_gso(skb) || skb_data_size >= SWQE2_MAX_IMM) {
		skb_copy_from_linear_data(skb, imm_data, immediate_len);
		swqe->immediate_data_length = immediate_len;

		if (skb_data_size > immediate_len) {
			sg1entry->l_key = lkey;
			sg1entry->len = skb_data_size - immediate_len;
			sg1entry->vaddr =
				ehea_map_vaddr(skb->data + immediate_len);
			swqe->descriptors++;
		}
	} else {
		skb_copy_from_linear_data(skb, imm_data, skb_data_size);
		swqe->immediate_data_length = skb_data_size;
	}
}

static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
				    struct ehea_swqe *swqe, u32 lkey)
{
	struct ehea_vsgentry *sg_list, *sg1entry, *sgentry;
	skb_frag_t *frag;
	int nfrags, sg1entry_contains_frag_data, i;

	nfrags = skb_shinfo(skb)->nr_frags;
	sg1entry = &swqe->u.immdata_desc.sg_entry;
	sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list;
	sg1entry_contains_frag_data = 0;

	write_swqe2_immediate(skb, swqe, lkey);

	/* write descriptors */
	if (nfrags > 0) {
		if (swqe->descriptors == 0) {
			/* sg1entry not yet used */
			frag = &skb_shinfo(skb)->frags[0];

			/* copy sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = skb_frag_size(frag);
			sg1entry->vaddr =
				ehea_map_vaddr(skb_frag_address(frag));
			swqe->descriptors++;
			sg1entry_contains_frag_data = 1;
		}

		for (i = sg1entry_contains_frag_data; i < nfrags; i++) {
			/* copy sg entry data */
			frag = &skb_shinfo(skb)->frags[i];
			sgentry = &sg_list[i - sg1entry_contains_frag_data];

			sgentry->l_key = lkey;
			sgentry->len = skb_frag_size(frag);
			sgentry->vaddr = ehea_map_vaddr(skb_frag_address(frag));
			swqe->descriptors++;
		}
	}
}

static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
{
	int ret = 0;
	u64 hret;
	u8 reg_type;

	/* De/Register untagged packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		pr_err("%sregistering bc address failed (tagged)\n",
		       hcallid == H_REG_BCMC ? "" : "de");
		ret = -EIO;
		goto out_herr;
	}

	/* De/Register VLAN packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_VLANID_ALL;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		pr_err("%sregistering bc address failed (vlan)\n",
		       hcallid == H_REG_BCMC ? "" : "de");
		ret = -EIO;
	}
out_herr:
	return ret;
}

static int ehea_set_mac_addr(struct net_device *dev, void *sa)
{
	struct ehea_port *port = netdev_priv(dev);
	struct sockaddr *mac_addr = sa;
	struct hcp_ehea_port_cb0 *cb0;
	int ret;
	u64 hret;

	if (!is_valid_ether_addr(mac_addr->sa_data)) {
		ret = -EADDRNOTAVAIL;
		goto out;
	}

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		pr_err("no mem for cb0\n");
		ret = -ENOMEM;
		goto out;
	}

	memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN);

	cb0->port_mac_addr = cb0->port_mac_addr >> 16;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id, H_PORT_CB0,
				       EHEA_BMASK_SET(H_PORT_CB0_MAC, 1), cb0);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_free;
	}

	eth_hw_addr_set(dev, mac_addr->sa_data);

	/* Deregister old MAC in pHYP */
	if (port->state == EHEA_PORT_UP) {
		ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
		if (ret)
			goto out_upregs;
	}

	port->mac_addr = cb0->port_mac_addr << 16;

	/* Register new MAC in pHYP */
	if (port->state == EHEA_PORT_UP) {
		ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
		if (ret)
			goto out_upregs;
	}

	ret = 0;

out_upregs:
	ehea_update_bcmc_registrations();
out_free:
	free_page((unsigned long)cb0);
out:
	return ret;
}

static void ehea_promiscuous_error(u64 hret, int enable)
{
	if (hret == H_AUTHORITY)
		pr_info("Hypervisor denied %sabling promiscuous mode\n",
			enable == 1 ? "en" : "dis");
	else
		pr_err("failed %sabling promiscuous mode\n",
		       enable == 1 ? "en" : "dis");
}

static void ehea_promiscuous(struct net_device *dev, int enable)
{
	struct ehea_port *port = netdev_priv(dev);
	struct hcp_ehea_port_cb7 *cb7;
	u64 hret;

	if (enable == port->promisc)
		return;

	cb7 = (void *)get_zeroed_page(GFP_ATOMIC);
	if (!cb7) {
		pr_err("no mem for cb7\n");
		goto out;
	}

	/* Modify Pxs_DUCQPN in CB7 */
	cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB7, H_PORT_CB7_DUCQPN, cb7);
	if (hret) {
		ehea_promiscuous_error(hret, enable);
		goto out;
	}

	port->promisc = enable;
out:
	free_page((unsigned long)cb7);
}

static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
				     u32 hcallid)
{
	u64 hret;
	u8 reg_type;

	reg_type = EHEA_BCMC_MULTICAST | EHEA_BCMC_UNTAGGED;
	if (mc_mac_addr == 0)
		reg_type |= EHEA_BCMC_SCOPE_ALL;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);
	if (hret)
		goto out;

	reg_type = EHEA_BCMC_MULTICAST | EHEA_BCMC_VLANID_ALL;
	if (mc_mac_addr == 0)
		reg_type |= EHEA_BCMC_SCOPE_ALL;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);
out:
	return hret;
}

static int ehea_drop_multicast_list(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_mc_list *mc_entry = port->mc_list;
	struct list_head *pos;
	struct list_head *temp;
	int ret = 0;
	u64 hret;

	list_for_each_safe(pos, temp, &(port->mc_list->list)) {
		mc_entry = list_entry(pos, struct ehea_mc_list, list);

		hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
						 H_DEREG_BCMC);
		if (hret) {
			pr_err("failed deregistering mcast MAC\n");
			ret = -EIO;
		}

		list_del(pos);
		kfree(mc_entry);
	}
	return ret;
}

static void ehea_allmulti(struct net_device *dev, int enable)
{
	struct ehea_port *port = netdev_priv(dev);
	u64 hret;

	if (!port->allmulti) {
		if (enable) {
			/* Enable ALLMULTI */
			ehea_drop_multicast_list(dev);
			hret = ehea_multicast_reg_helper(port, 0, H_REG_BCMC);
			if (!hret)
				port->allmulti = 1;
			else
				netdev_err(dev,
					   "failed enabling IFF_ALLMULTI\n");
		}
	} else if (!enable) {
		/* Disable ALLMULTI */
		hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC);
		if (!hret)
			port->allmulti = 0;
		else
			netdev_err(dev,
				   "failed disabling IFF_ALLMULTI\n");
	}
}

static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
{
	struct ehea_mc_list *ehea_mcl_entry;
	u64 hret;

	ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
	if (!ehea_mcl_entry)
		return;

	INIT_LIST_HEAD(&ehea_mcl_entry->list);

	memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN);

	hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr,
					 H_REG_BCMC);
	if (!hret)
		list_add(&ehea_mcl_entry->list, &port->mc_list->list);
	else {
		pr_err("failed registering mcast MAC\n");
		kfree(ehea_mcl_entry);
	}
}

static void ehea_set_multicast_list(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int ret;

	ehea_promiscuous(dev, !!(dev->flags & IFF_PROMISC));

	if (dev->flags & IFF_ALLMULTI) {
		ehea_allmulti(dev, 1);
		goto out;
	}
	ehea_allmulti(dev, 0);

	if (!netdev_mc_empty(dev)) {
		ret = ehea_drop_multicast_list(dev);
		if (ret) {
			/* Dropping the current multicast list failed.
			 * Enabling ALL_MULTI is the best we can do.
			 */
			ehea_allmulti(dev, 1);
		}

		if (netdev_mc_count(dev) > port->adapter->max_mc_mac) {
			pr_info("Mcast registration limit reached (0x%llx). Use ALLMULTI!\n",
				port->adapter->max_mc_mac);
			goto out;
		}

		netdev_for_each_mc_addr(ha, dev)
			ehea_add_multicast_entry(port, ha->addr);
	}
out:
	ehea_update_bcmc_registrations();
}

static void xmit_common(struct sk_buff *skb, struct ehea_swqe *swqe)
{
	swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT | EHEA_SWQE_CRC;

	if (vlan_get_protocol(skb) != htons(ETH_P_IP))
		return;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		swqe->tx_control |= EHEA_SWQE_IP_CHECKSUM;

	swqe->ip_start = skb_network_offset(skb);
	swqe->ip_end = swqe->ip_start + ip_hdrlen(skb) - 1;

	switch (ip_hdr(skb)->protocol) {
	case IPPROTO_UDP:
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;

		swqe->tcp_offset = swqe->ip_end + 1 +
				   offsetof(struct udphdr, check);
		break;

	case IPPROTO_TCP:
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;

		swqe->tcp_offset = swqe->ip_end + 1 +
				   offsetof(struct tcphdr, check);
		break;
	}
}

static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
		       struct ehea_swqe *swqe, u32 lkey)
{
	swqe->tx_control |= EHEA_SWQE_DESCRIPTORS_PRESENT;

	xmit_common(skb, swqe);

	write_swqe2_data(skb, dev, swqe, lkey);
}

static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
		       struct ehea_swqe *swqe)
{
	u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0];

	xmit_common(skb, swqe);

	if (!skb->data_len)
		skb_copy_from_linear_data(skb, imm_data, skb->len);
	else
		skb_copy_bits(skb, 0, imm_data, skb->len);

	swqe->immediate_data_length = skb->len;
	dev_consume_skb_any(skb);
}

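/*
 * Transmit path: small frames (<= SWQE3_MAX_IMM) go out as type-3
 * WQEs carrying the whole packet as immediate data and are only
 * signalled every sig_comp_iv packets; larger frames use type-2 WQEs,
 * keep the skb in sq_skba until the completion arrives, and are
 * always signalled.
 */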
static netdev_tx_t ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_swqe *swqe;
	u32 lkey;
	int swqe_index;
	struct ehea_port_res *pr;
	struct netdev_queue *txq;

	pr = &port->port_res[skb_get_queue_mapping(skb)];
	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	swqe = ehea_get_swqe(pr->qp, &swqe_index);
	memset(swqe, 0, SWQE_HEADER_SIZE);
	atomic_dec(&pr->swqe_avail);

	if (skb_vlan_tag_present(skb)) {
		swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
		swqe->vlan_tag = skb_vlan_tag_get(skb);
	}

	pr->tx_packets++;
	pr->tx_bytes += skb->len;

	if (skb->len <= SWQE3_MAX_IMM) {
		u32 sig_iv = port->sig_comp_iv;
		u32 swqe_num = pr->swqe_id_counter;
		ehea_xmit3(skb, dev, swqe);
		swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE)
			| EHEA_BMASK_SET(EHEA_WR_ID_COUNT, swqe_num);
		if (pr->swqe_ll_count >= (sig_iv - 1)) {
			swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
						      sig_iv);
			swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
			pr->swqe_ll_count = 0;
		} else
			pr->swqe_ll_count += 1;
	} else {
		swqe->wr_id =
			EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
		      | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter)
		      | EHEA_BMASK_SET(EHEA_WR_ID_REFILL, 1)
		      | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index);
		pr->sq_skba.arr[pr->sq_skba.index] = skb;

		pr->sq_skba.index++;
		pr->sq_skba.index &= (pr->sq_skba.len - 1);

		lkey = pr->send_mr.lkey;
		ehea_xmit2(skb, dev, swqe, lkey);
		swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
	}
	pr->swqe_id_counter += 1;

	netif_info(port, tx_queued, dev,
		   "post swqe on QP %d\n", pr->qp->init_attr.qp_nr);
	if (netif_msg_tx_queued(port))
		ehea_dump(swqe, 512, "swqe");

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		netif_tx_stop_queue(txq);
		swqe->tx_control |= EHEA_SWQE_PURGE;
	}

	ehea_post_swqe(pr->qp, swqe);

	if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
		pr->p_stats.queue_stopped++;
		netif_tx_stop_queue(txq);
	}

	return NETDEV_TX_OK;
}

static int ehea_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	int index;
	u64 hret;
	int err = 0;

	cb1 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb1) {
		pr_err("no mem for cb1\n");
		err = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_port failed\n");
		err = -EINVAL;
		goto out_free;
	}

	index = (vid / 64);
	cb1->vlan_filter[index] |= ((u64)(0x8000000000000000 >> (vid & 0x3F)));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		pr_err("modify_ehea_port failed\n");
		err = -EINVAL;
	}
out_free:
	free_page((unsigned long)cb1);
out:
	return err;
}

static int ehea_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	int index;
	u64 hret;
	int err = 0;

	cb1 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb1) {
		pr_err("no mem for cb1\n");
		err = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_port failed\n");
		err = -EINVAL;
		goto out_free;
	}

	index = (vid / 64);
	cb1->vlan_filter[index] &= ~((u64)(0x8000000000000000 >> (vid & 0x3F)));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		pr_err("modify_ehea_port failed\n");
		err = -EINVAL;
	}
out_free:
	free_page((unsigned long)cb1);
out:
	return err;
}

static int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
{
	int ret = -EIO;
	u64 hret;
	u16 dummy16 = 0;
	u64 dummy64 = 0;
	struct hcp_modify_qp_cb0 *cb0;

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_qp failed (1)\n");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		pr_err("modify_ehea_qp failed (1)\n");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_qp failed (2)\n");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		pr_err("modify_ehea_qp failed (2)\n");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_qp failed (3)\n");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		pr_err("modify_ehea_qp failed (3)\n");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_qp failed (4)\n");
		goto out;
	}

	ret = 0;
out:
	free_page((unsigned long)cb0);
	return ret;
}

static int ehea_port_res_setup(struct ehea_port *port, int def_qps)
{
	int ret, i;
	struct port_res_cfg pr_cfg, pr_cfg_small_rx;
	enum ehea_eq_type eq_type = EHEA_EQ;

	port->qp_eq = ehea_create_eq(port->adapter, eq_type,
				     EHEA_MAX_ENTRIES_EQ, 1);
	if (!port->qp_eq) {
		ret = -EINVAL;
		pr_err("ehea_create_eq failed (qp_eq)\n");
		goto out_kill_eq;
	}

	pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries;
	pr_cfg.max_entries_scq = sq_entries * 2;
	pr_cfg.max_entries_sq = sq_entries;
	pr_cfg.max_entries_rq1 = rq1_entries;
	pr_cfg.max_entries_rq2 = rq2_entries;
	pr_cfg.max_entries_rq3 = rq3_entries;

	pr_cfg_small_rx.max_entries_rcq = 1;
	pr_cfg_small_rx.max_entries_scq = sq_entries;
	pr_cfg_small_rx.max_entries_sq = sq_entries;
	pr_cfg_small_rx.max_entries_rq1 = 1;
	pr_cfg_small_rx.max_entries_rq2 = 1;
	pr_cfg_small_rx.max_entries_rq3 = 1;

	for (i = 0; i < def_qps; i++) {
		ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i);
		if (ret)
			goto out_clean_pr;
	}
	for (i = def_qps; i < def_qps; i++) {
		ret = ehea_init_port_res(port, &port->port_res[i],
					 &pr_cfg_small_rx, i);
		if (ret)
			goto out_clean_pr;
	}

	return 0;

out_clean_pr:
	while (--i >= 0)
		ehea_clean_portres(port, &port->port_res[i]);

out_kill_eq:
	ehea_destroy_eq(port->qp_eq);
	return ret;
}

static int ehea_clean_all_portres(struct ehea_port *port)
{
	int ret = 0;
	int i;

	for (i = 0; i < port->num_def_qps; i++)
		ret |= ehea_clean_portres(port, &port->port_res[i]);

	ret |= ehea_destroy_eq(port->qp_eq);

	return ret;
}

static void ehea_remove_adapter_mr(struct ehea_adapter *adapter)
{
	if (adapter->active_ports)
		return;

	ehea_rem_mr(&adapter->mr);
}

static int ehea_add_adapter_mr(struct ehea_adapter *adapter)
{
	if (adapter->active_ports)
		return 0;

	return ehea_reg_kernel_mr(adapter, &adapter->mr);
}

static int ehea_up(struct net_device *dev)
{
	int ret, i;
	struct ehea_port *port = netdev_priv(dev);

	if (port->state == EHEA_PORT_UP)
		return 0;

	ret = ehea_port_res_setup(port, port->num_def_qps);
	if (ret) {
		netdev_err(dev, "port_res_failed\n");
		goto out;
	}

	/* Set default QP for this port */
	ret = ehea_configure_port(port);
	if (ret) {
		netdev_err(dev, "ehea_configure_port failed. ret:%d\n", ret);
		goto out_clean_pr;
	}

	ret = ehea_reg_interrupts(dev);
	if (ret) {
		netdev_err(dev, "reg_interrupts failed. ret:%d\n", ret);
		goto out_clean_pr;
	}

	for (i = 0; i < port->num_def_qps; i++) {
		ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
		if (ret) {
			netdev_err(dev, "activate_qp failed\n");
			goto out_free_irqs;
		}
	}

	for (i = 0; i < port->num_def_qps; i++) {
		ret = ehea_fill_port_res(&port->port_res[i]);
		if (ret) {
			netdev_err(dev, "out_free_irqs\n");
			goto out_free_irqs;
		}
	}

	ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
	if (ret) {
		ret = -EIO;
		goto out_free_irqs;
	}

	port->state = EHEA_PORT_UP;

	ret = 0;
	goto out;

out_free_irqs:
	ehea_free_interrupts(dev);

out_clean_pr:
	ehea_clean_all_portres(port);
out:
	if (ret)
		netdev_info(dev, "Failed starting. ret=%i\n", ret);

	ehea_update_bcmc_registrations();
	ehea_update_firmware_handles();

	return ret;
}

static void port_napi_disable(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps; i++)
		napi_disable(&port->port_res[i].napi);
}

static void port_napi_enable(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps; i++)
		napi_enable(&port->port_res[i].napi);
}

static int ehea_open(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	mutex_lock(&port->port_lock);

	netif_info(port, ifup, dev, "enabling port\n");

	netif_carrier_off(dev);

	ret = ehea_up(dev);
	if (!ret) {
		port_napi_enable(port);
		netif_tx_start_all_queues(dev);
	}

	mutex_unlock(&port->port_lock);
	schedule_delayed_work(&port->stats_work,
			      round_jiffies_relative(msecs_to_jiffies(1000)));

	return ret;
}

static int ehea_down(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	if (port->state == EHEA_PORT_DOWN)
		return 0;

	ehea_drop_multicast_list(dev);
	ehea_allmulti(dev, 0);
	ehea_broadcast_reg_helper(port, H_DEREG_BCMC);

	ehea_free_interrupts(dev);

	port->state = EHEA_PORT_DOWN;

	ehea_update_bcmc_registrations();

	ret = ehea_clean_all_portres(port);
	if (ret)
		netdev_info(dev, "Failed freeing resources. ret=%i\n", ret);

	ehea_update_firmware_handles();

	return ret;
}

static int ehea_stop(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	netif_info(port, ifdown, dev, "disabling port\n");

	set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
	cancel_work_sync(&port->reset_task);
	cancel_delayed_work_sync(&port->stats_work);
	mutex_lock(&port->port_lock);
	netif_tx_stop_all_queues(dev);
	port_napi_disable(port);
	ret = ehea_down(dev);
	mutex_unlock(&port->port_lock);
	clear_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
	return ret;
}

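/*
 * Quiesce helpers for DLPAR memory operations: ehea_purge_sq() marks
 * every send WQE so its completion is discarded, and ehea_flush_sq()
 * waits (bounded) until the hardware has drained the send queues.
 */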
static void ehea_purge_sq(struct ehea_qp *orig_qp)
{
	struct ehea_qp qp = *orig_qp;
	struct ehea_qp_init_attr *init_attr = &qp.init_attr;
	struct ehea_swqe *swqe;
	int wqe_index;
	int i;

	for (i = 0; i < init_attr->act_nr_send_wqes; i++) {
		swqe = ehea_get_swqe(&qp, &wqe_index);
		swqe->tx_control |= EHEA_SWQE_PURGE;
	}
}

static void ehea_flush_sq(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps; i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count;
		int ret;

		ret = wait_event_timeout(port->swqe_avail_wq,
					 atomic_read(&pr->swqe_avail) >= swqe_max,
					 msecs_to_jiffies(100));

		if (!ret) {
			pr_err("WARNING: sq not flushed completely\n");
			break;
		}
	}
}

static int ehea_stop_qps(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_modify_qp_cb0 *cb0;
	int ret = -EIO;
	int dret;
	int i;
	u64 hret;
	u64 dummy64 = 0;
	u16 dummy16 = 0;

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < (port->num_def_qps); i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		struct ehea_qp *qp = pr->qp;

		/* Purge send queue */
		ehea_purge_sq(qp);

		/* Disable queue pair */
		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			pr_err("query_ehea_qp failed (1)\n");
			goto out;
		}

		cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
		cb0->qp_ctl_reg &= ~H_QP_CR_ENABLED;

		hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
					     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
							    1), cb0, &dummy64,
					     &dummy64, &dummy16, &dummy16);
		if (hret != H_SUCCESS) {
			pr_err("modify_ehea_qp failed (1)\n");
			goto out;
		}

		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			pr_err("query_ehea_qp failed (2)\n");
			goto out;
		}

		/* deregister shared memory regions */
		dret = ehea_rem_smrs(pr);
		if (dret) {
			pr_err("unreg shared memory region failed\n");
			goto out;
		}
	}

	ret = 0;
out:
	free_page((unsigned long)cb0);

	return ret;
}

static void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
{
	struct ehea_qp qp = *orig_qp;
	struct ehea_qp_init_attr *init_attr = &qp.init_attr;
	struct ehea_rwqe *rwqe;
	struct sk_buff **skba_rq2 = pr->rq2_skba.arr;
	struct sk_buff **skba_rq3 = pr->rq3_skba.arr;
	struct sk_buff *skb;
	u32 lkey = pr->recv_mr.lkey;
	int i;
	int index;

	for (i = 0; i < init_attr->act_nr_rwqes_rq2 + 1; i++) {
		rwqe = ehea_get_next_rwqe(&qp, 2);
		rwqe->sg_list[0].l_key = lkey;
		index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
		skb = skba_rq2[index];
		if (skb)
			rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
	}

	for (i = 0; i < init_attr->act_nr_rwqes_rq3 + 1; i++) {
		rwqe = ehea_get_next_rwqe(&qp, 3);
		rwqe->sg_list[0].l_key = lkey;
		index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
		skb = skba_rq3[index];
		if (skb)
			rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
	}
}

static int ehea_restart_qps(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	int ret = 0;
	int i;
	struct hcp_modify_qp_cb0 *cb0;
	u64 hret;
	u64 dummy64 = 0;
	u16 dummy16 = 0;

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0)
		return -ENOMEM;

	for (i = 0; i < (port->num_def_qps); i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		struct ehea_qp *qp = pr->qp;

		ret = ehea_gen_smrs(pr);
		if (ret) {
			netdev_err(dev, "creation of shared memory regions failed\n");
			goto out;
		}

		ehea_update_rqs(qp, pr);

		/* Enable queue pair */
		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			netdev_err(dev, "query_ehea_qp failed (1)\n");
			ret = -EFAULT;
			goto out;
		}

		cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
		cb0->qp_ctl_reg |= H_QP_CR_ENABLED;

		hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
					     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
							    1), cb0, &dummy64,
					     &dummy64, &dummy16, &dummy16);
		if (hret != H_SUCCESS) {
			netdev_err(dev, "modify_ehea_qp failed (1)\n");
			ret = -EFAULT;
			goto out;
		}

		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			netdev_err(dev, "query_ehea_qp failed (2)\n");
			ret = -EFAULT;
			goto out;
		}

		/* refill entire queue */
		ehea_refill_rq1(pr, pr->rq1_skba.index, 0);
		ehea_refill_rq2(pr, 0);
		ehea_refill_rq3(pr, 0);
	}
out:
	free_page((unsigned long)cb0);
	return ret;
}
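
/*
 * Port reset worker scheduled via ehea_schedule_port_reset(): takes the
 * port down and up again under port_lock with transmit disabled.
 */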
static void ehea_reset_port(struct work_struct *work)
{
	int ret;
	struct ehea_port *port =
		container_of(work, struct ehea_port, reset_task);
	struct net_device *dev = port->netdev;

	mutex_lock(&dlpar_mem_lock);
	port->resets++;
	mutex_lock(&port->port_lock);
	netif_tx_disable(dev);
	port_napi_disable(port);

	ehea_down(dev);
	ret = ehea_up(dev);
	if (ret)
		goto out;

	ehea_set_multicast_list(dev);

	netif_info(port, timer, dev, "reset successful\n");

	port_napi_enable(port);
	netif_tx_wake_all_queues(dev);
out:
	mutex_unlock(&port->port_lock);
	mutex_unlock(&dlpar_mem_lock);
}
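
/*
 * Called on LPAR memory hotplug: stop every active port, replace each
 * adapter's kernel memory region to match the new memory layout, then
 * restart the ports.
 */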
static void ehea_rereg_mrs(void)
{
	int ret, i;
	struct ehea_adapter *adapter;

	pr_info("LPAR memory changed - re-initializing driver\n");

	list_for_each_entry(adapter, &adapter_list, list)
		if (adapter->active_ports) {
			/* Shutdown all ports */
			for (i = 0; i < EHEA_MAX_PORTS; i++) {
				struct ehea_port *port = adapter->port[i];
				struct net_device *dev;

				if (!port)
					continue;

				dev = port->netdev;

				if (dev->flags & IFF_UP) {
					mutex_lock(&port->port_lock);
					netif_tx_disable(dev);
					ehea_flush_sq(port);
					ret = ehea_stop_qps(dev);
					if (ret) {
						mutex_unlock(&port->port_lock);
						goto out;
					}
					port_napi_disable(port);
					mutex_unlock(&port->port_lock);
				}
				reset_sq_restart_flag(port);
			}

			/* Unregister old memory region */
			ret = ehea_rem_mr(&adapter->mr);
			if (ret) {
				pr_err("unregister MR failed - driver inoperable!\n");
				goto out;
			}
		}

	clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags);

	list_for_each_entry(adapter, &adapter_list, list)
		if (adapter->active_ports) {
			/* Register new memory region */
			ret = ehea_reg_kernel_mr(adapter, &adapter->mr);
			if (ret) {
				pr_err("register MR failed - driver inoperable!\n");
				goto out;
			}

			/* Restart all ports */
			for (i = 0; i < EHEA_MAX_PORTS; i++) {
				struct ehea_port *port = adapter->port[i];

				if (port) {
					struct net_device *dev = port->netdev;

					if (dev->flags & IFF_UP) {
						mutex_lock(&port->port_lock);
						ret = ehea_restart_qps(dev);
						if (!ret) {
							check_sqs(port);
							port_napi_enable(port);
							netif_tx_wake_all_queues(dev);
						} else {
							netdev_err(dev, "Unable to restart QPS\n");
						}
						mutex_unlock(&port->port_lock);
					}
				}
			}
		}
	pr_info("re-initializing driver complete\n");
out:
	return;
}

static void ehea_tx_watchdog(struct net_device *dev, unsigned int txqueue)
{
	struct ehea_port *port = netdev_priv(dev);

	if (netif_carrier_ok(dev) &&
	    !test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
		ehea_schedule_port_reset(port);
}

static int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
{
	struct hcp_query_ehea *cb;
	u64 hret;
	int ret;

	cb = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb) {
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea(adapter->handle, cb);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_herr;
	}

	adapter->max_mc_mac = cb->max_mc_mac - 1;
	ret = 0;

out_herr:
	free_page((unsigned long)cb);
out:
	return ret;
}

static int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
{
	struct hcp_ehea_port_cb4 *cb4;
	u64 hret;
	int ret = 0;

	*jumbo = 0;

	/* (Try to) enable *jumbo frames */
	cb4 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb4) {
		pr_err("no mem for cb4\n");
		return -ENOMEM;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id,
				      H_PORT_CB4,
				      H_PORT_CB4_JUMBO, cb4);
	if (hret == H_SUCCESS) {
		if (cb4->jumbo_frame) {
			*jumbo = 1;
		} else {
			cb4->jumbo_frame = 1;
			hret = ehea_h_modify_ehea_port(port->adapter->handle,
						       port->logical_port_id,
						       H_PORT_CB4,
						       H_PORT_CB4_JUMBO, cb4);
			if (hret == H_SUCCESS)
				*jumbo = 1;
		}
	} else {
		ret = -EINVAL;
	}

	free_page((unsigned long)cb4);
	return ret;
}
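
/* Read-only sysfs attribute exposing the logical port id of a port device */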
static ssize_t log_port_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct ehea_port *port = container_of(dev, struct ehea_port,
					      ofdev.dev);
	return sprintf(buf, "%d", port->logical_port_id);
}

static DEVICE_ATTR_RO(log_port_id);

static void logical_port_release(struct device *dev)
{
	struct ehea_port *port = container_of(dev, struct ehea_port,
					      ofdev.dev);
	of_node_put(port->ofdev.dev.of_node);
}

static struct device *ehea_register_port(struct ehea_port *port,
					 struct device_node *dn)
{
	int ret;

	port->ofdev.dev.of_node = of_node_get(dn);
	port->ofdev.dev.parent = &port->adapter->ofdev->dev;
	port->ofdev.dev.bus = &ibmebus_bus_type;

	dev_set_name(&port->ofdev.dev, "port%d", port_name_cnt++);
	port->ofdev.dev.release = logical_port_release;

	ret = of_device_register(&port->ofdev);
	if (ret) {
		pr_err("failed to register device. ret=%d\n", ret);
		goto out;
	}

	ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
	if (ret) {
		pr_err("failed to register attributes, ret=%d\n", ret);
		goto out_unreg_of_dev;
	}

	return &port->ofdev.dev;

out_unreg_of_dev:
	of_device_unregister(&port->ofdev);
out:
	return NULL;
}

static void ehea_unregister_port(struct ehea_port *port)
{
	device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
	of_device_unregister(&port->ofdev);
}

static const struct net_device_ops ehea_netdev_ops = {
	.ndo_open		= ehea_open,
	.ndo_stop		= ehea_stop,
	.ndo_start_xmit		= ehea_start_xmit,
	.ndo_get_stats64	= ehea_get_stats64,
	.ndo_set_mac_address	= ehea_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= ehea_set_multicast_list,
	.ndo_vlan_rx_add_vid	= ehea_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ehea_vlan_rx_kill_vid,
	.ndo_tx_timeout		= ehea_tx_watchdog,
};
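
/*
 * Allocate and register one net_device for a logical port: sense the
 * port attributes, register the port device, set up netdev ops and
 * features, and report the jumbo frame state.
 */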
static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
						u32 logical_port_id,
						struct device_node *dn)
{
	int ret;
	struct net_device *dev;
	struct ehea_port *port;
	struct device *port_dev;
	int jumbo;

	/* allocate memory for the port structures */
	dev = alloc_etherdev_mq(sizeof(struct ehea_port), EHEA_MAX_PORT_RES);
	if (!dev) {
		ret = -ENOMEM;
		goto out_err;
	}

	port = netdev_priv(dev);

	mutex_init(&port->port_lock);
	port->state = EHEA_PORT_DOWN;
	port->sig_comp_iv = sq_entries / 10;

	port->adapter = adapter;
	port->netdev = dev;
	port->logical_port_id = logical_port_id;

	port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT);

	port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL);
	if (!port->mc_list) {
		ret = -ENOMEM;
		goto out_free_ethdev;
	}

	INIT_LIST_HEAD(&port->mc_list->list);

	ret = ehea_sense_port_attr(port);
	if (ret)
		goto out_free_mc_list;

	netif_set_real_num_rx_queues(dev, port->num_def_qps);
	netif_set_real_num_tx_queues(dev, port->num_def_qps);

	port_dev = ehea_register_port(port, dn);
	if (!port_dev)
		goto out_free_mc_list;

	SET_NETDEV_DEV(dev, port_dev);

	/* initialize net_device structure */
	eth_hw_addr_set(dev, (u8 *)&port->mac_addr);

	dev->netdev_ops = &ehea_netdev_ops;
	ehea_set_ethtool_ops(dev);

	dev->hw_features = NETIF_F_SG | NETIF_F_TSO |
		      NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_CTAG_TX;
	dev->features = NETIF_F_SG | NETIF_F_TSO |
		      NETIF_F_HIGHDMA | NETIF_F_IP_CSUM |
		      NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
		      NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM;
	dev->vlan_features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HIGHDMA |
			NETIF_F_IP_CSUM;
	dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;

	/* MTU range: 68 - 9022 */
	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = EHEA_MAX_PACKET_SIZE;

	INIT_WORK(&port->reset_task, ehea_reset_port);
	INIT_DELAYED_WORK(&port->stats_work, ehea_update_stats);

	init_waitqueue_head(&port->swqe_avail_wq);
	init_waitqueue_head(&port->restart_wq);

	ret = register_netdev(dev);
	if (ret) {
		pr_err("register_netdev failed. ret=%d\n", ret);
		goto out_unreg_port;
	}

	ret = ehea_get_jumboframe_status(port, &jumbo);
	if (ret)
		netdev_err(dev, "failed determining jumbo frame status\n");

	netdev_info(dev, "Jumbo frames are %sabled\n",
		    jumbo == 1 ? "en" : "dis");

	adapter->active_ports++;

	return port;

out_unreg_port:
	ehea_unregister_port(port);
out_free_mc_list:
	kfree(port->mc_list);
out_free_ethdev:
	free_netdev(dev);
out_err:
	pr_err("setting up logical port with id=%d failed, ret=%d\n",
	       logical_port_id, ret);
	return NULL;
}
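
/* Tear down one logical port; reverses ehea_setup_single_port() */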
static void ehea_shutdown_single_port(struct ehea_port *port)
{
	struct ehea_adapter *adapter = port->adapter;

	cancel_work_sync(&port->reset_task);
	cancel_delayed_work_sync(&port->stats_work);
	unregister_netdev(port->netdev);
	ehea_unregister_port(port);
	kfree(port->mc_list);
	free_netdev(port->netdev);
	adapter->active_ports--;
}

static int ehea_setup_ports(struct ehea_adapter *adapter)
{
	struct device_node *lhea_dn;
	struct device_node *eth_dn = NULL;
	const u32 *dn_log_port_id;
	int i = 0;

	lhea_dn = adapter->ofdev->dev.of_node;
	while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
		dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
						 NULL);
		if (!dn_log_port_id) {
			pr_err("bad device node: eth_dn name=%pOF\n", eth_dn);
			continue;
		}

		if (ehea_add_adapter_mr(adapter)) {
			pr_err("creating MR failed\n");
			of_node_put(eth_dn);
			return -EIO;
		}

		adapter->port[i] = ehea_setup_single_port(adapter,
							  *dn_log_port_id,
							  eth_dn);
		if (adapter->port[i])
			netdev_info(adapter->port[i]->netdev,
				    "logical port id #%d\n", *dn_log_port_id);
		else
			ehea_remove_adapter_mr(adapter);

		i++;
	}
	return 0;
}

static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
					   u32 logical_port_id)
{
	struct device_node *lhea_dn;
	struct device_node *eth_dn = NULL;
	const u32 *dn_log_port_id;

	lhea_dn = adapter->ofdev->dev.of_node;
	while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
		dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
						 NULL);
		if (dn_log_port_id)
			if (*dn_log_port_id == logical_port_id)
				return eth_dn;
	}

	return NULL;
}
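
/*
 * probe_port/remove_port sysfs attributes: add or remove a logical port
 * on a running adapter, e.g. for dynamic LPAR reconfiguration.
 */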
static ssize_t probe_port_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct ehea_adapter *adapter = dev_get_drvdata(dev);
	struct ehea_port *port;
	struct device_node *eth_dn = NULL;
	int i;
	u32 logical_port_id;

	sscanf(buf, "%d", &logical_port_id);

	port = ehea_get_port(adapter, logical_port_id);
	if (port) {
		netdev_info(port->netdev, "adding port with logical port id=%d failed: port already configured\n",
			    logical_port_id);
		return -EINVAL;
	}

	eth_dn = ehea_get_eth_dn(adapter, logical_port_id);
	if (!eth_dn) {
		pr_info("no logical port with id %d found\n", logical_port_id);
		return -EINVAL;
	}

	if (ehea_add_adapter_mr(adapter)) {
		pr_err("creating MR failed\n");
		of_node_put(eth_dn);
		return -EIO;
	}

	port = ehea_setup_single_port(adapter, logical_port_id, eth_dn);

	of_node_put(eth_dn);

	if (port) {
		for (i = 0; i < EHEA_MAX_PORTS; i++)
			if (!adapter->port[i]) {
				adapter->port[i] = port;
				break;
			}

		netdev_info(port->netdev, "added: (logical port id=%d)\n",
			    logical_port_id);
	} else {
		ehea_remove_adapter_mr(adapter);
		return -EIO;
	}

	return (ssize_t) count;
}

static ssize_t remove_port_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct ehea_adapter *adapter = dev_get_drvdata(dev);
	struct ehea_port *port;
	int i;
	u32 logical_port_id;

	sscanf(buf, "%d", &logical_port_id);

	port = ehea_get_port(adapter, logical_port_id);
	if (port) {
		netdev_info(port->netdev, "removed: (logical port id=%d)\n",
			    logical_port_id);

		ehea_shutdown_single_port(port);

		for (i = 0; i < EHEA_MAX_PORTS; i++)
			if (adapter->port[i] == port) {
				adapter->port[i] = NULL;
				break;
			}
	} else {
		pr_err("removing port with logical port id=%d failed. port not configured.\n",
		       logical_port_id);
		return -EINVAL;
	}

	ehea_remove_adapter_mr(adapter);

	return (ssize_t) count;
}

static DEVICE_ATTR_WO(probe_port);
static DEVICE_ATTR_WO(remove_port);

static int ehea_create_device_sysfs(struct platform_device *dev)
{
	int ret = device_create_file(&dev->dev, &dev_attr_probe_port);
	if (ret)
		goto out;

	ret = device_create_file(&dev->dev, &dev_attr_remove_port);
out:
	return ret;
}

static void ehea_remove_device_sysfs(struct platform_device *dev)
{
	device_remove_file(&dev->dev, &dev_attr_probe_port);
	device_remove_file(&dev->dev, &dev_attr_remove_port);
}

static int ehea_reboot_notifier(struct notifier_block *nb,
				unsigned long action, void *unused)
{
	if (action == SYS_RESTART) {
		pr_info("Reboot: freeing all eHEA resources\n");
		ibmebus_unregister_driver(&ehea_driver);
	}
	return NOTIFY_DONE;
}

static struct notifier_block ehea_reboot_nb = {
	.notifier_call = ehea_reboot_notifier,
};
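
/*
 * Memory hotplug notifier: keep the driver's section bitmap in sync and
 * re-register all memory regions while transfers are stopped.
 */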
static int ehea_mem_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	int ret = NOTIFY_BAD;
	struct memory_notify *arg = data;

	mutex_lock(&dlpar_mem_lock);

	switch (action) {
	case MEM_CANCEL_OFFLINE:
		pr_info("memory offlining canceled");
		fallthrough;	/* re-add canceled memory block */
	case MEM_ONLINE:
		pr_info("memory is going online");
		set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
		if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
			goto out_unlock;
		ehea_rereg_mrs();
		break;
	case MEM_GOING_OFFLINE:
		pr_info("memory is going offline");
		set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
		if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
			goto out_unlock;
		ehea_rereg_mrs();
		break;
	default:
		break;
	}

	ehea_update_firmware_handles();
	ret = NOTIFY_OK;

out_unlock:
	mutex_unlock(&dlpar_mem_lock);
	return ret;
}

static struct notifier_block ehea_mem_nb = {
	.notifier_call = ehea_mem_notifier,
};

static void ehea_crash_handler(void)
{
	int i;

	if (ehea_fw_handles.arr)
		for (i = 0; i < ehea_fw_handles.num_entries; i++)
			ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
					     ehea_fw_handles.arr[i].fwh,
					     FORCE_FREE);

	if (ehea_bcmc_regs.arr)
		for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
			ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
					      ehea_bcmc_regs.arr[i].port_id,
					      ehea_bcmc_regs.arr[i].reg_type,
					      ehea_bcmc_regs.arr[i].macaddr,
					      0, H_DEREG_BCMC);
}

static atomic_t ehea_memory_hooks_registered;

/* Register memory hooks on probe of first adapter */
static int ehea_register_memory_hooks(void)
{
	int ret = 0;

	if (atomic_inc_return(&ehea_memory_hooks_registered) > 1)
		return 0;

	ret = ehea_create_busmap();
	if (ret) {
		pr_info("ehea_create_busmap failed\n");
		goto out;
	}

	ret = register_reboot_notifier(&ehea_reboot_nb);
	if (ret) {
		pr_info("register_reboot_notifier failed\n");
		goto out;
	}

	ret = register_memory_notifier(&ehea_mem_nb);
	if (ret) {
		pr_info("register_memory_notifier failed\n");
		goto out_unreg_reboot;
	}

	ret = crash_shutdown_register(ehea_crash_handler);
	if (ret) {
		pr_info("crash_shutdown_register failed\n");
		goto out_unreg_memory;
	}

	return 0;

out_unreg_memory:
	unregister_memory_notifier(&ehea_mem_nb);
out_unreg_reboot:
	unregister_reboot_notifier(&ehea_reboot_nb);
out:
	atomic_dec(&ehea_memory_hooks_registered);
	return ret;
}

static void ehea_unregister_memory_hooks(void)
{
	/* Only remove the hooks if we've registered them */
	if (atomic_read(&ehea_memory_hooks_registered) == 0)
		return;

	unregister_reboot_notifier(&ehea_reboot_nb);
	if (crash_shutdown_unregister(ehea_crash_handler))
		pr_info("failed unregistering crash handler\n");
	unregister_memory_notifier(&ehea_mem_nb);
}
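
/*
 * Platform probe: sense adapter attributes, create the notification EQ
 * and its tasklet, create the sysfs hooks and set up all ports found in
 * the device tree.
 */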
static int ehea_probe_adapter(struct platform_device *dev)
{
	struct ehea_adapter *adapter;
	const u64 *adapter_handle;
	int ret;
	int i;

	ret = ehea_register_memory_hooks();
	if (ret)
		return ret;

	if (!dev || !dev->dev.of_node) {
		pr_err("Invalid ibmebus device probed\n");
		return -EINVAL;
	}

	adapter = devm_kzalloc(&dev->dev, sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		ret = -ENOMEM;
		dev_err(&dev->dev, "no mem for ehea_adapter\n");
		goto out;
	}

	list_add(&adapter->list, &adapter_list);

	adapter->ofdev = dev;

	adapter_handle = of_get_property(dev->dev.of_node, "ibm,hea-handle",
					 NULL);
	if (adapter_handle)
		adapter->handle = *adapter_handle;

	if (!adapter->handle) {
		dev_err(&dev->dev, "failed getting handle for adapter"
			" '%pOF'\n", dev->dev.of_node);
		ret = -ENODEV;
		goto out_free_ad;
	}

	adapter->pd = EHEA_PD_ID;

	platform_set_drvdata(dev, adapter);

	/* initialize adapter and ports */
	/* get adapter properties */
	ret = ehea_sense_adapter_attr(adapter);
	if (ret) {
		dev_err(&dev->dev, "sense_adapter_attr failed: %d\n", ret);
		goto out_free_ad;
	}

	adapter->neq = ehea_create_eq(adapter,
				      EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1);
	if (!adapter->neq) {
		ret = -EIO;
		dev_err(&dev->dev, "NEQ creation failed\n");
		goto out_free_ad;
	}

	tasklet_setup(&adapter->neq_tasklet, ehea_neq_tasklet);

	ret = ehea_create_device_sysfs(dev);
	if (ret)
		goto out_kill_eq;

	ret = ehea_setup_ports(adapter);
	if (ret) {
		dev_err(&dev->dev, "setup_ports failed\n");
		goto out_rem_dev_sysfs;
	}

	ret = ibmebus_request_irq(adapter->neq->attr.ist1,
				  ehea_interrupt_neq, 0,
				  "ehea_neq", adapter);
	if (ret) {
		dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
		goto out_shutdown_ports;
	}

	/* Handle any events that might be pending. */
	tasklet_hi_schedule(&adapter->neq_tasklet);

	ret = 0;
	goto out;

out_shutdown_ports:
	for (i = 0; i < EHEA_MAX_PORTS; i++)
		if (adapter->port[i]) {
			ehea_shutdown_single_port(adapter->port[i]);
			adapter->port[i] = NULL;
		}

out_rem_dev_sysfs:
	ehea_remove_device_sysfs(dev);
out_kill_eq:
	ehea_destroy_eq(adapter->neq);
out_free_ad:
	list_del(&adapter->list);
out:
	ehea_update_firmware_handles();
	return ret;
}

static int ehea_remove(struct platform_device *dev)
{
	struct ehea_adapter *adapter = platform_get_drvdata(dev);
	int i;

	for (i = 0; i < EHEA_MAX_PORTS; i++)
		if (adapter->port[i]) {
			ehea_shutdown_single_port(adapter->port[i]);
			adapter->port[i] = NULL;
		}

	ehea_remove_device_sysfs(dev);

	ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
	tasklet_kill(&adapter->neq_tasklet);

	ehea_destroy_eq(adapter->neq);
	ehea_remove_adapter_mr(adapter);
	list_del(&adapter->list);

	ehea_update_firmware_handles();

	return 0;
}

static int check_module_parm(void)
{
	int ret = 0;

	if ((rq1_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) {
		pr_info("Bad parameter: rq1_entries\n");
		ret = -EINVAL;
	}
	if ((rq2_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) {
		pr_info("Bad parameter: rq2_entries\n");
		ret = -EINVAL;
	}
	if ((rq3_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) {
		pr_info("Bad parameter: rq3_entries\n");
		ret = -EINVAL;
	}
	if ((sq_entries < EHEA_MIN_ENTRIES_QP) ||
	    (sq_entries > EHEA_MAX_ENTRIES_SQ)) {
		pr_info("Bad parameter: sq_entries\n");
		ret = -EINVAL;
	}

	return ret;
}

static ssize_t capabilities_show(struct device_driver *drv, char *buf)
{
	return sprintf(buf, "%d", EHEA_CAPABILITIES);
}

static DRIVER_ATTR_RO(capabilities);
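
/*
 * Module init: validate module parameters, register the driver on the
 * ibmebus bus and create the capabilities attribute.
 */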
static int __init ehea_module_init(void)
{
	int ret;

	pr_info("IBM eHEA ethernet device driver (Release %s)\n", DRV_VERSION);

	memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles));
	memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs));

	mutex_init(&ehea_fw_handles.lock);
	spin_lock_init(&ehea_bcmc_regs.lock);

	ret = check_module_parm();
	if (ret)
		goto out;

	ret = ibmebus_register_driver(&ehea_driver);
	if (ret) {
		pr_err("failed registering eHEA device driver on ebus\n");
		goto out;
	}

	ret = driver_create_file(&ehea_driver.driver,
				 &driver_attr_capabilities);
	if (ret) {
		pr_err("failed to register capabilities attribute, ret=%d\n",
		       ret);
		goto out2;
	}

	return ret;

out2:
	ibmebus_unregister_driver(&ehea_driver);
out:
	return ret;
}

static void __exit ehea_module_exit(void)
{
	driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
	ibmebus_unregister_driver(&ehea_driver);
	ehea_unregister_memory_hooks();
	kfree(ehea_fw_handles.arr);
	kfree(ehea_bcmc_regs.arr);
	ehea_destroy_busmap();
}

module_init(ehea_module_init);
module_exit(ehea_module_exit);