/*
 * Copyright (c) 2004-2007 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/random.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/workqueue.h>
#include <linux/kdev_t.h>
#include <linux/etherdevice.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include "cm_msgs.h"
MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");
static const char * const ibcm_rej_reason_strs[] = {
	[IB_CM_REJ_NO_QP]			= "no QP",
	[IB_CM_REJ_NO_EEC]			= "no EEC",
	[IB_CM_REJ_NO_RESOURCES]		= "no resources",
	[IB_CM_REJ_TIMEOUT]			= "timeout",
	[IB_CM_REJ_UNSUPPORTED]			= "unsupported",
	[IB_CM_REJ_INVALID_COMM_ID]		= "invalid comm ID",
	[IB_CM_REJ_INVALID_COMM_INSTANCE]	= "invalid comm instance",
	[IB_CM_REJ_INVALID_SERVICE_ID]		= "invalid service ID",
	[IB_CM_REJ_INVALID_TRANSPORT_TYPE]	= "invalid transport type",
	[IB_CM_REJ_STALE_CONN]			= "stale conn",
	[IB_CM_REJ_RDC_NOT_EXIST]		= "RDC not exist",
	[IB_CM_REJ_INVALID_GID]			= "invalid GID",
	[IB_CM_REJ_INVALID_LID]			= "invalid LID",
	[IB_CM_REJ_INVALID_SL]			= "invalid SL",
	[IB_CM_REJ_INVALID_TRAFFIC_CLASS]	= "invalid traffic class",
	[IB_CM_REJ_INVALID_HOP_LIMIT]		= "invalid hop limit",
	[IB_CM_REJ_INVALID_PACKET_RATE]		= "invalid packet rate",
	[IB_CM_REJ_INVALID_ALT_GID]		= "invalid alt GID",
	[IB_CM_REJ_INVALID_ALT_LID]		= "invalid alt LID",
	[IB_CM_REJ_INVALID_ALT_SL]		= "invalid alt SL",
	[IB_CM_REJ_INVALID_ALT_TRAFFIC_CLASS]	= "invalid alt traffic class",
	[IB_CM_REJ_INVALID_ALT_HOP_LIMIT]	= "invalid alt hop limit",
	[IB_CM_REJ_INVALID_ALT_PACKET_RATE]	= "invalid alt packet rate",
	[IB_CM_REJ_PORT_CM_REDIRECT]		= "port CM redirect",
	[IB_CM_REJ_PORT_REDIRECT]		= "port redirect",
	[IB_CM_REJ_INVALID_MTU]			= "invalid MTU",
	[IB_CM_REJ_INSUFFICIENT_RESP_RESOURCES]	= "insufficient resp resources",
	[IB_CM_REJ_CONSUMER_DEFINED]		= "consumer defined",
	[IB_CM_REJ_INVALID_RNR_RETRY]		= "invalid RNR retry",
	[IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID]	= "duplicate local comm ID",
	[IB_CM_REJ_INVALID_CLASS_VERSION]	= "invalid class version",
	[IB_CM_REJ_INVALID_FLOW_LABEL]		= "invalid flow label",
	[IB_CM_REJ_INVALID_ALT_FLOW_LABEL]	= "invalid alt flow label",
};
const char *__attribute_const__ ibcm_reject_msg(int reason)
{
	size_t index = reason;

	if (index < ARRAY_SIZE(ibcm_rej_reason_strs) &&
	    ibcm_rej_reason_strs[index])
		return ibcm_rej_reason_strs[index];

	return "unrecognized reason";
}
EXPORT_SYMBOL(ibcm_reject_msg);
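
/*
 * Usage sketch (added commentary, not part of the original file): a
 * consumer's CM event handler can translate a received REJ reason into a
 * readable string.  example_rej_handler is hypothetical.
 */
#if 0
static int example_rej_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event)
{
	if (event->event == IB_CM_REJ_RECEIVED)
		pr_debug("CM REJ: %s\n",
			 ibcm_reject_msg(event->param.rej_rcvd.reason));
	return 0;
}
#endif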
static void cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device, void *client_data);

static struct ib_client cm_client = {
	.name   = "cm",
	.add    = cm_add_one,
	.remove = cm_remove_one
};
static struct ib_cm {
	spinlock_t lock;
	struct list_head device_list;
	rwlock_t device_lock;
	struct rb_root listen_service_table;
	u64 listen_service_id;
	/* struct rb_root peer_service_table; todo: fix peer to peer */
	struct rb_root remote_qp_table;
	struct rb_root remote_id_table;
	struct rb_root remote_sidr_table;
	struct idr local_id_table;
	__be32 random_id_operand;
	struct list_head timewait_list;
	struct workqueue_struct *wq;
} cm;
/* Counter indexes ordered by attribute ID */
enum {
	CM_REQ_COUNTER,
	CM_MRA_COUNTER,
	CM_REJ_COUNTER,
	CM_REP_COUNTER,
	CM_RTU_COUNTER,
	CM_DREQ_COUNTER,
	CM_DREP_COUNTER,
	CM_SIDR_REQ_COUNTER,
	CM_SIDR_REP_COUNTER,
	CM_LAP_COUNTER,
	CM_APR_COUNTER,
	CM_ATTR_COUNT,
	CM_ATTR_ID_OFFSET = 0x0010,
};

enum {
	CM_XMIT,
	CM_XMIT_RETRIES,
	CM_RECV,
	CM_RECV_DUPLICATES,
	CM_COUNTER_GROUPS
};
static char const counter_group_names[CM_COUNTER_GROUPS]
				     [sizeof("cm_rx_duplicates")] = {
	"cm_tx_msgs", "cm_tx_retries",
	"cm_rx_msgs", "cm_rx_duplicates"
};
struct cm_counter_group {
	struct kobject obj;
	atomic_long_t counter[CM_ATTR_COUNT];
};
struct cm_counter_attribute {
	struct attribute attr;
	int index;
};
#define CM_COUNTER_ATTR(_name, _index) \
struct cm_counter_attribute cm_##_name##_counter_attr = { \
	.attr = { .name = __stringify(_name), .mode = 0444 }, \
	.index = _index \
}
static CM_COUNTER_ATTR(req, CM_REQ_COUNTER);
static CM_COUNTER_ATTR(mra, CM_MRA_COUNTER);
static CM_COUNTER_ATTR(rej, CM_REJ_COUNTER);
static CM_COUNTER_ATTR(rep, CM_REP_COUNTER);
static CM_COUNTER_ATTR(rtu, CM_RTU_COUNTER);
static CM_COUNTER_ATTR(dreq, CM_DREQ_COUNTER);
static CM_COUNTER_ATTR(drep, CM_DREP_COUNTER);
static CM_COUNTER_ATTR(sidr_req, CM_SIDR_REQ_COUNTER);
static CM_COUNTER_ATTR(sidr_rep, CM_SIDR_REP_COUNTER);
static CM_COUNTER_ATTR(lap, CM_LAP_COUNTER);
static CM_COUNTER_ATTR(apr, CM_APR_COUNTER);
static struct attribute *cm_counter_default_attrs[] = {
	&cm_req_counter_attr.attr,
	&cm_mra_counter_attr.attr,
	&cm_rej_counter_attr.attr,
	&cm_rep_counter_attr.attr,
	&cm_rtu_counter_attr.attr,
	&cm_dreq_counter_attr.attr,
	&cm_drep_counter_attr.attr,
	&cm_sidr_req_counter_attr.attr,
	&cm_sidr_rep_counter_attr.attr,
	&cm_lap_counter_attr.attr,
	&cm_apr_counter_attr.attr,
	NULL
};
struct cm_port {
	struct cm_device *cm_dev;
	struct ib_mad_agent *mad_agent;
	struct kobject port_obj;
	u8 port_num;
	struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
};
struct cm_device {
	struct list_head list;
	struct ib_device *ib_device;
	struct device *device;
	u8 ack_delay;
	int going_down;
	struct cm_port *port[0];
};
struct cm_av {
	struct cm_port *port;
	union ib_gid dgid;
	struct ib_ah_attr ah_attr;
	u16 pkey_index;
	u8 timeout;
};
struct cm_work {
	struct delayed_work work;
	struct list_head list;
	struct cm_port *port;
	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
	__be32 local_id;			/* Established / timewait */
	__be32 remote_id;
	struct ib_cm_event cm_event;
	struct ib_sa_path_rec path[0];
};
struct cm_timewait_info {
	struct cm_work work;			/* Must be first. */
	struct list_head list;
	struct rb_node remote_qp_node;
	struct rb_node remote_id_node;
	__be64 remote_ca_guid;
	__be32 remote_qpn;
	u8 inserted_remote_qp;
	u8 inserted_remote_id;
};
struct cm_id_private {
	struct ib_cm_id	id;

	struct rb_node service_node;
	struct rb_node sidr_id_node;
	spinlock_t lock;	/* Do not acquire inside cm.lock */
	struct completion comp;
	atomic_t refcount;
	/* Number of clients sharing this ib_cm_id. Only valid for listeners.
	 * Protected by the cm.lock spinlock. */
	int listen_sharecount;

	struct ib_mad_send_buf *msg;
	struct cm_timewait_info *timewait_info;
	/* todo: use alternate port on send failure */
	struct cm_av av;
	struct cm_av alt_av;

	void *private_data;
	__be64 tid;
	__be32 local_qpn;
	__be32 remote_qpn;
	enum ib_qp_type qp_type;
	__be32 sq_psn;
	__be32 rq_psn;
	int timeout_ms;
	enum ib_mtu path_mtu;
	__be16 pkey;
	u8 private_data_len;
	u8 max_cm_retries;
	u8 peer_to_peer;
	u8 responder_resources;
	u8 initiator_depth;
	u8 retry_count;
	u8 rnr_retry_count;
	u8 service_timeout;
	u8 target_ack_delay;

	struct list_head work_list;
	atomic_t work_count;
};
static void cm_work_handler(struct work_struct *work);

static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
	if (atomic_dec_and_test(&cm_id_priv->refcount))
		complete(&cm_id_priv->comp);
}
static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
			struct ib_mad_send_buf **msg)
{
	struct ib_mad_agent *mad_agent;
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	mad_agent = cm_id_priv->av.port->mad_agent;
	ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
			       cm_id_priv->av.pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC,
			       IB_MGMT_BASE_VERSION);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		return PTR_ERR(m);
	}

	/* Timeout set by caller if response is expected. */
	m->ah = ah;
	m->retries = cm_id_priv->max_cm_retries;

	atomic_inc(&cm_id_priv->refcount);
	m->context[0] = cm_id_priv;
	*msg = m;
	return 0;
}
static int cm_alloc_response_msg(struct cm_port *port,
				 struct ib_mad_recv_wc *mad_recv_wc,
				 struct ib_mad_send_buf **msg)
{
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
				  mad_recv_wc->recv_buf.grh, port->port_num);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC,
			       IB_MGMT_BASE_VERSION);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		return PTR_ERR(m);
	}
	m->ah = ah;
	*msg = m;
	return 0;
}
static void cm_free_msg(struct ib_mad_send_buf *msg)
{
	ib_destroy_ah(msg->ah);
	if (msg->context[0])
		cm_deref_id(msg->context[0]);
	ib_free_send_mad(msg);
}
static void * cm_copy_private_data(const void *private_data,
				   u8 private_data_len)
{
	void *data;

	if (!private_data || !private_data_len)
		return NULL;

	data = kmemdup(private_data, private_data_len, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	return data;
}
static void cm_set_private_data(struct cm_id_private *cm_id_priv,
				void *private_data, u8 private_data_len)
{
	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
		kfree(cm_id_priv->private_data);

	cm_id_priv->private_data = private_data;
	cm_id_priv->private_data_len = private_data_len;
}
static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
				    struct ib_grh *grh, struct cm_av *av)
{
	av->port = port;
	av->pkey_index = wc->pkey_index;
	ib_init_ah_from_wc(port->cm_dev->ib_device, port->port_num, wc,
			   grh, &av->ah_attr);
}
static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
{
	struct cm_device *cm_dev;
	struct cm_port *port = NULL;
	unsigned long flags;
	int ret;
	u8 p;
	struct net_device *ndev = ib_get_ndev_from_path(path);

	read_lock_irqsave(&cm.device_lock, flags);
	list_for_each_entry(cm_dev, &cm.device_list, list) {
		if (!ib_find_cached_gid(cm_dev->ib_device, &path->sgid,
					path->gid_type, ndev, &p, NULL)) {
			port = cm_dev->port[p-1];
			break;
		}
	}
	read_unlock_irqrestore(&cm.device_lock, flags);

	if (ndev)
		dev_put(ndev);

	if (!port)
		return -EINVAL;

	ret = ib_find_cached_pkey(cm_dev->ib_device, port->port_num,
				  be16_to_cpu(path->pkey), &av->pkey_index);
	if (ret)
		return ret;

	av->port = port;
	ib_init_ah_from_path(cm_dev->ib_device, port->port_num, path,
			     &av->ah_attr);
	av->timeout = path->packet_life_time + 1;

	return 0;
}
static int cm_alloc_id(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(&cm.lock, flags);

	id = idr_alloc_cyclic(&cm.local_id_table, cm_id_priv, 0, 0, GFP_NOWAIT);

	spin_unlock_irqrestore(&cm.lock, flags);
	idr_preload_end();

	cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
	return id < 0 ? id : 0;
}
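
/*
 * Worked example (added commentary): the idr stores the raw index while
 * consumers see (index ^ cm.random_id_operand).  If idr_alloc_cyclic()
 * returns 5, the consumer-visible local_id is 5 ^ random_id_operand, and
 * cm_free_id()/cm_get_id() below recover index 5 by XORing again.  The
 * random operand makes active communication IDs harder to guess remotely
 * without affecting uniqueness.
 */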
static void cm_free_id(__be32 local_id)
{
	spin_lock_irq(&cm.lock);
	idr_remove(&cm.local_id_table,
		   (__force int) (local_id ^ cm.random_id_operand));
	spin_unlock_irq(&cm.lock);
}
static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;

	cm_id_priv = idr_find(&cm.local_id_table,
			      (__force int) (local_id ^ cm.random_id_operand));
	if (cm_id_priv) {
		if (cm_id_priv->id.remote_id == remote_id)
			atomic_inc(&cm_id_priv->refcount);
		else
			cm_id_priv = NULL;
	}

	return cm_id_priv;
}
static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;

	spin_lock_irq(&cm.lock);
	cm_id_priv = cm_get_id(local_id, remote_id);
	spin_unlock_irq(&cm.lock);

	return cm_id_priv;
}
/*
 * Trivial helpers to strip endian annotation and compare; the
 * endianness doesn't actually matter since we just need a stable
 * order for the RB tree.
 */
static int be32_lt(__be32 a, __be32 b)
{
	return (__force u32) a < (__force u32) b;
}

static int be32_gt(__be32 a, __be32 b)
{
	return (__force u32) a > (__force u32) b;
}

static int be64_lt(__be64 a, __be64 b)
{
	return (__force u64) a < (__force u64) b;
}

static int be64_gt(__be64 a, __be64 b)
{
	return (__force u64) a > (__force u64) b;
}
static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
{
	struct rb_node **link = &cm.listen_service_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	__be64 service_id = cm_id_priv->id.service_id;
	__be64 service_mask = cm_id_priv->id.service_mask;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  service_node);
		if ((cur_cm_id_priv->id.service_mask & service_id) ==
		    (service_mask & cur_cm_id_priv->id.service_id) &&
		    (cm_id_priv->id.device == cur_cm_id_priv->id.device))
			return cur_cm_id_priv;

		if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
			link = &(*link)->rb_left;
		else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
			link = &(*link)->rb_right;
		else if (be64_lt(service_id, cur_cm_id_priv->id.service_id))
			link = &(*link)->rb_left;
		else if (be64_gt(service_id, cur_cm_id_priv->id.service_id))
			link = &(*link)->rb_right;
		else
			link = &(*link)->rb_right;
	}
	rb_link_node(&cm_id_priv->service_node, parent, link);
	rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
	return NULL;
}
static struct cm_id_private * cm_find_listen(struct ib_device *device,
					     __be64 service_id)
{
	struct rb_node *node = cm.listen_service_table.rb_node;
	struct cm_id_private *cm_id_priv;

	while (node) {
		cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
		if ((cm_id_priv->id.service_mask & service_id) ==
		     cm_id_priv->id.service_id &&
		    (cm_id_priv->id.device == device))
			return cm_id_priv;

		if (device < cm_id_priv->id.device)
			node = node->rb_left;
		else if (device > cm_id_priv->id.device)
			node = node->rb_right;
		else if (be64_lt(service_id, cm_id_priv->id.service_id))
			node = node->rb_left;
		else if (be64_gt(service_id, cm_id_priv->id.service_id))
			node = node->rb_right;
		else
			node = node->rb_right;
	}
	return NULL;
}
static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
						     *timewait_info)
{
	struct rb_node **link = &cm.remote_id_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_id = timewait_info->work.remote_id;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_id_node);
		if (be32_lt(remote_id, cur_timewait_info->work.remote_id))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_id, cur_timewait_info->work.remote_id))
			link = &(*link)->rb_right;
		else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_left;
		else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_id = 1;
	rb_link_node(&timewait_info->remote_id_node, parent, link);
	rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
	return NULL;
}
static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
						   __be32 remote_id)
{
	struct rb_node *node = cm.remote_id_table.rb_node;
	struct cm_timewait_info *timewait_info;

	while (node) {
		timewait_info = rb_entry(node, struct cm_timewait_info,
					 remote_id_node);
		if (be32_lt(remote_id, timewait_info->work.remote_id))
			node = node->rb_left;
		else if (be32_gt(remote_id, timewait_info->work.remote_id))
			node = node->rb_right;
		else if (be64_lt(remote_ca_guid, timewait_info->remote_ca_guid))
			node = node->rb_left;
		else if (be64_gt(remote_ca_guid, timewait_info->remote_ca_guid))
			node = node->rb_right;
		else
			return timewait_info;
	}
	return NULL;
}
static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
						      *timewait_info)
{
	struct rb_node **link = &cm.remote_qp_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_qpn = timewait_info->remote_qpn;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_qp_node);
		if (be32_lt(remote_qpn, cur_timewait_info->remote_qpn))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_qpn, cur_timewait_info->remote_qpn))
			link = &(*link)->rb_right;
		else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_left;
		else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_qp = 1;
	rb_link_node(&timewait_info->remote_qp_node, parent, link);
	rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
	return NULL;
}
static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
						    *cm_id_priv)
{
	struct rb_node **link = &cm.remote_sidr_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	union ib_gid *port_gid = &cm_id_priv->av.dgid;
	__be32 remote_id = cm_id_priv->id.remote_id;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  sidr_id_node);
		if (be32_lt(remote_id, cur_cm_id_priv->id.remote_id))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_id, cur_cm_id_priv->id.remote_id))
			link = &(*link)->rb_right;
		else {
			int cmp;
			cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
				     sizeof *port_gid);
			if (cmp < 0)
				link = &(*link)->rb_left;
			else if (cmp > 0)
				link = &(*link)->rb_right;
			else
				return cur_cm_id_priv;
		}
	}
	rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
	rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	return NULL;
}
static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
			       enum ib_cm_sidr_status status)
{
	struct ib_cm_sidr_rep_param param;

	memset(&param, 0, sizeof param);
	param.status = status;
	ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
}
struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
				 ib_cm_handler cm_handler,
				 void *context)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
	if (!cm_id_priv)
		return ERR_PTR(-ENOMEM);

	cm_id_priv->id.state = IB_CM_IDLE;
	cm_id_priv->id.device = device;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.remote_cm_qpn = 1;
	ret = cm_alloc_id(cm_id_priv);
	if (ret)
		goto error;

	spin_lock_init(&cm_id_priv->lock);
	init_completion(&cm_id_priv->comp);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	atomic_set(&cm_id_priv->work_count, -1);
	atomic_set(&cm_id_priv->refcount, 1);
	return &cm_id_priv->id;

error:
	kfree(cm_id_priv);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ib_create_cm_id);
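
/*
 * Usage sketch (added, illustrative only): minimal creation and teardown
 * of a cm_id.  example_handler is hypothetical; note that returning
 * non-zero from an event callback causes the CM to destroy the id (see
 * cm_process_work() below).
 */
#if 0
static int example_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	return 0;
}

static int example_create_destroy(struct ib_device *device)
{
	struct ib_cm_id *cm_id;

	cm_id = ib_create_cm_id(device, example_handler, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);
	ib_destroy_cm_id(cm_id);
	return 0;
}
#endif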
static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
{
	struct cm_work *work;

	if (list_empty(&cm_id_priv->work_list))
		return NULL;

	work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
	list_del(&work->list);
	return work;
}
static void cm_free_work(struct cm_work *work)
{
	if (work->mad_recv_wc)
		ib_free_recv_mad(work->mad_recv_wc);
	kfree(work);
}
static inline int cm_convert_to_ms(int iba_time)
{
	/* approximate conversion to ms from 4.096us x 2^iba_time */
	return 1 << max(iba_time - 8, 0);
}
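
/*
 * Worked example (added commentary): an IBA time of 20 means
 * 4.096us * 2^20 ~= 4.29s; cm_convert_to_ms(20) returns
 * 1 << (20 - 8) = 4096 ms, the nearest power-of-two approximation.
 * Values of 8 or less clamp to 1 ms.
 */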
/*
 * calculate: 4.096x2^ack_timeout = 4.096x2^ack_delay + 2x4.096x2^life_time
 * Because of how ack_timeout is stored, adding one doubles the timeout.
 * To avoid large timeouts, select the max(ack_delay, life_time + 1), and
 * increment it (round up) only if the other is within 50%.
 */
static u8 cm_ack_timeout(u8 ca_ack_delay, u8 packet_life_time)
{
	int ack_timeout = packet_life_time + 1;

	if (ack_timeout >= ca_ack_delay)
		ack_timeout += (ca_ack_delay >= (ack_timeout - 1));
	else
		ack_timeout = ca_ack_delay +
			      (ack_timeout >= (ca_ack_delay - 1));

	return min(31, ack_timeout);
}
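
/*
 * Worked example (added commentary): with ca_ack_delay = 10 and
 * packet_life_time = 10, the candidate is life_time + 1 = 11 (one doubling
 * covers the round trip).  Since 2^10 is within 50% of 2^11, the result
 * rounds up to 12: 2^10 + 2^11 = 3072 <= 2^12.  With ca_ack_delay = 5 the
 * result stays 11, accepting a small shortfall (2^5 + 2^11 slightly
 * exceeds 2^11) rather than doubling the timeout.
 */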
static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
{
	if (timewait_info->inserted_remote_id) {
		rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
		timewait_info->inserted_remote_id = 0;
	}

	if (timewait_info->inserted_remote_qp) {
		rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
		timewait_info->inserted_remote_qp = 0;
	}
}
static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
{
	struct cm_timewait_info *timewait_info;

	timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
	if (!timewait_info)
		return ERR_PTR(-ENOMEM);

	timewait_info->work.local_id = local_id;
	INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
	timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
	return timewait_info;
}
static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
	int wait_time;
	unsigned long flags;
	struct cm_device *cm_dev;

	cm_dev = ib_get_client_data(cm_id_priv->id.device, &cm_client);
	if (!cm_dev)
		return;

	spin_lock_irqsave(&cm.lock, flags);
	cm_cleanup_timewait(cm_id_priv->timewait_info);
	list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
	spin_unlock_irqrestore(&cm.lock, flags);

	/*
	 * The cm_id could be destroyed by the user before we exit timewait.
	 * To protect against this, we search for the cm_id after exiting
	 * timewait before notifying the user that we've exited timewait.
	 */
	cm_id_priv->id.state = IB_CM_TIMEWAIT;
	wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);

	/* Check if the device started its remove_one */
	spin_lock_irqsave(&cm.lock, flags);
	if (!cm_dev->going_down)
		queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
				   msecs_to_jiffies(wait_time));
	spin_unlock_irqrestore(&cm.lock, flags);

	cm_id_priv->timewait_info = NULL;
}
static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;

	cm_id_priv->id.state = IB_CM_IDLE;
	if (cm_id_priv->timewait_info) {
		spin_lock_irqsave(&cm.lock, flags);
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irqrestore(&cm.lock, flags);
		kfree(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
	}
}
static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
retest:
	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id->state) {
	case IB_CM_LISTEN:
		spin_unlock_irq(&cm_id_priv->lock);

		spin_lock_irq(&cm.lock);
		if (--cm_id_priv->listen_sharecount > 0) {
			/* The id is still shared. */
			cm_deref_id(cm_id_priv);
			spin_unlock_irq(&cm.lock);
			return;
		}
		rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
		spin_unlock_irq(&cm.lock);
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id->state = IB_CM_IDLE;
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irq(&cm_id_priv->lock);
		break;
	case IB_CM_SIDR_REQ_RCVD:
		spin_unlock_irq(&cm_id_priv->lock);
		cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
		spin_lock_irq(&cm.lock);
		if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node))
			rb_erase(&cm_id_priv->sidr_id_node,
				 &cm.remote_sidr_table);
		spin_unlock_irq(&cm.lock);
		break;
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irq(&cm_id_priv->lock);
		ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
			       &cm_id_priv->id.device->node_guid,
			       sizeof cm_id_priv->id.device->node_guid,
			       NULL, 0);
		break;
	case IB_CM_REQ_RCVD:
		if (err == -ENOMEM) {
			/* Do not reject to allow future retries. */
			cm_reset_to_idle(cm_id_priv);
			spin_unlock_irq(&cm_id_priv->lock);
		} else {
			spin_unlock_irq(&cm_id_priv->lock);
			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
				       NULL, 0, NULL, 0);
		}
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* Fall through */
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		spin_unlock_irq(&cm_id_priv->lock);
		ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
			       NULL, 0, NULL, 0);
		break;
	case IB_CM_ESTABLISHED:
		spin_unlock_irq(&cm_id_priv->lock);
		if (cm_id_priv->qp_type == IB_QPT_XRC_TGT)
			break;
		ib_send_cm_dreq(cm_id, NULL, 0);
		goto retest;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irq(&cm_id_priv->lock);
		break;
	case IB_CM_DREQ_RCVD:
		spin_unlock_irq(&cm_id_priv->lock);
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
	default:
		spin_unlock_irq(&cm_id_priv->lock);
		break;
	}

	cm_free_id(cm_id->local_id);
	cm_deref_id(cm_id_priv);
	wait_for_completion(&cm_id_priv->comp);
	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
		cm_free_work(work);
	kfree(cm_id_priv->private_data);
	kfree(cm_id_priv);
}
void ib_destroy_cm_id(struct ib_cm_id *cm_id)
{
	cm_destroy_id(cm_id, 0);
}
EXPORT_SYMBOL(ib_destroy_cm_id);
/**
 * __ib_cm_listen - Initiates listening on the specified service ID for
 *   connection and service ID resolution requests.
 * @cm_id: Connection identifier associated with the listen request.
 * @service_id: Service identifier matched against incoming connection
 *   and service ID resolution requests.  The service ID should be specified
 *   in network-byte order.  If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
 *   assign a service ID to the caller.
 * @service_mask: Mask applied to service ID used to listen across a
 *   range of service IDs.  If set to 0, the service ID is matched
 *   exactly.  This parameter is ignored if %service_id is set to
 *   IB_CM_ASSIGN_SERVICE_ID.
 */
static int __ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id,
			  __be64 service_mask)
{
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	int ret = 0;

	service_mask = service_mask ? service_mask : ~cpu_to_be64(0);
	service_id &= service_mask;
	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	if (cm_id->state != IB_CM_IDLE)
		return -EINVAL;

	cm_id->state = IB_CM_LISTEN;
	++cm_id_priv->listen_sharecount;

	if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
		cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
		cm_id->service_mask = ~cpu_to_be64(0);
	} else {
		cm_id->service_id = service_id;
		cm_id->service_mask = service_mask;
	}
	cur_cm_id_priv = cm_insert_listen(cm_id_priv);

	if (cur_cm_id_priv) {
		cm_id->state = IB_CM_IDLE;
		--cm_id_priv->listen_sharecount;
		ret = -EBUSY;
	}
	return ret;
}

int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm.lock, flags);
	ret = __ib_cm_listen(cm_id, service_id, service_mask);
	spin_unlock_irqrestore(&cm.lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_cm_listen);
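
/*
 * Usage sketch (added, illustrative only): listening on an exact service
 * ID.  EXAMPLE_SVC_ID is a hypothetical consumer-chosen constant; a
 * service_mask of 0 requests an exact match.
 */
#if 0
#define EXAMPLE_SVC_ID cpu_to_be64(0x1000ULL)

static int example_listen(struct ib_cm_id *cm_id)
{
	return ib_cm_listen(cm_id, EXAMPLE_SVC_ID, 0);
}
#endif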
/**
 * Create a new listening ib_cm_id and listen on the given service ID.
 *
 * If there's an existing ID listening on that same device and service ID,
 * return it.
 *
 * @device: Device associated with the cm_id.  All related communication will
 *   be associated with the specified device.
 * @cm_handler: Callback invoked to notify the user of CM events.
 * @service_id: Service identifier matched against incoming connection
 *   and service ID resolution requests.  The service ID should be specified
 *   in network-byte order.  If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
 *   assign a service ID to the caller.
 *
 * Callers should call ib_destroy_cm_id when done with the listener ID.
 */
struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
				     ib_cm_handler cm_handler,
				     __be64 service_id)
{
	struct cm_id_private *cm_id_priv;
	struct ib_cm_id *cm_id;
	unsigned long flags;
	int err = 0;

	/* Create an ID in advance, since the creation may sleep */
	cm_id = ib_create_cm_id(device, cm_handler, NULL);
	if (IS_ERR(cm_id))
		return cm_id;

	spin_lock_irqsave(&cm.lock, flags);

	if (service_id == IB_CM_ASSIGN_SERVICE_ID)
		goto new_id;

	/* Find an existing ID */
	cm_id_priv = cm_find_listen(device, service_id);
	if (cm_id_priv) {
		if (cm_id->cm_handler != cm_handler || cm_id->context) {
			/* Sharing an ib_cm_id with different handlers is not
			 * supported */
			spin_unlock_irqrestore(&cm.lock, flags);
			return ERR_PTR(-EINVAL);
		}
		atomic_inc(&cm_id_priv->refcount);
		++cm_id_priv->listen_sharecount;
		spin_unlock_irqrestore(&cm.lock, flags);

		ib_destroy_cm_id(cm_id);
		cm_id = &cm_id_priv->id;
		return cm_id;
	}

new_id:
	/* Use newly created ID */
	err = __ib_cm_listen(cm_id, service_id, 0);

	spin_unlock_irqrestore(&cm.lock, flags);

	if (err) {
		ib_destroy_cm_id(cm_id);
		return ERR_PTR(err);
	}
	return cm_id;
}
EXPORT_SYMBOL(ib_cm_insert_listen);
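
/*
 * Usage note (added commentary): repeated ib_cm_insert_listen() calls with
 * the same device, handler and service ID share one listening ID via
 * listen_sharecount; each successful call must be balanced by
 * ib_destroy_cm_id(), which only tears the listener down once the last
 * sharer drops it.
 */
#if 0
/* Hypothetical caller; example_handler as sketched earlier. */
static struct ib_cm_id *example_shared_listen(struct ib_device *device,
					      __be64 svc_id)
{
	return ib_cm_insert_listen(device, example_handler, svc_id);
}
#endif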
static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
			  enum cm_msg_sequence msg_seq)
{
	u64 hi_tid, low_tid;

	hi_tid   = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
	low_tid  = (u64) ((__force u32)cm_id_priv->id.local_id |
			  (msg_seq << 30));
	return cpu_to_be64(hi_tid | low_tid);
}
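
/*
 * Layout note (added commentary): the 64-bit TID packs
 * mad_agent->hi_tid into bits 63:32, while the low 32 bits combine the
 * local communication ID with the message sequence (msg_seq << 30).
 * Retransmissions of one message therefore reuse a TID, while different
 * message classes on the same cm_id get distinct TIDs.
 */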
static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
			      __be16 attr_id, __be64 tid)
{
	hdr->base_version  = IB_MGMT_BASE_VERSION;
	hdr->mgmt_class	   = IB_MGMT_CLASS_CM;
	hdr->class_version = IB_CM_CLASS_VERSION;
	hdr->method	   = IB_MGMT_METHOD_SEND;
	hdr->attr_id	   = attr_id;
	hdr->tid	   = tid;
}
static void cm_format_req(struct cm_req_msg *req_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_req_param *param)
{
	struct ib_sa_path_rec *pri_path = param->primary_path;
	struct ib_sa_path_rec *alt_path = param->alternate_path;

	cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));

	req_msg->local_comm_id = cm_id_priv->id.local_id;
	req_msg->service_id = param->service_id;
	req_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
	cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
	cm_req_set_init_depth(req_msg, param->initiator_depth);
	cm_req_set_remote_resp_timeout(req_msg,
				       param->remote_cm_response_timeout);
	cm_req_set_qp_type(req_msg, param->qp_type);
	cm_req_set_flow_ctrl(req_msg, param->flow_control);
	cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
	cm_req_set_local_resp_timeout(req_msg,
				      param->local_cm_response_timeout);
	req_msg->pkey = param->primary_path->pkey;
	cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
	cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);

	if (param->qp_type != IB_QPT_XRC_INI) {
		cm_req_set_resp_res(req_msg, param->responder_resources);
		cm_req_set_retry_count(req_msg, param->retry_count);
		cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
		cm_req_set_srq(req_msg, param->srq);
	}

	if (pri_path->hop_limit <= 1) {
		req_msg->primary_local_lid = pri_path->slid;
		req_msg->primary_remote_lid = pri_path->dlid;
	} else {
		/* Work-around until there's a way to obtain remote LID info */
		req_msg->primary_local_lid = IB_LID_PERMISSIVE;
		req_msg->primary_remote_lid = IB_LID_PERMISSIVE;
	}
	req_msg->primary_local_gid = pri_path->sgid;
	req_msg->primary_remote_gid = pri_path->dgid;
	cm_req_set_primary_flow_label(req_msg, pri_path->flow_label);
	cm_req_set_primary_packet_rate(req_msg, pri_path->rate);
	req_msg->primary_traffic_class = pri_path->traffic_class;
	req_msg->primary_hop_limit = pri_path->hop_limit;
	cm_req_set_primary_sl(req_msg, pri_path->sl);
	cm_req_set_primary_subnet_local(req_msg, (pri_path->hop_limit <= 1));
	cm_req_set_primary_local_ack_timeout(req_msg,
		cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
			       pri_path->packet_life_time));

	if (alt_path) {
		if (alt_path->hop_limit <= 1) {
			req_msg->alt_local_lid = alt_path->slid;
			req_msg->alt_remote_lid = alt_path->dlid;
		} else {
			req_msg->alt_local_lid = IB_LID_PERMISSIVE;
			req_msg->alt_remote_lid = IB_LID_PERMISSIVE;
		}
		req_msg->alt_local_gid = alt_path->sgid;
		req_msg->alt_remote_gid = alt_path->dgid;
		cm_req_set_alt_flow_label(req_msg,
					  alt_path->flow_label);
		cm_req_set_alt_packet_rate(req_msg, alt_path->rate);
		req_msg->alt_traffic_class = alt_path->traffic_class;
		req_msg->alt_hop_limit = alt_path->hop_limit;
		cm_req_set_alt_sl(req_msg, alt_path->sl);
		cm_req_set_alt_subnet_local(req_msg, (alt_path->hop_limit <= 1));
		cm_req_set_alt_local_ack_timeout(req_msg,
			cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
				       alt_path->packet_life_time));
	}

	if (param->private_data && param->private_data_len)
		memcpy(req_msg->private_data, param->private_data,
		       param->private_data_len);
}
static int cm_validate_req_param(struct ib_cm_req_param *param)
{
	/* peer-to-peer not supported */
	if (param->peer_to_peer)
		return -EINVAL;

	if (!param->primary_path)
		return -EINVAL;

	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC &&
	    param->qp_type != IB_QPT_XRC_INI)
		return -EINVAL;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	if (param->alternate_path &&
	    (param->alternate_path->pkey != param->primary_path->pkey ||
	     param->alternate_path->mtu != param->primary_path->mtu))
		return -EINVAL;

	return 0;
}
int ib_send_cm_req(struct ib_cm_id *cm_id,
		   struct ib_cm_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct cm_req_msg *req_msg;
	unsigned long flags;
	int ret;

	ret = cm_validate_req_param(param);
	if (ret)
		return ret;

	/* Verify that we're not in timewait. */
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_IDLE) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto out;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto out;
	}

	ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
	if (ret)
		goto error1;
	if (param->alternate_path) {
		ret = cm_init_av_by_path(param->alternate_path,
					 &cm_id_priv->alt_av);
		if (ret)
			goto error1;
	}
	cm_id->service_id = param->service_id;
	cm_id->service_mask = ~cpu_to_be64(0);
	cm_id_priv->timeout_ms = cm_convert_to_ms(
				    param->primary_path->packet_life_time) * 2 +
				 cm_convert_to_ms(
				    param->remote_cm_response_timeout);
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->retry_count = param->retry_count;
	cm_id_priv->path_mtu = param->primary_path->mtu;
	cm_id_priv->pkey = param->primary_path->pkey;
	cm_id_priv->qp_type = param->qp_type;

	ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
	if (ret)
		goto error1;

	req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
	cm_format_req(req_msg, cm_id_priv, param);
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
	cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;

	cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = ib_post_send_mad(cm_id_priv->msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto error2;
	}
	BUG_ON(cm_id->state != IB_CM_IDLE);
	cm_id->state = IB_CM_REQ_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error2:	cm_free_msg(cm_id_priv->msg);
error1:	kfree(cm_id_priv->timewait_info);
out:	return ret;
}
EXPORT_SYMBOL(ib_send_cm_req);
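
/*
 * Usage sketch (added, illustrative only): a minimal active-side REQ.
 * The path record and QP number would come from SA path resolution and
 * QP creation; the numeric values here are placeholders, not
 * recommendations.
 */
#if 0
static int example_send_req(struct ib_cm_id *cm_id,
			    struct ib_sa_path_rec *path, u32 qpn)
{
	struct ib_cm_req_param param = {
		.primary_path		    = path,
		.service_id		    = cpu_to_be64(0x1000ULL),
		.qp_num			    = qpn,
		.qp_type		    = IB_QPT_RC,
		.responder_resources	    = 4,
		.initiator_depth	    = 4,
		.retry_count		    = 7,
		.rnr_retry_count	    = 7,
		.max_cm_retries		    = 15,
		.remote_cm_response_timeout = 20,
		.local_cm_response_timeout  = 20,
	};

	return ib_send_cm_req(cm_id, &param);
}
#endif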
static int cm_issue_rej(struct cm_port *port,
			struct ib_mad_recv_wc *mad_recv_wc,
			enum ib_cm_rej_reason reason,
			enum cm_msg_response msg_rejected,
			void *ari, u8 ari_length)
{
	struct ib_mad_send_buf *msg = NULL;
	struct cm_rej_msg *rej_msg, *rcv_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	/* We just need common CM header information.  Cast to any message. */
	rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
	rej_msg = (struct cm_rej_msg *) msg->mad;

	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
	rej_msg->remote_comm_id = rcv_msg->local_comm_id;
	rej_msg->local_comm_id = rcv_msg->remote_comm_id;
	cm_rej_set_msg_rejected(rej_msg, msg_rejected);
	rej_msg->reason = cpu_to_be16(reason);

	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

	return ret;
}
static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
				    __be32 local_qpn, __be32 remote_qpn)
{
	return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
		((local_ca_guid == remote_ca_guid) &&
		 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
}
static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
				     struct ib_sa_path_rec *primary_path,
				     struct ib_sa_path_rec *alt_path)
{
	memset(primary_path, 0, sizeof *primary_path);
	primary_path->dgid = req_msg->primary_local_gid;
	primary_path->sgid = req_msg->primary_remote_gid;
	primary_path->dlid = req_msg->primary_local_lid;
	primary_path->slid = req_msg->primary_remote_lid;
	primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
	primary_path->hop_limit = req_msg->primary_hop_limit;
	primary_path->traffic_class = req_msg->primary_traffic_class;
	primary_path->reversible = 1;
	primary_path->pkey = req_msg->pkey;
	primary_path->sl = cm_req_get_primary_sl(req_msg);
	primary_path->mtu_selector = IB_SA_EQ;
	primary_path->mtu = cm_req_get_path_mtu(req_msg);
	primary_path->rate_selector = IB_SA_EQ;
	primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
	primary_path->packet_life_time_selector = IB_SA_EQ;
	primary_path->packet_life_time =
		cm_req_get_primary_local_ack_timeout(req_msg);
	primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
	primary_path->service_id = req_msg->service_id;

	if (req_msg->alt_local_lid) {
		memset(alt_path, 0, sizeof *alt_path);
		alt_path->dgid = req_msg->alt_local_gid;
		alt_path->sgid = req_msg->alt_remote_gid;
		alt_path->dlid = req_msg->alt_local_lid;
		alt_path->slid = req_msg->alt_remote_lid;
		alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
		alt_path->hop_limit = req_msg->alt_hop_limit;
		alt_path->traffic_class = req_msg->alt_traffic_class;
		alt_path->reversible = 1;
		alt_path->pkey = req_msg->pkey;
		alt_path->sl = cm_req_get_alt_sl(req_msg);
		alt_path->mtu_selector = IB_SA_EQ;
		alt_path->mtu = cm_req_get_path_mtu(req_msg);
		alt_path->rate_selector = IB_SA_EQ;
		alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
		alt_path->packet_life_time_selector = IB_SA_EQ;
		alt_path->packet_life_time =
			cm_req_get_alt_local_ack_timeout(req_msg);
		alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
		alt_path->service_id = req_msg->service_id;
	}
}
static u16 cm_get_bth_pkey(struct cm_work *work)
{
	struct ib_device *ib_dev = work->port->cm_dev->ib_device;
	u8 port_num = work->port->port_num;
	u16 pkey_index = work->mad_recv_wc->wc->pkey_index;
	u16 pkey;
	int ret;

	ret = ib_get_cached_pkey(ib_dev, port_num, pkey_index, &pkey);
	if (ret) {
		dev_warn_ratelimited(&ib_dev->dev, "ib_cm: Couldn't retrieve pkey for incoming request (port %d, pkey index %d). %d\n",
				     port_num, pkey_index, ret);
		return 0;
	}

	return pkey;
}
static void cm_format_req_event(struct cm_work *work,
				struct cm_id_private *cm_id_priv,
				struct ib_cm_id *listen_id)
{
	struct cm_req_msg *req_msg;
	struct ib_cm_req_event_param *param;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.req_rcvd;
	param->listen_id = listen_id;
	param->bth_pkey = cm_get_bth_pkey(work);
	param->port = cm_id_priv->av.port->port_num;
	param->primary_path = &work->path[0];
	if (req_msg->alt_local_lid)
		param->alternate_path = &work->path[1];
	else
		param->alternate_path = NULL;
	param->remote_ca_guid = req_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
	param->qp_type = cm_req_get_qp_type(req_msg);
	param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
	param->responder_resources = cm_req_get_init_depth(req_msg);
	param->initiator_depth = cm_req_get_resp_res(req_msg);
	param->local_cm_response_timeout =
		cm_req_get_remote_resp_timeout(req_msg);
	param->flow_control = cm_req_get_flow_ctrl(req_msg);
	param->remote_cm_response_timeout =
		cm_req_get_local_resp_timeout(req_msg);
	param->retry_count = cm_req_get_retry_count(req_msg);
	param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	param->srq = cm_req_get_srq(req_msg);
	work->cm_event.private_data = &req_msg->private_data;
}
static void cm_process_work(struct cm_id_private *cm_id_priv,
			    struct cm_work *work)
{
	int ret;

	/* We will typically only have the current event to report. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
	cm_free_work(work);

	while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
		spin_lock_irq(&cm_id_priv->lock);
		work = cm_dequeue_work(cm_id_priv);
		spin_unlock_irq(&cm_id_priv->lock);
		BUG_ON(!work);
		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
						&work->cm_event);
		cm_free_work(work);
	}
	cm_deref_id(cm_id_priv);
	if (ret)
		cm_destroy_id(&cm_id_priv->id, ret);
}
static void cm_format_mra(struct cm_mra_msg *mra_msg,
			  struct cm_id_private *cm_id_priv,
			  enum cm_msg_response msg_mraed, u8 service_timeout,
			  const void *private_data, u8 private_data_len)
{
	cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
	cm_mra_set_msg_mraed(mra_msg, msg_mraed);
	mra_msg->local_comm_id = cm_id_priv->id.local_id;
	mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_mra_set_service_timeout(mra_msg, service_timeout);

	if (private_data && private_data_len)
		memcpy(mra_msg->private_data, private_data, private_data_len);
}
static void cm_format_rej(struct cm_rej_msg *rej_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_rej_reason reason,
			  void *ari,
			  u8 ari_length,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
	rej_msg->remote_comm_id = cm_id_priv->id.remote_id;

	switch(cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		rej_msg->local_comm_id = 0;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_MRA_REQ_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
		break;
	default:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
		break;
	}

	rej_msg->reason = cpu_to_be16(reason);
	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	if (private_data && private_data_len)
		memcpy(rej_msg->private_data, private_data, private_data_len);
}
static void cm_dup_req_handler(struct cm_work *work,
			       struct cm_id_private *cm_id_priv)
{
	struct ib_mad_send_buf *msg = NULL;
	int ret;

	atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
			counter[CM_REQ_COUNTER]);

	/* Quick state check to discard duplicate REQs. */
	if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
		return;

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		return;

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_MRA_REQ_SENT:
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		break;
	case IB_CM_TIMEWAIT:
		cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
			      IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
		break;
	default:
		goto unlock;
	}
	spin_unlock_irq(&cm_id_priv->lock);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	return;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
free:	cm_free_msg(msg);
}
static struct cm_id_private * cm_match_req(struct cm_work *work,
					   struct cm_id_private *cm_id_priv)
{
	struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
	struct cm_timewait_info *timewait_info;
	struct cm_req_msg *req_msg;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	/* Check for possible duplicate REQ. */
	spin_lock_irq(&cm.lock);
	timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
	if (timewait_info) {
		cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
					   timewait_info->work.remote_id);
		spin_unlock_irq(&cm.lock);
		if (cur_cm_id_priv) {
			cm_dup_req_handler(work, cur_cm_id_priv);
			cm_deref_id(cur_cm_id_priv);
		}
		return NULL;
	}

	/* Check for stale connections. */
	timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
	if (timewait_info) {
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irq(&cm.lock);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		return NULL;
	}

	/* Find matching listen request. */
	listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
					   req_msg->service_id);
	if (!listen_cm_id_priv) {
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irq(&cm.lock);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		return NULL;
	}
	atomic_inc(&listen_cm_id_priv->refcount);
	atomic_inc(&cm_id_priv->refcount);
	cm_id_priv->id.state = IB_CM_REQ_RCVD;
	atomic_inc(&cm_id_priv->work_count);
	spin_unlock_irq(&cm.lock);

	return listen_cm_id_priv;
}
/*
 * Work-around for inter-subnet connections.  If the LIDs are permissive,
 * we need to override the LID/SL data in the REQ with the LID information
 * in the work completion.
 */
static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc)
{
	if (!cm_req_get_primary_subnet_local(req_msg)) {
		if (req_msg->primary_local_lid == IB_LID_PERMISSIVE) {
			req_msg->primary_local_lid = cpu_to_be16(wc->slid);
			cm_req_set_primary_sl(req_msg, wc->sl);
		}

		if (req_msg->primary_remote_lid == IB_LID_PERMISSIVE)
			req_msg->primary_remote_lid = cpu_to_be16(wc->dlid_path_bits);
	}

	if (!cm_req_get_alt_subnet_local(req_msg)) {
		if (req_msg->alt_local_lid == IB_LID_PERMISSIVE) {
			req_msg->alt_local_lid = cpu_to_be16(wc->slid);
			cm_req_set_alt_sl(req_msg, wc->sl);
		}

		if (req_msg->alt_remote_lid == IB_LID_PERMISSIVE)
			req_msg->alt_remote_lid = cpu_to_be16(wc->dlid_path_bits);
	}
}
static int cm_req_handler(struct cm_work *work)
{
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
	struct cm_req_msg *req_msg;
	union ib_gid gid;
	struct ib_gid_attr gid_attr;
	int ret;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	cm_id_priv->id.remote_id = req_msg->local_comm_id;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				work->mad_recv_wc->recv_buf.grh,
				&cm_id_priv->av);
	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto destroy;
	}
	cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);

	listen_cm_id_priv = cm_match_req(work, cm_id_priv);
	if (!listen_cm_id_priv) {
		ret = -EINVAL;
		kfree(cm_id_priv->timewait_info);
		goto destroy;
	}

	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = listen_cm_id_priv->id.context;
	cm_id_priv->id.service_id = req_msg->service_id;
	cm_id_priv->id.service_mask = ~cpu_to_be64(0);

	cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
	cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);

	memcpy(work->path[0].dmac, cm_id_priv->av.ah_attr.dmac, ETH_ALEN);
	work->path[0].hop_limit = cm_id_priv->av.ah_attr.grh.hop_limit;
	ret = ib_get_cached_gid(work->port->cm_dev->ib_device,
				work->port->port_num,
				cm_id_priv->av.ah_attr.grh.sgid_index,
				&gid, &gid_attr);
	if (!ret) {
		if (gid_attr.ndev) {
			work->path[0].ifindex = gid_attr.ndev->ifindex;
			work->path[0].net = dev_net(gid_attr.ndev);
			dev_put(gid_attr.ndev);
		}
		work->path[0].gid_type = gid_attr.gid_type;
		ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
	}
	if (ret) {
		int err = ib_get_cached_gid(work->port->cm_dev->ib_device,
					    work->port->port_num, 0,
					    &work->path[0].sgid,
					    &gid_attr);
		if (!err && gid_attr.ndev) {
			work->path[0].ifindex = gid_attr.ndev->ifindex;
			work->path[0].net = dev_net(gid_attr.ndev);
			dev_put(gid_attr.ndev);
		}
		work->path[0].gid_type = gid_attr.gid_type;
		ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
			       &work->path[0].sgid, sizeof work->path[0].sgid,
			       NULL, 0);
		goto rejected;
	}
	if (req_msg->alt_local_lid) {
		ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
		if (ret) {
			ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
				       &work->path[0].sgid,
				       sizeof work->path[0].sgid, NULL, 0);
			goto rejected;
		}
	}
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->timeout_ms = cm_convert_to_ms(
					cm_req_get_local_resp_timeout(req_msg));
	cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
	cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
	cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
	cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
	cm_id_priv->pkey = req_msg->pkey;
	cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
	cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
	cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);

	cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
	cm_deref_id(listen_cm_id_priv);
	return 0;

rejected:
	atomic_dec(&cm_id_priv->refcount);
	cm_deref_id(listen_cm_id_priv);
destroy:
	ib_destroy_cm_id(cm_id);
	return ret;
}
static void cm_format_rep(struct cm_rep_msg *rep_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_rep_param *param)
{
	cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
	rep_msg->local_comm_id = cm_id_priv->id.local_id;
	rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
	rep_msg->resp_resources = param->responder_resources;
	cm_rep_set_target_ack_delay(rep_msg,
				    cm_id_priv->av.port->cm_dev->ack_delay);
	cm_rep_set_failover(rep_msg, param->failover_accepted);
	cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
	rep_msg->local_ca_guid = cm_id_priv->id.device->node_guid;

	if (cm_id_priv->qp_type != IB_QPT_XRC_TGT) {
		rep_msg->initiator_depth = param->initiator_depth;
		cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
		cm_rep_set_srq(rep_msg, param->srq);
		cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
	} else {
		cm_rep_set_srq(rep_msg, 1);
		cm_rep_set_local_eecn(rep_msg, cpu_to_be32(param->qp_num));
	}

	if (param->private_data && param->private_data_len)
		memcpy(rep_msg->private_data, param->private_data,
		       param->private_data_len);
}
int ib_send_cm_rep(struct ib_cm_id *cm_id,
		   struct ib_cm_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct cm_rep_msg *rep_msg;
	unsigned long flags;
	int ret;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REQ_RCVD &&
	    cm_id->state != IB_CM_MRA_REQ_SENT) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	rep_msg = (struct cm_rep_msg *) msg->mad;
	cm_format_rep(rep_msg, cm_id_priv, param);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->state = IB_CM_REP_SENT;
	cm_id_priv->msg = msg;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
	cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF);

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rep);
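
/*
 * Usage sketch (added, illustrative only): a minimal passive-side REP,
 * typically sent from the handler's IB_CM_REQ_RECEIVED branch once the
 * local QP exists.  Values are placeholders.
 */
#if 0
static int example_send_rep(struct ib_cm_id *cm_id, u32 qpn)
{
	struct ib_cm_rep_param param = {
		.qp_num		     = qpn,
		.responder_resources = 4,
		.initiator_depth     = 4,
		.rnr_retry_count     = 7,
	};

	return ib_send_cm_rep(cm_id, &param);
}
#endif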
static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
			  struct cm_id_private *cm_id_priv,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
	rtu_msg->local_comm_id = cm_id_priv->id.local_id;
	rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;

	if (private_data && private_data_len)
		memcpy(rtu_msg->private_data, private_data, private_data_len);
}
int ib_send_cm_rtu(struct ib_cm_id *cm_id,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	void *data;
	int ret;

	if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REP_RCVD &&
	    cm_id->state != IB_CM_MRA_REP_SENT) {
		ret = -EINVAL;
		goto error;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto error;

	cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
		      private_data, private_data_len);

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		kfree(data);
		return ret;
	}

	cm_id->state = IB_CM_ESTABLISHED;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rtu);
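
/*
 * Usage note (added commentary): RTU completes the REQ/REP/RTU three-way
 * handshake from the active side, typically as
 *
 *	ib_send_cm_rtu(cm_id, NULL, 0);
 *
 * from the IB_CM_REP_RECEIVED event handler; the passive side reaches
 * IB_CM_ESTABLISHED when the RTU arrives (see cm_rtu_handler() below).
 */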
static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type)
{
	struct cm_rep_msg *rep_msg;
	struct ib_cm_rep_event_param *param;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rep_rcvd;
	param->remote_ca_guid = rep_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_rep_get_qpn(rep_msg, qp_type));
	param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
	param->responder_resources = rep_msg->initiator_depth;
	param->initiator_depth = rep_msg->resp_resources;
	param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
	param->failover_accepted = cm_rep_get_failover(rep_msg);
	param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
	param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
	param->srq = cm_rep_get_srq(rep_msg);
	work->cm_event.private_data = &rep_msg->private_data;
}
static void cm_dup_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	struct ib_mad_send_buf *msg = NULL;
	int ret;

	rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
				   rep_msg->local_comm_id);
	if (!cm_id_priv)
		return;

	atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
			counter[CM_REP_COUNTER]);
	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		goto deref;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
		cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else
		goto unlock;
	spin_unlock_irq(&cm_id_priv->lock);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	return;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
free:	cm_free_msg(msg);
deref:	cm_deref_id(cm_id_priv);
}
static int cm_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	int ret;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
	if (!cm_id_priv) {
		cm_dup_rep_handler(work);
		return -EINVAL;
	}

	cm_format_rep_event(work, cm_id_priv->qp_type);

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		break;
	default:
		spin_unlock_irq(&cm_id_priv->lock);
		ret = -EINVAL;
		goto error;
	}

	cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);

	spin_lock(&cm.lock);
	/* Check for duplicate REP. */
	if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
		spin_unlock(&cm.lock);
		spin_unlock_irq(&cm_id_priv->lock);
		ret = -EINVAL;
		goto error;
	}
	/* Check for a stale connection. */
	if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
		rb_erase(&cm_id_priv->timewait_info->remote_id_node,
			 &cm.remote_id_table);
		cm_id_priv->timewait_info->inserted_remote_id = 0;
		spin_unlock(&cm.lock);
		spin_unlock_irq(&cm_id_priv->lock);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
			     NULL, 0);
		ret = -EINVAL;
		goto error;
	}
	spin_unlock(&cm.lock);

	cm_id_priv->id.state = IB_CM_REP_RCVD;
	cm_id_priv->id.remote_id = rep_msg->local_comm_id;
	cm_id_priv->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
	cm_id_priv->initiator_depth = rep_msg->resp_resources;
	cm_id_priv->responder_resources = rep_msg->initiator_depth;
	cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
	cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
	cm_id_priv->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
	cm_id_priv->av.timeout =
			cm_ack_timeout(cm_id_priv->target_ack_delay,
				       cm_id_priv->av.timeout - 1);
	cm_id_priv->alt_av.timeout =
			cm_ack_timeout(cm_id_priv->target_ack_delay,
				       cm_id_priv->alt_av.timeout - 1);

	/* todo: handle peer_to_peer */

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

error:
	cm_deref_id(cm_id_priv);
	return ret;
}
static int cm_establish_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	/* See comment in cm_establish about lookup. */
	cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static int cm_rtu_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rtu_msg *rtu_msg;
	int ret;

	rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
				   rtu_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &rtu_msg->private_data;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_REP_SENT &&
	    cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
		spin_unlock_irq(&cm_id_priv->lock);
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_RTU_COUNTER]);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_ESTABLISHED;

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
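
/*
 * DREQ/DREP: orderly teardown of an established connection.
 * ib_send_cm_dreq() moves the cm_id to IB_CM_DREQ_SENT; if the DREQ
 * cannot be allocated or posted, the id drops straight into timewait.
 */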
static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
			   struct cm_id_private *cm_id_priv,
			   const void *private_data,
			   u8 private_data_len)
{
	cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
	dreq_msg->local_comm_id = cm_id_priv->id.local_id;
	dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);

	if (private_data && private_data_len)
		memcpy(dreq_msg->private_data, private_data, private_data_len);
}
int ib_send_cm_dreq(struct ib_cm_id *cm_id,
		    const void *private_data,
		    u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED) {
		ret = -EINVAL;
		goto out;
	}

	if (cm_id->lap_state == IB_CM_LAP_SENT ||
	    cm_id->lap_state == IB_CM_MRA_LAP_RCVD)
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret) {
		cm_enter_timewait(cm_id_priv);
		goto out;
	}

	cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
		       private_data, private_data_len);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->state = IB_CM_DREQ_SENT;
	cm_id_priv->msg = msg;
out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_dreq);
static void cm_format_drep(struct cm_drep_msg *drep_msg,
			   struct cm_id_private *cm_id_priv,
			   const void *private_data,
			   u8 private_data_len)
{
	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
	drep_msg->local_comm_id = cm_id_priv->id.local_id;
	drep_msg->remote_comm_id = cm_id_priv->id.remote_id;

	if (private_data && private_data_len)
		memcpy(drep_msg->private_data, private_data, private_data_len);
}
int ib_send_cm_drep(struct ib_cm_id *cm_id,
		    const void *private_data,
		    u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	void *data;
	int ret;

	if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_DREQ_RCVD) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		kfree(data);
		return -EINVAL;
	}

	cm_set_private_data(cm_id_priv, data, private_data_len);
	cm_enter_timewait(cm_id_priv);

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
		       private_data, private_data_len);

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_drep);
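
/*
 * Build and send a DREP directly from a received DREQ that matched no
 * local cm_id, so the remote side can still complete its teardown.
 */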
static int cm_issue_drep(struct cm_port *port,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_buf *msg = NULL;
	struct cm_dreq_msg *dreq_msg;
	struct cm_drep_msg *drep_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad;
	drep_msg = (struct cm_drep_msg *) msg->mad;

	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid);
	drep_msg->remote_comm_id = dreq_msg->local_comm_id;
	drep_msg->local_comm_id = dreq_msg->remote_comm_id;

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

	return ret;
}
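
/*
 * Handle a received DREQ.  Depending on the current state this cancels
 * an outstanding REP/DREQ/LAP send, resends a DREP from timewait, or
 * only counts the message as a duplicate.
 */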
static int cm_dreq_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_dreq_msg *dreq_msg;
	struct ib_mad_send_buf *msg = NULL;
	int ret;

	dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
				   dreq_msg->local_comm_id);
	if (!cm_id_priv) {
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_DREQ_COUNTER]);
		cm_issue_drep(work->port, work->mad_recv_wc);
		return -EINVAL;
	}

	work->cm_event.private_data = &dreq_msg->private_data;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
		goto unlock;

	switch (cm_id_priv->id.state) {
	case IB_CM_REP_SENT:
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		break;
	case IB_CM_ESTABLISHED:
		if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
		    cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
			ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		break;
	case IB_CM_MRA_REP_RCVD:
		break;
	case IB_CM_TIMEWAIT:
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_DREQ_COUNTER]);
		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
			goto unlock;

		cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
			       cm_id_priv->private_data,
			       cm_id_priv->private_data_len);
		spin_unlock_irq(&cm_id_priv->lock);

		if (ib_post_send_mad(msg, NULL))
			cm_free_msg(msg);
		goto deref;
	case IB_CM_DREQ_RCVD:
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_DREQ_COUNTER]);
		goto unlock;
	default:
		goto unlock;
	}
	cm_id_priv->id.state = IB_CM_DREQ_RCVD;
	cm_id_priv->tid = dreq_msg->hdr.tid;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
deref:	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static int cm_drep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_drep_msg *drep_msg;
	int ret;

	drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
				   drep_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &drep_msg->private_data;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
	    cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_enter_timewait(cm_id_priv);

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
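
/*
 * Send a REJ for the connection.  Rejecting before our REP went out
 * resets the id to idle; rejecting after the REP was sent enters
 * timewait.
 */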
int ib_send_cm_rej(struct ib_cm_id *cm_id,
		   enum ib_cm_rej_reason reason,
		   void *ari,
		   u8 ari_length,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
	    (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (!ret)
			cm_format_rej((struct cm_rej_msg *) msg->mad,
				      cm_id_priv, reason, ari, ari_length,
				      private_data, private_data_len);

		cm_reset_to_idle(cm_id_priv);
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (!ret)
			cm_format_rej((struct cm_rej_msg *) msg->mad,
				      cm_id_priv, reason, ari, ari_length,
				      private_data, private_data_len);

		cm_enter_timewait(cm_id_priv);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	if (ret)
		goto out;

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rej);
static void cm_format_rej_event(struct cm_work *work)
{
	struct cm_rej_msg *rej_msg;
	struct ib_cm_rej_event_param *param;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rej_rcvd;
	param->ari = rej_msg->ari;
	param->ari_length = cm_rej_get_reject_info_len(rej_msg);
	param->reason = __be16_to_cpu(rej_msg->reason);
	work->cm_event.private_data = &rej_msg->private_data;
}
static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
{
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *cm_id_priv;
	__be32 remote_id;

	remote_id = rej_msg->local_comm_id;

	if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
		spin_lock_irq(&cm.lock);
		timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari),
						   remote_id);
		if (!timewait_info) {
			spin_unlock_irq(&cm.lock);
			return NULL;
		}
		cm_id_priv = idr_find(&cm.local_id_table, (__force int)
				      (timewait_info->work.local_id ^
				       cm.random_id_operand));
		if (cm_id_priv) {
			if (cm_id_priv->id.remote_id == remote_id)
				atomic_inc(&cm_id_priv->refcount);
			else
				cm_id_priv = NULL;
		}
		spin_unlock_irq(&cm.lock);
	} else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
	else
		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);

	return cm_id_priv;
}
static int cm_rej_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rej_msg *rej_msg;
	int ret;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_rejected_id(rej_msg);
	if (!cm_id_priv)
		return -EINVAL;

	cm_format_rej_event(work);

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* fall through */
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
		if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
			cm_enter_timewait(cm_id_priv);
		else
			cm_reset_to_idle(cm_id_priv);
		break;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* fall through */
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		cm_enter_timewait(cm_id_priv);
		break;
	case IB_CM_ESTABLISHED:
		if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT ||
		    cm_id_priv->id.lap_state == IB_CM_LAP_SENT) {
			if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT)
				ib_cancel_mad(cm_id_priv->av.port->mad_agent,
					      cm_id_priv->msg);
			cm_enter_timewait(cm_id_priv);
			break;
		}
		/* fall through */
	default:
		spin_unlock_irq(&cm_id_priv->lock);
		ret = -EINVAL;
		goto out;
	}

	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return ret;
}
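
/*
 * Send an MRA asking the peer to wait service_timeout longer.  When
 * IB_CM_MRA_FLAG_DELAY is set in service_timeout, only local state is
 * updated here and the MRA itself goes out later, in response to a
 * duplicate message.
 */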
int ib_send_cm_mra(struct ib_cm_id *cm_id,
		   u8 service_timeout,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	enum ib_cm_state cm_state;
	enum ib_cm_lap_state lap_state;
	enum cm_msg_response msg_response;
	void *data;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch(cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		cm_state = IB_CM_MRA_REQ_SENT;
		lap_state = cm_id->lap_state;
		msg_response = CM_MSG_RESPONSE_REQ;
		break;
	case IB_CM_REP_RCVD:
		cm_state = IB_CM_MRA_REP_SENT;
		lap_state = cm_id->lap_state;
		msg_response = CM_MSG_RESPONSE_REP;
		break;
	case IB_CM_ESTABLISHED:
		if (cm_id->lap_state == IB_CM_LAP_RCVD) {
			cm_state = cm_id->state;
			lap_state = IB_CM_MRA_LAP_SENT;
			msg_response = CM_MSG_RESPONSE_OTHER;
			break;
		}
		/* fall through */
	default:
		ret = -EINVAL;
		goto error1;
	}

	if (!(service_timeout & IB_CM_MRA_FLAG_DELAY)) {
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      msg_response, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(msg, NULL);
		if (ret)
			goto error2;
	}

	cm_id->state = cm_state;
	cm_id->lap_state = lap_state;
	cm_id_priv->service_timeout = service_timeout;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error1:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;

error2:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	cm_free_msg(msg);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_mra);
static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
{
	switch (cm_mra_get_msg_mraed(mra_msg)) {
	case CM_MSG_RESPONSE_REQ:
		return cm_acquire_id(mra_msg->remote_comm_id, 0);
	case CM_MSG_RESPONSE_REP:
	case CM_MSG_RESPONSE_OTHER:
		return cm_acquire_id(mra_msg->remote_comm_id,
				     mra_msg->local_comm_id);
	default:
		return NULL;
	}
}
static int cm_mra_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_mra_msg *mra_msg;
	int timeout, ret;

	mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_mraed_id(mra_msg);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &mra_msg->private_data;
	work->cm_event.param.mra_rcvd.service_timeout =
					cm_mra_get_service_timeout(mra_msg);
	timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
		  cm_convert_to_ms(cm_id_priv->av.timeout);

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
		break;
	case IB_CM_REP_SENT:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
		break;
	case IB_CM_ESTABLISHED:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
		    cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout)) {
			if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
				atomic_long_inc(&work->port->
						counter_group[CM_RECV_DUPLICATES].
						counter[CM_MRA_COUNTER]);
			goto out;
		}
		cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
		break;
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_MRA_REP_RCVD:
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_MRA_COUNTER]);
		/* fall through */
	default:
		goto out;
	}

	cm_id_priv->msg->context[1] = (void *) (unsigned long)
				      cm_id_priv->id.state;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	spin_unlock_irq(&cm_id_priv->lock);
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static void cm_format_lap(struct cm_lap_msg *lap_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_sa_path_rec *alternate_path,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
	lap_msg->local_comm_id = cm_id_priv->id.local_id;
	lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
	/* todo: need remote CM response timeout */
	cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
	lap_msg->alt_local_lid = alternate_path->slid;
	lap_msg->alt_remote_lid = alternate_path->dlid;
	lap_msg->alt_local_gid = alternate_path->sgid;
	lap_msg->alt_remote_gid = alternate_path->dgid;
	cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
	cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
	lap_msg->alt_hop_limit = alternate_path->hop_limit;
	cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
	cm_lap_set_sl(lap_msg, alternate_path->sl);
	cm_lap_set_subnet_local(lap_msg, 1); /* local only... */
	cm_lap_set_local_ack_timeout(lap_msg,
		cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
			       alternate_path->packet_life_time));

	if (private_data && private_data_len)
		memcpy(lap_msg->private_data, private_data, private_data_len);
}
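
/*
 * Send a LAP to load the given alternate path on an established
 * connection.  Valid only while no other alternate-path change is in
 * progress (lap_state is UNINIT or IDLE).
 */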
int ib_send_cm_lap(struct ib_cm_id *cm_id,
		   struct ib_sa_path_rec *alternate_path,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED ||
	    (cm_id->lap_state != IB_CM_LAP_UNINIT &&
	     cm_id->lap_state != IB_CM_LAP_IDLE)) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av);
	if (ret)
		goto out;
	cm_id_priv->alt_av.timeout =
			cm_ack_timeout(cm_id_priv->target_ack_delay,
				       cm_id_priv->alt_av.timeout - 1);

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
		      alternate_path, private_data, private_data_len);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->lap_state = IB_CM_LAP_SENT;
	cm_id_priv->msg = msg;

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_lap);
static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
				    struct ib_sa_path_rec *path,
				    struct cm_lap_msg *lap_msg)
{
	memset(path, 0, sizeof *path);
	path->dgid = lap_msg->alt_local_gid;
	path->sgid = lap_msg->alt_remote_gid;
	path->dlid = lap_msg->alt_local_lid;
	path->slid = lap_msg->alt_remote_lid;
	path->flow_label = cm_lap_get_flow_label(lap_msg);
	path->hop_limit = lap_msg->alt_hop_limit;
	path->traffic_class = cm_lap_get_traffic_class(lap_msg);
	path->reversible = 1;
	path->pkey = cm_id_priv->pkey;
	path->sl = cm_lap_get_sl(lap_msg);
	path->mtu_selector = IB_SA_EQ;
	path->mtu = cm_id_priv->path_mtu;
	path->rate_selector = IB_SA_EQ;
	path->rate = cm_lap_get_packet_rate(lap_msg);
	path->packet_life_time_selector = IB_SA_EQ;
	path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
	path->packet_life_time -= (path->packet_life_time > 0);
}
static int cm_lap_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_lap_msg *lap_msg;
	struct ib_cm_lap_event_param *param;
	struct ib_mad_send_buf *msg = NULL;
	int ret;

	/* todo: verify LAP request and send reject APR if invalid. */
	lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
				   lap_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	param = &work->cm_event.param.lap_rcvd;
	param->alternate_path = &work->path[0];
	cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
	work->cm_event.private_data = &lap_msg->private_data;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
		goto unlock;

	switch (cm_id_priv->id.lap_state) {
	case IB_CM_LAP_UNINIT:
	case IB_CM_LAP_IDLE:
		break;
	case IB_CM_MRA_LAP_SENT:
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_LAP_COUNTER]);
		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
			goto unlock;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_OTHER,
			      cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		spin_unlock_irq(&cm_id_priv->lock);

		if (ib_post_send_mad(msg, NULL))
			cm_free_msg(msg);
		goto deref;
	case IB_CM_LAP_RCVD:
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_LAP_COUNTER]);
		/* fall through */
	default:
		goto unlock;
	}

	cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
	cm_id_priv->tid = lap_msg->hdr.tid;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				work->mad_recv_wc->recv_buf.grh,
				&cm_id_priv->av);
	cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
deref:	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
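
/*
 * APR: reply to a received LAP, accepting or rejecting the proposed
 * alternate path.
 */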
static void cm_format_apr(struct cm_apr_msg *apr_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_apr_status status,
			  void *info,
			  u8 info_length,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
	apr_msg->local_comm_id = cm_id_priv->id.local_id;
	apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
	apr_msg->ap_status = (u8) status;

	if (info && info_length) {
		apr_msg->info_length = info_length;
		memcpy(apr_msg->info, info, info_length);
	}

	if (private_data && private_data_len)
		memcpy(apr_msg->private_data, private_data, private_data_len);
}
int ib_send_cm_apr(struct ib_cm_id *cm_id,
		   enum ib_cm_apr_status status,
		   void *info,
		   u8 info_length,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
	    (info && info_length > IB_CM_APR_INFO_LENGTH))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED ||
	    (cm_id->lap_state != IB_CM_LAP_RCVD &&
	     cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
		      info, info_length, private_data, private_data_len);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->lap_state = IB_CM_LAP_IDLE;
out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_apr);
static int cm_apr_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_apr_msg *apr_msg;
	int ret;

	apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
				   apr_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */

	work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
	work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
	work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
	work->cm_event.private_data = &apr_msg->private_data;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
	    (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
	     cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	cm_id_priv->msg = NULL;

	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static int cm_timewait_handler(struct cm_work *work)
{
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *cm_id_priv;
	int ret;

	timewait_info = (struct cm_timewait_info *)work;
	spin_lock_irq(&cm.lock);
	list_del(&timewait_info->list);
	spin_unlock_irq(&cm.lock);

	cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
				   timewait_info->work.remote_id);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
	    cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
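
/*
 * SIDR: service ID resolution.  A SIDR REQ asks the remote CM for the
 * QPN and Q_Key of the queue pair servicing service_id; no connection
 * state is kept beyond the request/reply exchange.
 */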
static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_req_param *param)
{
	cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
	sidr_req_msg->request_id = cm_id_priv->id.local_id;
	sidr_req_msg->pkey = param->path->pkey;
	sidr_req_msg->service_id = param->service_id;

	if (param->private_data && param->private_data_len)
		memcpy(sidr_req_msg->private_data, param->private_data,
		       param->private_data_len);
}
int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (!param->path || (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
	if (ret)
		goto out;

	cm_id->service_id = param->service_id;
	cm_id->service_mask = ~cpu_to_be64(0);
	cm_id_priv->timeout_ms = param->timeout_ms;
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
			   param);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state == IB_CM_IDLE)
		ret = ib_post_send_mad(msg, NULL);
	else
		ret = -EINVAL;

	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		goto out;
	}
	cm_id->state = IB_CM_SIDR_REQ_SENT;
	cm_id_priv->msg = msg;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
out:
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_req);
static void cm_format_sidr_req_event(struct cm_work *work,
				     struct ib_cm_id *listen_id)
{
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_cm_sidr_req_event_param *param;

	sidr_req_msg = (struct cm_sidr_req_msg *)
				work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.sidr_req_rcvd;
	param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
	param->listen_id = listen_id;
	param->service_id = sidr_req_msg->service_id;
	param->bth_pkey = cm_get_bth_pkey(work);
	param->port = work->port->port_num;
	work->cm_event.private_data = &sidr_req_msg->private_data;
}
static int cm_sidr_req_handler(struct cm_work *work)
{
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_wc *wc;

	cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	/* Record SGID/SLID and request ID for lookup. */
	sidr_req_msg = (struct cm_sidr_req_msg *)
				work->mad_recv_wc->recv_buf.mad;
	wc = work->mad_recv_wc->wc;
	cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
	cm_id_priv->av.dgid.global.interface_id = 0;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				work->mad_recv_wc->recv_buf.grh,
				&cm_id_priv->av);
	cm_id_priv->id.remote_id = sidr_req_msg->request_id;
	cm_id_priv->tid = sidr_req_msg->hdr.tid;
	atomic_inc(&cm_id_priv->work_count);

	spin_lock_irq(&cm.lock);
	cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
	if (cur_cm_id_priv) {
		spin_unlock_irq(&cm.lock);
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_SIDR_REQ_COUNTER]);
		goto out; /* Duplicate message. */
	}
	cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
	cur_cm_id_priv = cm_find_listen(cm_id->device,
					sidr_req_msg->service_id);
	if (!cur_cm_id_priv) {
		spin_unlock_irq(&cm.lock);
		cm_reject_sidr_req(cm_id_priv, IB_SIDR_UNSUPPORTED);
		goto out; /* No match. */
	}
	atomic_inc(&cur_cm_id_priv->refcount);
	atomic_inc(&cm_id_priv->refcount);
	spin_unlock_irq(&cm.lock);

	cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = cur_cm_id_priv->id.context;
	cm_id_priv->id.service_id = sidr_req_msg->service_id;
	cm_id_priv->id.service_mask = ~cpu_to_be64(0);

	cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
	cm_deref_id(cur_cm_id_priv);
	return 0;
out:
	ib_destroy_cm_id(&cm_id_priv->id);
	return -EINVAL;
}
static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_rep_param *param)
{
	cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
			  cm_id_priv->tid);
	sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
	sidr_rep_msg->status = param->status;
	cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
	sidr_rep_msg->service_id = cm_id_priv->id.service_id;
	sidr_rep_msg->qkey = cpu_to_be32(param->qkey);

	if (param->info && param->info_length)
		memcpy(sidr_rep_msg->info, param->info, param->info_length);

	if (param->private_data && param->private_data_len)
		memcpy(sidr_rep_msg->private_data, param->private_data,
		       param->private_data_len);
}
int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
	    (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
		ret = -EINVAL;
		goto error;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto error;

	cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
			   param);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}
	cm_id->state = IB_CM_IDLE;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	spin_lock_irqsave(&cm.lock, flags);
	if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
		RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
	}
	spin_unlock_irqrestore(&cm.lock, flags);
	return 0;

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_rep);
static void cm_format_sidr_rep_event(struct cm_work *work)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct ib_cm_sidr_rep_event_param *param;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
				work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.sidr_rep_rcvd;
	param->status = sidr_rep_msg->status;
	param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
	param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
	param->info = &sidr_rep_msg->info;
	param->info_len = sidr_rep_msg->info_length;
	work->cm_event.private_data = &sidr_rep_msg->private_data;
}
static int cm_sidr_rep_handler(struct cm_work *work)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct cm_id_private *cm_id_priv;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
				work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	spin_unlock_irq(&cm_id_priv->lock);

	cm_format_sidr_rep_event(work);
	cm_process_work(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
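
/*
 * A send completed in error.  If it is still the active message for the
 * cm_id and the state has not moved on, translate the failure into the
 * matching *_ERROR event and report it to the consumer.
 */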
static void cm_process_send_error(struct ib_mad_send_buf *msg,
				  enum ib_wc_status wc_status)
{
	struct cm_id_private *cm_id_priv;
	struct ib_cm_event cm_event;
	enum ib_cm_state state;
	int ret;

	memset(&cm_event, 0, sizeof cm_event);
	cm_id_priv = msg->context[0];

	/* Discard old sends or ones without a response. */
	spin_lock_irq(&cm_id_priv->lock);
	state = (enum ib_cm_state) (unsigned long) msg->context[1];
	if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
		goto discard;

	switch (state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REQ_ERROR;
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REP_ERROR;
		break;
	case IB_CM_DREQ_SENT:
		cm_enter_timewait(cm_id_priv);
		cm_event.event = IB_CM_DREQ_ERROR;
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id_priv->id.state = IB_CM_IDLE;
		cm_event.event = IB_CM_SIDR_REQ_ERROR;
		break;
	default:
		goto discard;
	}
	spin_unlock_irq(&cm_id_priv->lock);
	cm_event.param.send_status = wc_status;

	/* No other events can occur on the cm_id at this point. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
	cm_free_msg(msg);
	if (ret)
		ib_destroy_cm_id(&cm_id_priv->id);
	return;
discard:
	spin_unlock_irq(&cm_id_priv->lock);
	cm_free_msg(msg);
}
static void cm_send_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_send_buf *msg = mad_send_wc->send_buf;
	struct cm_port *port;
	u16 attr_index;

	port = mad_agent->context;
	attr_index = be16_to_cpu(((struct ib_mad_hdr *)
				  msg->mad)->attr_id) - CM_ATTR_ID_OFFSET;

	/*
	 * If the send was in response to a received message (context[0] is not
	 * set to a cm_id), and is not a REJ, then it is a send that was
	 * manually retried.
	 */
	if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
		msg->retries = 1;

	atomic_long_add(1 + msg->retries,
			&port->counter_group[CM_XMIT].counter[attr_index]);
	if (msg->retries)
		atomic_long_add(msg->retries,
				&port->counter_group[CM_XMIT_RETRIES].
				counter[attr_index]);

	switch (mad_send_wc->status) {
	case IB_WC_SUCCESS:
	case IB_WC_WR_FLUSH_ERR:
		cm_free_msg(msg);
		break;
	default:
		if (msg->context[0] && msg->context[1])
			cm_process_send_error(msg, mad_send_wc->status);
		else
			cm_free_msg(msg);
		break;
	}
}
static void cm_work_handler(struct work_struct *_work)
{
	struct cm_work *work = container_of(_work, struct cm_work, work.work);
	int ret;

	switch (work->cm_event.event) {
	case IB_CM_REQ_RECEIVED:
		ret = cm_req_handler(work);
		break;
	case IB_CM_MRA_RECEIVED:
		ret = cm_mra_handler(work);
		break;
	case IB_CM_REJ_RECEIVED:
		ret = cm_rej_handler(work);
		break;
	case IB_CM_REP_RECEIVED:
		ret = cm_rep_handler(work);
		break;
	case IB_CM_RTU_RECEIVED:
		ret = cm_rtu_handler(work);
		break;
	case IB_CM_USER_ESTABLISHED:
		ret = cm_establish_handler(work);
		break;
	case IB_CM_DREQ_RECEIVED:
		ret = cm_dreq_handler(work);
		break;
	case IB_CM_DREP_RECEIVED:
		ret = cm_drep_handler(work);
		break;
	case IB_CM_SIDR_REQ_RECEIVED:
		ret = cm_sidr_req_handler(work);
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		ret = cm_sidr_rep_handler(work);
		break;
	case IB_CM_LAP_RECEIVED:
		ret = cm_lap_handler(work);
		break;
	case IB_CM_APR_RECEIVED:
		ret = cm_apr_handler(work);
		break;
	case IB_CM_TIMEWAIT_EXIT:
		ret = cm_timewait_handler(work);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	if (ret)
		cm_free_work(work);
}
static int cm_establish(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;
	unsigned long flags;
	int ret = 0;
	struct cm_device *cm_dev;

	cm_dev = ib_get_client_data(cm_id->device, &cm_client);
	if (!cm_dev)
		return -ENODEV;

	work = kmalloc(sizeof *work, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state)
	{
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_id->state = IB_CM_ESTABLISHED;
		break;
	case IB_CM_ESTABLISHED:
		ret = -EISCONN;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	if (ret) {
		kfree(work);
		goto out;
	}

	/*
	 * The CM worker thread may try to destroy the cm_id before it
	 * can execute this work item. To prevent potential deadlock,
	 * we need to find the cm_id once we're in the context of the
	 * worker thread, rather than holding a reference on it.
	 */
	INIT_DELAYED_WORK(&work->work, cm_work_handler);
	work->local_id = cm_id->local_id;
	work->remote_id = cm_id->remote_id;
	work->mad_recv_wc = NULL;
	work->cm_event.event = IB_CM_USER_ESTABLISHED;

	/* Check if the device started its remove_one */
	spin_lock_irqsave(&cm.lock, flags);
	if (!cm_dev->going_down) {
		queue_delayed_work(cm.wq, &work->work, 0);
	} else {
		kfree(work);
		ret = -ENODEV;
	}
	spin_unlock_irqrestore(&cm.lock, flags);
out:
	return ret;
}
static int cm_migrate(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	unsigned long flags;
	int ret = 0;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state == IB_CM_ESTABLISHED &&
	    (cm_id->lap_state == IB_CM_LAP_UNINIT ||
	     cm_id->lap_state == IB_CM_LAP_IDLE)) {
		cm_id->lap_state = IB_CM_LAP_IDLE;
		cm_id_priv->av = cm_id_priv->alt_av;
	} else
		ret = -EINVAL;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
{
	int ret;

	switch (event) {
	case IB_EVENT_COMM_EST:
		ret = cm_establish(cm_id);
		break;
	case IB_EVENT_PATH_MIG:
		ret = cm_migrate(cm_id);
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_notify);
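
/*
 * Receive path: map the MAD attribute ID to a CM event, allocate a work
 * item (with room for any path records the handler will format), and
 * queue it to the CM workqueue unless the device is being removed.
 */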
static void cm_recv_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_send_buf *send_buf,
			    struct ib_mad_recv_wc *mad_recv_wc)
{
	struct cm_port *port = mad_agent->context;
	struct cm_work *work;
	enum ib_cm_event_type event;
	u16 attr_id;
	int paths = 0;
	int going_down = 0;

	switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
	case CM_REQ_ATTR_ID:
		paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
						    alt_local_lid != 0);
		event = IB_CM_REQ_RECEIVED;
		break;
	case CM_MRA_ATTR_ID:
		event = IB_CM_MRA_RECEIVED;
		break;
	case CM_REJ_ATTR_ID:
		event = IB_CM_REJ_RECEIVED;
		break;
	case CM_REP_ATTR_ID:
		event = IB_CM_REP_RECEIVED;
		break;
	case CM_RTU_ATTR_ID:
		event = IB_CM_RTU_RECEIVED;
		break;
	case CM_DREQ_ATTR_ID:
		event = IB_CM_DREQ_RECEIVED;
		break;
	case CM_DREP_ATTR_ID:
		event = IB_CM_DREP_RECEIVED;
		break;
	case CM_SIDR_REQ_ATTR_ID:
		event = IB_CM_SIDR_REQ_RECEIVED;
		break;
	case CM_SIDR_REP_ATTR_ID:
		event = IB_CM_SIDR_REP_RECEIVED;
		break;
	case CM_LAP_ATTR_ID:
		paths = 1;
		event = IB_CM_LAP_RECEIVED;
		break;
	case CM_APR_ATTR_ID:
		event = IB_CM_APR_RECEIVED;
		break;
	default:
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
	atomic_long_inc(&port->counter_group[CM_RECV].
			counter[attr_id - CM_ATTR_ID_OFFSET]);

	work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
		       GFP_KERNEL);
	if (!work) {
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	INIT_DELAYED_WORK(&work->work, cm_work_handler);
	work->cm_event.event = event;
	work->mad_recv_wc = mad_recv_wc;
	work->port = port;

	/* Check if the device started its remove_one */
	spin_lock_irq(&cm.lock);
	if (!port->cm_dev->going_down)
		queue_delayed_work(cm.wq, &work->work, 0);
	else
		going_down = 1;
	spin_unlock_irq(&cm.lock);

	if (going_down) {
		kfree(work);
		ib_free_recv_mad(mad_recv_wc);
	}
}
static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
				struct ib_qp_attr *qp_attr,
				int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
				IB_QP_PKEY_INDEX | IB_QP_PORT;
		qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
		if (cm_id_priv->responder_resources)
			qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
						    IB_ACCESS_REMOTE_ATOMIC;
		qp_attr->pkey_index = cm_id_priv->av.pkey_index;
		qp_attr->port_num = cm_id_priv->av.port->port_num;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
				IB_QP_DEST_QPN | IB_QP_RQ_PSN;
		qp_attr->ah_attr = cm_id_priv->av.ah_attr;
		qp_attr->path_mtu = cm_id_priv->path_mtu;
		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
		if (cm_id_priv->qp_type == IB_QPT_RC ||
		    cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
			*qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
					 IB_QP_MIN_RNR_TIMER;
			qp_attr->max_dest_rd_atomic =
					cm_id_priv->responder_resources;
			qp_attr->min_rnr_timer = 0;
		}
		if (cm_id_priv->alt_av.ah_attr.dlid) {
			*qp_attr_mask |= IB_QP_ALT_PATH;
			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
			qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	/* Allow transition to RTS before sending REP */
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
			*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
			qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
			switch (cm_id_priv->qp_type) {
			case IB_QPT_RC:
			case IB_QPT_XRC_INI:
				*qp_attr_mask |= IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
						 IB_QP_MAX_QP_RD_ATOMIC;
				qp_attr->retry_cnt = cm_id_priv->retry_count;
				qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
				qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
				/* fall through */
			case IB_QPT_XRC_TGT:
				*qp_attr_mask |= IB_QP_TIMEOUT;
				qp_attr->timeout = cm_id_priv->av.timeout;
				break;
			default:
				break;
			}
			if (cm_id_priv->alt_av.ah_attr.dlid) {
				*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
				qp_attr->path_mig_state = IB_MIG_REARM;
			}
		} else {
			*qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
			qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
			qp_attr->path_mig_state = IB_MIG_REARM;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
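
/*
 * Fill in the CM-required QP attributes for the INIT, RTR, or RTS
 * transition, chosen by qp_attr->qp_state.  A consumer would typically
 * call this once per transition and feed the result to ib_modify_qp(),
 * along these lines (sketch only; error handling omitted):
 *
 *	qp_attr.qp_state = IB_QPS_RTR;
 *	ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
 *	ib_modify_qp(qp, &qp_attr, qp_attr_mask);
 */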
int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
		       struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	switch (qp_attr->qp_state) {
	case IB_QPS_INIT:
		ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTR:
		ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTS:
		ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_init_qp_attr);
static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
			       char *buf)
{
	struct cm_counter_group *group;
	struct cm_counter_attribute *cm_attr;

	group = container_of(obj, struct cm_counter_group, obj);
	cm_attr = container_of(attr, struct cm_counter_attribute, attr);

	return sprintf(buf, "%ld\n",
		       atomic_long_read(&group->counter[cm_attr->index]));
}

static const struct sysfs_ops cm_counter_ops = {
	.show = cm_show_counter
};

static struct kobj_type cm_counter_obj_type = {
	.sysfs_ops = &cm_counter_ops,
	.default_attrs = cm_counter_default_attrs
};
static void cm_release_port_obj(struct kobject *obj)
{
	struct cm_port *cm_port;

	cm_port = container_of(obj, struct cm_port, port_obj);
	kfree(cm_port);
}

static struct kobj_type cm_port_obj_type = {
	.release = cm_release_port_obj
};

static char *cm_devnode(struct device *dev, umode_t *mode)
{
	if (mode)
		*mode = 0666;
	return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
}

struct class cm_class = {
	.owner   = THIS_MODULE,
	.name    = "infiniband_cm",
	.devnode = cm_devnode,
};
EXPORT_SYMBOL(cm_class);

static int cm_create_port_fs(struct cm_port *port)
{
	int i, ret;

	ret = kobject_init_and_add(&port->port_obj, &cm_port_obj_type,
				   &port->cm_dev->device->kobj,
				   "%d", port->port_num);
	if (ret) {
		kfree(port);
		return ret;
	}

	for (i = 0; i < CM_COUNTER_GROUPS; i++) {
		ret = kobject_init_and_add(&port->counter_group[i].obj,
					   &cm_counter_obj_type,
					   &port->port_obj,
					   "%s", counter_group_names[i]);
		if (ret)
			goto error;
	}
	return 0;

error:
	while (i--)
		kobject_put(&port->counter_group[i].obj);
	kobject_put(&port->port_obj);
	return ret;
}

static void cm_remove_port_fs(struct cm_port *port)
{
	int i;

	for (i = 0; i < CM_COUNTER_GROUPS; i++)
		kobject_put(&port->counter_group[i].obj);
	kobject_put(&port->port_obj);
}
static void cm_add_one(struct ib_device *ib_device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_mad_reg_req reg_req = {
		.mgmt_class = IB_MGMT_CLASS_CM,
		.mgmt_class_version = IB_CM_CLASS_VERSION,
	};
	struct ib_port_modify port_modify = {
		.set_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int ret;
	int count = 0;
	u8 i;

	cm_dev = kzalloc(sizeof(*cm_dev) + sizeof(*port) *
			 ib_device->phys_port_cnt, GFP_KERNEL);
	if (!cm_dev)
		return;

	cm_dev->ib_device = ib_device;
	cm_dev->ack_delay = ib_device->attrs.local_ca_ack_delay;
	cm_dev->going_down = 0;
	cm_dev->device = device_create(&cm_class, &ib_device->dev,
				       MKDEV(0, 0), NULL,
				       "%s", ib_device->name);
	if (IS_ERR(cm_dev->device)) {
		kfree(cm_dev);
		return;
	}

	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
	for (i = 1; i <= ib_device->phys_port_cnt; i++) {
		if (!rdma_cap_ib_cm(ib_device, i))
			continue;

		port = kzalloc(sizeof *port, GFP_KERNEL);
		if (!port)
			goto error1;

		cm_dev->port[i-1] = port;
		port->cm_dev = cm_dev;
		port->port_num = i;

		ret = cm_create_port_fs(port);
		if (ret)
			goto error1;

		port->mad_agent = ib_register_mad_agent(ib_device, i,
							IB_QPT_GSI,
							&reg_req, 0,
							cm_send_handler,
							cm_recv_handler,
							port, 0);
		if (IS_ERR(port->mad_agent))
			goto error2;

		ret = ib_modify_port(ib_device, i, 0, &port_modify);
		if (ret)
			goto error3;

		count++;
	}
	if (!count)
		goto free;

	ib_set_client_data(ib_device, &cm_client, cm_dev);

	write_lock_irqsave(&cm.device_lock, flags);
	list_add_tail(&cm_dev->list, &cm.device_list);
	write_unlock_irqrestore(&cm.device_lock, flags);
	return;

error3:
	ib_unregister_mad_agent(port->mad_agent);
error2:
	cm_remove_port_fs(port);
error1:
	port_modify.set_port_cap_mask = 0;
	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
	while (--i) {
		if (!rdma_cap_ib_cm(ib_device, i))
			continue;

		port = cm_dev->port[i-1];
		ib_modify_port(ib_device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
		cm_remove_port_fs(port);
	}
free:
	device_unregister(cm_dev->device);
	kfree(cm_dev);
}
static void cm_remove_one(struct ib_device *ib_device, void *client_data)
{
	struct cm_device *cm_dev = client_data;
	struct cm_port *port;
	struct ib_port_modify port_modify = {
		.clr_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int i;

	if (!cm_dev)
		return;

	write_lock_irqsave(&cm.device_lock, flags);
	list_del(&cm_dev->list);
	write_unlock_irqrestore(&cm.device_lock, flags);

	spin_lock_irq(&cm.lock);
	cm_dev->going_down = 1;
	spin_unlock_irq(&cm.lock);

	for (i = 1; i <= ib_device->phys_port_cnt; i++) {
		if (!rdma_cap_ib_cm(ib_device, i))
			continue;

		port = cm_dev->port[i-1];
		ib_modify_port(ib_device, port->port_num, 0, &port_modify);
		/*
		 * Flush the workqueue after setting going_down; this
		 * verifies that no new work will be queued by the recv
		 * handler, after which it is safe to unregister the MAD
		 * agent.
		 */
		flush_workqueue(cm.wq);
		ib_unregister_mad_agent(port->mad_agent);
		cm_remove_port_fs(port);
	}

	device_unregister(cm_dev->device);
	kfree(cm_dev);
}
static int __init ib_cm_init(void)
{
	int ret;

	memset(&cm, 0, sizeof cm);
	INIT_LIST_HEAD(&cm.device_list);
	rwlock_init(&cm.device_lock);
	spin_lock_init(&cm.lock);
	cm.listen_service_table = RB_ROOT;
	cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
	cm.remote_id_table = RB_ROOT;
	cm.remote_qp_table = RB_ROOT;
	cm.remote_sidr_table = RB_ROOT;
	idr_init(&cm.local_id_table);
	get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
	INIT_LIST_HEAD(&cm.timewait_list);

	ret = class_register(&cm_class);
	if (ret) {
		ret = -ENOMEM;
		goto error1;
	}

	cm.wq = create_workqueue("ib_cm");
	if (!cm.wq) {
		ret = -ENOMEM;
		goto error2;
	}

	ret = ib_register_client(&cm_client);
	if (ret)
		goto error3;

	return 0;
error3:
	destroy_workqueue(cm.wq);
error2:
	class_unregister(&cm_class);
error1:
	idr_destroy(&cm.local_id_table);
	return ret;
}
static void __exit ib_cm_cleanup(void)
{
	struct cm_timewait_info *timewait_info, *tmp;

	spin_lock_irq(&cm.lock);
	list_for_each_entry(timewait_info, &cm.timewait_list, list)
		cancel_delayed_work(&timewait_info->work.work);
	spin_unlock_irq(&cm.lock);

	ib_unregister_client(&cm_client);
	destroy_workqueue(cm.wq);

	list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
		list_del(&timewait_info->list);
		kfree(timewait_info);
	}

	class_unregister(&cm_class);
	idr_destroy(&cm.local_id_table);
}

module_init(ib_cm_init);
module_exit(ib_cm_cleanup);