/*
 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2009 HNR Consulting. All rights reserved.
 * Copyright (c) 2014 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <rdma/ib_cache.h>

#include "mad_priv.h"
#include "mad_rmpp.h"
#include "smi.h"
#include "opa_smi.h"
#include "agent.h"
#include "core_priv.h"
static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;

module_param_named(send_queue_size, mad_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");

static struct list_head ib_mad_port_list;
static u32 ib_mad_client_id = 0;

/* Port list lock */
static DEFINE_SPINLOCK(ib_mad_port_list_lock);
/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req);
static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
static struct ib_mad_agent_private *find_mad_agent(
					struct ib_mad_port_private *port_priv,
					const struct ib_mad_hdr *mad);
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void timeout_sends(struct work_struct *work);
static void local_completions(struct work_struct *work);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class);
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv);
static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
			      struct ib_wc *wc);
static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc);
88 * Returns a ib_mad_port_private structure or NULL for a device/port
89 * Assumes ib_mad_port_list_lock is being held
91 static inline struct ib_mad_port_private *
92 __ib_get_mad_port(struct ib_device *device, int port_num)
94 struct ib_mad_port_private *entry;
96 list_for_each_entry(entry, &ib_mad_port_list, port_list) {
97 if (entry->device == device && entry->port_num == port_num)
104 * Wrapper function to return a ib_mad_port_private structure or NULL
107 static inline struct ib_mad_port_private *
108 ib_get_mad_port(struct ib_device *device, int port_num)
110 struct ib_mad_port_private *entry;
113 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
114 entry = __ib_get_mad_port(device, port_num);
115 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
120 static inline u8 convert_mgmt_class(u8 mgmt_class)
122 /* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
123 return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
127 static int get_spl_qp_index(enum ib_qp_type qp_type)
140 static int vendor_class_index(u8 mgmt_class)
142 return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
145 static int is_vendor_class(u8 mgmt_class)
147 if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
148 (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
153 static int is_vendor_oui(char *oui)
155 if (oui[0] || oui[1] || oui[2])
160 static int is_vendor_method_in_use(
161 struct ib_mad_mgmt_vendor_class *vendor_class,
162 struct ib_mad_reg_req *mad_reg_req)
164 struct ib_mad_mgmt_method_table *method;
167 for (i = 0; i < MAX_MGMT_OUI; i++) {
168 if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
169 method = vendor_class->method_table[i];
171 if (method_in_use(&method, mad_reg_req))
int ib_response_mad(const struct ib_mad_hdr *hdr)
{
	return ((hdr->method & IB_MGMT_METHOD_RESP) ||
		(hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) ||
		((hdr->mgmt_class == IB_MGMT_CLASS_BM) &&
		 (hdr->attr_mod & IB_BM_ATTR_MOD_RESP)));
}
EXPORT_SYMBOL(ib_response_mad);
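
/*
 * Usage sketch (illustration only, not part of this file): a client's
 * receive handler can use ib_response_mad() to tell solicited responses
 * apart from unsolicited requests.  "example_recv_handler" is a
 * hypothetical client callback; only ib_response_mad() and
 * ib_free_recv_mad() are real APIs exported here.
 *
 *	static void example_recv_handler(struct ib_mad_agent *agent,
 *					 struct ib_mad_send_buf *send_buf,
 *					 struct ib_mad_recv_wc *wc)
 *	{
 *		const struct ib_mad_hdr *hdr = &wc->recv_buf.mad->mad_hdr;
 *
 *		if (ib_response_mad(hdr)) {
 *			// response to one of our requests; send_buf, when
 *			// non-NULL, is the matching request buffer
 *		} else {
 *			// unsolicited request addressed to this agent
 *		}
 *		ib_free_recv_mad(wc);	// always return the receive buffers
 *	}
 */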
191 * ib_register_mad_agent - Register to send/receive MADs
193 struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
195 enum ib_qp_type qp_type,
196 struct ib_mad_reg_req *mad_reg_req,
198 ib_mad_send_handler send_handler,
199 ib_mad_recv_handler recv_handler,
201 u32 registration_flags)
203 struct ib_mad_port_private *port_priv;
204 struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
205 struct ib_mad_agent_private *mad_agent_priv;
206 struct ib_mad_reg_req *reg_req = NULL;
207 struct ib_mad_mgmt_class_table *class;
208 struct ib_mad_mgmt_vendor_class_table *vendor;
209 struct ib_mad_mgmt_vendor_class *vendor_class;
210 struct ib_mad_mgmt_method_table *method;
213 u8 mgmt_class, vclass;
215 /* Validate parameters */
216 qpn = get_spl_qp_index(qp_type);
218 dev_notice(&device->dev,
219 "ib_register_mad_agent: invalid QP Type %d\n",
224 if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
225 dev_notice(&device->dev,
226 "ib_register_mad_agent: invalid RMPP Version %u\n",
231 /* Validate MAD registration request if supplied */
233 if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
234 dev_notice(&device->dev,
235 "ib_register_mad_agent: invalid Class Version %u\n",
236 mad_reg_req->mgmt_class_version);
240 dev_notice(&device->dev,
241 "ib_register_mad_agent: no recv_handler\n");
244 if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
246 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
247 * one in this range currently allowed
249 if (mad_reg_req->mgmt_class !=
250 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
251 dev_notice(&device->dev,
252 "ib_register_mad_agent: Invalid Mgmt Class 0x%x\n",
253 mad_reg_req->mgmt_class);
256 } else if (mad_reg_req->mgmt_class == 0) {
258 * Class 0 is reserved in IBA and is used for
259 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
261 dev_notice(&device->dev,
262 "ib_register_mad_agent: Invalid Mgmt Class 0\n");
264 } else if (is_vendor_class(mad_reg_req->mgmt_class)) {
266 * If class is in "new" vendor range,
267 * ensure supplied OUI is not zero
269 if (!is_vendor_oui(mad_reg_req->oui)) {
270 dev_notice(&device->dev,
271 "ib_register_mad_agent: No OUI specified for class 0x%x\n",
272 mad_reg_req->mgmt_class);
276 /* Make sure class supplied is consistent with RMPP */
277 if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
279 dev_notice(&device->dev,
280 "ib_register_mad_agent: RMPP version for non-RMPP class 0x%x\n",
281 mad_reg_req->mgmt_class);
286 /* Make sure class supplied is consistent with QP type */
287 if (qp_type == IB_QPT_SMI) {
288 if ((mad_reg_req->mgmt_class !=
289 IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
290 (mad_reg_req->mgmt_class !=
291 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
292 dev_notice(&device->dev,
293 "ib_register_mad_agent: Invalid SM QP type: class 0x%x\n",
294 mad_reg_req->mgmt_class);
298 if ((mad_reg_req->mgmt_class ==
299 IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
300 (mad_reg_req->mgmt_class ==
301 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
302 dev_notice(&device->dev,
303 "ib_register_mad_agent: Invalid GS QP type: class 0x%x\n",
304 mad_reg_req->mgmt_class);
309 /* No registration request supplied */
312 if (registration_flags & IB_MAD_USER_RMPP)
316 /* Validate device and port */
317 port_priv = ib_get_mad_port(device, port_num);
319 dev_notice(&device->dev,
320 "ib_register_mad_agent: Invalid port %d\n",
322 ret = ERR_PTR(-ENODEV);
326 /* Verify the QP requested is supported. For example, Ethernet devices
327 * will not have QP0 */
328 if (!port_priv->qp_info[qpn].qp) {
329 dev_notice(&device->dev,
330 "ib_register_mad_agent: QP %d not supported\n", qpn);
331 ret = ERR_PTR(-EPROTONOSUPPORT);
335 /* Allocate structures */
336 mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
337 if (!mad_agent_priv) {
338 ret = ERR_PTR(-ENOMEM);
343 reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
345 ret = ERR_PTR(-ENOMEM);
350 /* Now, fill in the various structures */
351 mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
352 mad_agent_priv->reg_req = reg_req;
353 mad_agent_priv->agent.rmpp_version = rmpp_version;
354 mad_agent_priv->agent.device = device;
355 mad_agent_priv->agent.recv_handler = recv_handler;
356 mad_agent_priv->agent.send_handler = send_handler;
357 mad_agent_priv->agent.context = context;
358 mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
359 mad_agent_priv->agent.port_num = port_num;
360 mad_agent_priv->agent.flags = registration_flags;
361 spin_lock_init(&mad_agent_priv->lock);
362 INIT_LIST_HEAD(&mad_agent_priv->send_list);
363 INIT_LIST_HEAD(&mad_agent_priv->wait_list);
364 INIT_LIST_HEAD(&mad_agent_priv->done_list);
365 INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
366 INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
367 INIT_LIST_HEAD(&mad_agent_priv->local_list);
368 INIT_WORK(&mad_agent_priv->local_work, local_completions);
369 atomic_set(&mad_agent_priv->refcount, 1);
370 init_completion(&mad_agent_priv->comp);
372 spin_lock_irqsave(&port_priv->reg_lock, flags);
373 mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;
376 * Make sure MAD registration (if supplied)
377 * is non overlapping with any existing ones
380 mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
381 if (!is_vendor_class(mgmt_class)) {
382 class = port_priv->version[mad_reg_req->
383 mgmt_class_version].class;
385 method = class->method_table[mgmt_class];
387 if (method_in_use(&method,
392 ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
395 /* "New" vendor class range */
396 vendor = port_priv->version[mad_reg_req->
397 mgmt_class_version].vendor;
399 vclass = vendor_class_index(mgmt_class);
400 vendor_class = vendor->vendor_class[vclass];
402 if (is_vendor_method_in_use(
408 ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
	/* Add mad agent into port's agent list */
	list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	return &mad_agent_priv->agent;
error4:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
	kfree(reg_req);
error3:
	kfree(mad_agent_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_agent);
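
/*
 * Usage sketch (illustration only): registering an agent for a GSI class.
 * The values below (PerfMgmt class, class version 1, Get/Set methods) are
 * just an example, and example_send_handler/example_recv_handler are
 * hypothetical client callbacks.
 *
 *	struct ib_mad_reg_req reg_req = {};
 *	struct ib_mad_agent *agent;
 *
 *	reg_req.mgmt_class = IB_MGMT_CLASS_PERF_MGMT;
 *	reg_req.mgmt_class_version = 1;
 *	set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
 *	set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);
 *
 *	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI,
 *				      &reg_req, 0,	// no RMPP
 *				      example_send_handler,
 *				      example_recv_handler,
 *				      client_context, 0);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 *
 *	// ... send and receive MADs ...
 *
 *	// cancels outstanding sends and waits for them before returning
 *	ib_unregister_mad_agent(agent);
 */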
432 static inline int is_snooping_sends(int mad_snoop_flags)
434 return (mad_snoop_flags &
435 (/*IB_MAD_SNOOP_POSTED_SENDS |
436 IB_MAD_SNOOP_RMPP_SENDS |*/
437 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
438 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
441 static inline int is_snooping_recvs(int mad_snoop_flags)
443 return (mad_snoop_flags &
444 (IB_MAD_SNOOP_RECVS /*|
445 IB_MAD_SNOOP_RMPP_RECVS*/));
448 static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
449 struct ib_mad_snoop_private *mad_snoop_priv)
451 struct ib_mad_snoop_private **new_snoop_table;
455 spin_lock_irqsave(&qp_info->snoop_lock, flags);
456 /* Check for empty slot in array. */
457 for (i = 0; i < qp_info->snoop_table_size; i++)
458 if (!qp_info->snoop_table[i])
461 if (i == qp_info->snoop_table_size) {
463 new_snoop_table = krealloc(qp_info->snoop_table,
464 sizeof mad_snoop_priv *
465 (qp_info->snoop_table_size + 1),
467 if (!new_snoop_table) {
472 qp_info->snoop_table = new_snoop_table;
473 qp_info->snoop_table_size++;
475 qp_info->snoop_table[i] = mad_snoop_priv;
476 atomic_inc(&qp_info->snoop_count);
478 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
482 struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
484 enum ib_qp_type qp_type,
486 ib_mad_snoop_handler snoop_handler,
487 ib_mad_recv_handler recv_handler,
490 struct ib_mad_port_private *port_priv;
491 struct ib_mad_agent *ret;
492 struct ib_mad_snoop_private *mad_snoop_priv;
495 /* Validate parameters */
496 if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
497 (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
498 ret = ERR_PTR(-EINVAL);
501 qpn = get_spl_qp_index(qp_type);
503 ret = ERR_PTR(-EINVAL);
506 port_priv = ib_get_mad_port(device, port_num);
508 ret = ERR_PTR(-ENODEV);
511 /* Allocate structures */
512 mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
513 if (!mad_snoop_priv) {
514 ret = ERR_PTR(-ENOMEM);
518 /* Now, fill in the various structures */
519 mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
520 mad_snoop_priv->agent.device = device;
521 mad_snoop_priv->agent.recv_handler = recv_handler;
522 mad_snoop_priv->agent.snoop_handler = snoop_handler;
523 mad_snoop_priv->agent.context = context;
524 mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
525 mad_snoop_priv->agent.port_num = port_num;
526 mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
527 init_completion(&mad_snoop_priv->comp);
528 mad_snoop_priv->snoop_index = register_snoop_agent(
529 &port_priv->qp_info[qpn],
531 if (mad_snoop_priv->snoop_index < 0) {
532 ret = ERR_PTR(mad_snoop_priv->snoop_index);
536 atomic_set(&mad_snoop_priv->refcount, 1);
537 return &mad_snoop_priv->agent;
540 kfree(mad_snoop_priv);
EXPORT_SYMBOL(ib_register_mad_snoop);
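
/*
 * Usage sketch (illustration only): a snoop agent observes MAD traffic on
 * a special QP without owning it.  mad_snoop_flags selects what gets
 * reported; example_snoop_handler/example_recv_handler and client_context
 * are hypothetical.
 *
 *	agent = ib_register_mad_snoop(device, port_num, IB_QPT_GSI,
 *				      IB_MAD_SNOOP_RECVS |
 *				      IB_MAD_SNOOP_SEND_COMPLETIONS,
 *				      example_snoop_handler,
 *				      example_recv_handler,
 *				      client_context);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 */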
546 static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
548 if (atomic_dec_and_test(&mad_agent_priv->refcount))
549 complete(&mad_agent_priv->comp);
552 static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
554 if (atomic_dec_and_test(&mad_snoop_priv->refcount))
555 complete(&mad_snoop_priv->comp);
558 static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
560 struct ib_mad_port_private *port_priv;
563 /* Note that we could still be handling received MADs */
566 * Canceling all sends results in dropping received response
567 * MADs, preventing us from queuing additional work
569 cancel_mads(mad_agent_priv);
570 port_priv = mad_agent_priv->qp_info->port_priv;
571 cancel_delayed_work(&mad_agent_priv->timed_work);
573 spin_lock_irqsave(&port_priv->reg_lock, flags);
574 remove_mad_reg_req(mad_agent_priv);
575 list_del(&mad_agent_priv->agent_list);
576 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
578 flush_workqueue(port_priv->wq);
579 ib_cancel_rmpp_recvs(mad_agent_priv);
581 deref_mad_agent(mad_agent_priv);
582 wait_for_completion(&mad_agent_priv->comp);
584 kfree(mad_agent_priv->reg_req);
585 kfree(mad_agent_priv);
588 static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
590 struct ib_mad_qp_info *qp_info;
593 qp_info = mad_snoop_priv->qp_info;
594 spin_lock_irqsave(&qp_info->snoop_lock, flags);
595 qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
596 atomic_dec(&qp_info->snoop_count);
597 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
599 deref_snoop_agent(mad_snoop_priv);
600 wait_for_completion(&mad_snoop_priv->comp);
602 kfree(mad_snoop_priv);
606 * ib_unregister_mad_agent - Unregisters a client from using MAD services
608 void ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
610 struct ib_mad_agent_private *mad_agent_priv;
611 struct ib_mad_snoop_private *mad_snoop_priv;
613 /* If the TID is zero, the agent can only snoop. */
614 if (mad_agent->hi_tid) {
615 mad_agent_priv = container_of(mad_agent,
616 struct ib_mad_agent_private,
618 unregister_mad_agent(mad_agent_priv);
620 mad_snoop_priv = container_of(mad_agent,
621 struct ib_mad_snoop_private,
623 unregister_mad_snoop(mad_snoop_priv);
626 EXPORT_SYMBOL(ib_unregister_mad_agent);
628 static void dequeue_mad(struct ib_mad_list_head *mad_list)
630 struct ib_mad_queue *mad_queue;
633 BUG_ON(!mad_list->mad_queue);
634 mad_queue = mad_list->mad_queue;
635 spin_lock_irqsave(&mad_queue->lock, flags);
636 list_del(&mad_list->list);
638 spin_unlock_irqrestore(&mad_queue->lock, flags);
641 static void snoop_send(struct ib_mad_qp_info *qp_info,
642 struct ib_mad_send_buf *send_buf,
643 struct ib_mad_send_wc *mad_send_wc,
646 struct ib_mad_snoop_private *mad_snoop_priv;
650 spin_lock_irqsave(&qp_info->snoop_lock, flags);
651 for (i = 0; i < qp_info->snoop_table_size; i++) {
652 mad_snoop_priv = qp_info->snoop_table[i];
653 if (!mad_snoop_priv ||
654 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
657 atomic_inc(&mad_snoop_priv->refcount);
658 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
659 mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
660 send_buf, mad_send_wc);
661 deref_snoop_agent(mad_snoop_priv);
662 spin_lock_irqsave(&qp_info->snoop_lock, flags);
664 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
667 static void snoop_recv(struct ib_mad_qp_info *qp_info,
668 struct ib_mad_recv_wc *mad_recv_wc,
671 struct ib_mad_snoop_private *mad_snoop_priv;
675 spin_lock_irqsave(&qp_info->snoop_lock, flags);
676 for (i = 0; i < qp_info->snoop_table_size; i++) {
677 mad_snoop_priv = qp_info->snoop_table[i];
678 if (!mad_snoop_priv ||
679 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
682 atomic_inc(&mad_snoop_priv->refcount);
683 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
684 mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent, NULL,
686 deref_snoop_agent(mad_snoop_priv);
687 spin_lock_irqsave(&qp_info->snoop_lock, flags);
689 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
692 static void build_smp_wc(struct ib_qp *qp, struct ib_cqe *cqe, u16 slid,
693 u16 pkey_index, u8 port_num, struct ib_wc *wc)
695 memset(wc, 0, sizeof *wc);
697 wc->status = IB_WC_SUCCESS;
698 wc->opcode = IB_WC_RECV;
699 wc->pkey_index = pkey_index;
700 wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
705 wc->dlid_path_bits = 0;
706 wc->port_num = port_num;
709 static size_t mad_priv_size(const struct ib_mad_private *mp)
711 return sizeof(struct ib_mad_private) + mp->mad_size;
714 static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags)
716 size_t size = sizeof(struct ib_mad_private) + mad_size;
717 struct ib_mad_private *ret = kzalloc(size, flags);
720 ret->mad_size = mad_size;
725 static size_t port_mad_size(const struct ib_mad_port_private *port_priv)
727 return rdma_max_mad_size(port_priv->device, port_priv->port_num);
730 static size_t mad_priv_dma_size(const struct ib_mad_private *mp)
732 return sizeof(struct ib_grh) + mp->mad_size;
/*
 * Return 0 if SMP is to be sent
 * Return 1 if SMP was consumed locally (whether or not solicited)
 * Return < 0 if error
 */
740 static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
741 struct ib_mad_send_wr_private *mad_send_wr)
744 struct ib_smp *smp = mad_send_wr->send_buf.mad;
745 struct opa_smp *opa_smp = (struct opa_smp *)smp;
747 struct ib_mad_local_private *local;
748 struct ib_mad_private *mad_priv;
749 struct ib_mad_port_private *port_priv;
750 struct ib_mad_agent_private *recv_mad_agent = NULL;
751 struct ib_device *device = mad_agent_priv->agent.device;
754 struct ib_ud_wr *send_wr = &mad_send_wr->send_wr;
755 size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);
756 u16 out_mad_pkey_index = 0;
758 bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
759 mad_agent_priv->qp_info->port_priv->port_num);
761 if (rdma_cap_ib_switch(device) &&
762 smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
763 port_num = send_wr->port_num;
765 port_num = mad_agent_priv->agent.port_num;
768 * Directed route handling starts if the initial LID routed part of
769 * a request or the ending LID routed part of a response is empty.
770 * If we are at the start of the LID routed part, don't update the
771 * hop_ptr or hop_cnt. See section 14.2.2, Vol 1 IB spec.
773 if (opa && smp->class_version == OPA_SM_CLASS_VERSION) {
776 if ((opa_get_smp_direction(opa_smp)
777 ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
778 OPA_LID_PERMISSIVE &&
779 opa_smi_handle_dr_smp_send(opa_smp,
780 rdma_cap_ib_switch(device),
781 port_num) == IB_SMI_DISCARD) {
783 dev_err(&device->dev, "OPA Invalid directed route\n");
786 opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid);
787 if (opa_drslid != be32_to_cpu(OPA_LID_PERMISSIVE) &&
788 opa_drslid & 0xffff0000) {
790 dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n",
794 drslid = (u16)(opa_drslid & 0x0000ffff);
796 /* Check to post send on QP or process locally */
797 if (opa_smi_check_local_smp(opa_smp, device) == IB_SMI_DISCARD &&
798 opa_smi_check_local_returning_smp(opa_smp, device) == IB_SMI_DISCARD)
801 if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
803 smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) ==
806 dev_err(&device->dev, "Invalid directed route\n");
809 drslid = be16_to_cpu(smp->dr_slid);
811 /* Check to post send on QP or process locally */
812 if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
813 smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
817 local = kmalloc(sizeof *local, GFP_ATOMIC);
822 local->mad_priv = NULL;
823 local->recv_mad_agent = NULL;
824 mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC);
831 build_smp_wc(mad_agent_priv->agent.qp,
832 send_wr->wr.wr_cqe, drslid,
834 send_wr->port_num, &mad_wc);
836 if (opa && smp->base_version == OPA_MGMT_BASE_VERSION) {
837 mad_wc.byte_len = mad_send_wr->send_buf.hdr_len
838 + mad_send_wr->send_buf.data_len
839 + sizeof(struct ib_grh);
842 /* No GRH for DR SMP */
843 ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
844 (const struct ib_mad_hdr *)smp, mad_size,
845 (struct ib_mad_hdr *)mad_priv->mad,
846 &mad_size, &out_mad_pkey_index);
849 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
850 if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) &&
851 mad_agent_priv->agent.recv_handler) {
852 local->mad_priv = mad_priv;
853 local->recv_mad_agent = mad_agent_priv;
855 * Reference MAD agent until receive
856 * side of local completion handled
858 atomic_inc(&mad_agent_priv->refcount);
862 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
865 case IB_MAD_RESULT_SUCCESS:
866 /* Treat like an incoming receive MAD */
867 port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
868 mad_agent_priv->agent.port_num);
870 memcpy(mad_priv->mad, smp, mad_priv->mad_size);
871 recv_mad_agent = find_mad_agent(port_priv,
872 (const struct ib_mad_hdr *)mad_priv->mad);
874 if (!port_priv || !recv_mad_agent) {
876 * No receiving agent so drop packet and
877 * generate send completion.
882 local->mad_priv = mad_priv;
883 local->recv_mad_agent = recv_mad_agent;
892 local->mad_send_wr = mad_send_wr;
894 local->mad_send_wr->send_wr.pkey_index = out_mad_pkey_index;
895 local->return_wc_byte_len = mad_size;
897 /* Reference MAD agent until send side of local completion handled */
898 atomic_inc(&mad_agent_priv->refcount);
899 /* Queue local completion to local list */
900 spin_lock_irqsave(&mad_agent_priv->lock, flags);
901 list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
902 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
903 queue_work(mad_agent_priv->qp_info->port_priv->wq,
904 &mad_agent_priv->local_work);
static int get_pad_size(int hdr_len, int data_len, size_t mad_size)
{
	int seg_size, pad;
	seg_size = mad_size - hdr_len;
	if (data_len && seg_size) {
		pad = seg_size - data_len % seg_size;
		return pad == seg_size ? 0 : pad;
	} else
		return seg_size;
}
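
/*
 * Example (comment only): for an IB SA MAD, mad_size is
 * sizeof(struct ib_mad) = 256 and hdr_len is IB_MGMT_SA_HDR = 56, so each
 * segment carries seg_size = 200 payload bytes.  A data_len of 360 needs
 * pad = 200 - (360 % 200) = 40 so the last segment is completely filled,
 * while a data_len of 400 is already a multiple of 200 and gets pad = 0.
 */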
922 static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
924 struct ib_rmpp_segment *s, *t;
926 list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
932 static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
933 size_t mad_size, gfp_t gfp_mask)
935 struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
936 struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
937 struct ib_rmpp_segment *seg = NULL;
938 int left, seg_size, pad;
940 send_buf->seg_size = mad_size - send_buf->hdr_len;
941 send_buf->seg_rmpp_size = mad_size - IB_MGMT_RMPP_HDR;
942 seg_size = send_buf->seg_size;
945 /* Allocate data segments. */
946 for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
947 seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
949 free_send_rmpp_list(send_wr);
952 seg->num = ++send_buf->seg_count;
953 list_add_tail(&seg->list, &send_wr->rmpp_list);
956 /* Zero any padding */
958 memset(seg->data + seg_size - pad, 0, pad);
960 rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
962 rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
963 ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
965 send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
966 struct ib_rmpp_segment, list);
967 send_wr->last_ack_seg = send_wr->cur_seg;
int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
{
	return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
}
EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);
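
/*
 * Usage sketch (illustration only): an agent that wants to run RMPP itself
 * (for an RMPP-capable class such as IB_MGMT_CLASS_SUBN_ADM) registers with
 * the IB_MAD_USER_RMPP flag.  That makes ib_mad_kernel_rmpp_agent() return
 * false, so the core hands over whole segments instead of running its own
 * RMPP state machine for this agent.
 *
 *	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI, &reg_req,
 *				      IB_MGMT_RMPP_VERSION,
 *				      example_send_handler,
 *				      example_recv_handler,
 *				      client_context, IB_MAD_USER_RMPP);
 */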
977 struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
978 u32 remote_qpn, u16 pkey_index,
980 int hdr_len, int data_len,
984 struct ib_mad_agent_private *mad_agent_priv;
985 struct ib_mad_send_wr_private *mad_send_wr;
986 int pad, message_size, ret, size;
991 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
994 opa = rdma_cap_opa_mad(mad_agent->device, mad_agent->port_num);
996 if (opa && base_version == OPA_MGMT_BASE_VERSION)
997 mad_size = sizeof(struct opa_mad);
999 mad_size = sizeof(struct ib_mad);
1001 pad = get_pad_size(hdr_len, data_len, mad_size);
1002 message_size = hdr_len + data_len + pad;
1004 if (ib_mad_kernel_rmpp_agent(mad_agent)) {
1005 if (!rmpp_active && message_size > mad_size)
1006 return ERR_PTR(-EINVAL);
1008 if (rmpp_active || message_size > mad_size)
1009 return ERR_PTR(-EINVAL);
1011 size = rmpp_active ? hdr_len : mad_size;
1012 buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
1014 return ERR_PTR(-ENOMEM);
1016 mad_send_wr = buf + size;
1017 INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
1018 mad_send_wr->send_buf.mad = buf;
1019 mad_send_wr->send_buf.hdr_len = hdr_len;
1020 mad_send_wr->send_buf.data_len = data_len;
1021 mad_send_wr->pad = pad;
1023 mad_send_wr->mad_agent_priv = mad_agent_priv;
1024 mad_send_wr->sg_list[0].length = hdr_len;
1025 mad_send_wr->sg_list[0].lkey = mad_agent->qp->pd->local_dma_lkey;
1027 /* OPA MADs don't have to be the full 2048 bytes */
1028 if (opa && base_version == OPA_MGMT_BASE_VERSION &&
1029 data_len < mad_size - hdr_len)
1030 mad_send_wr->sg_list[1].length = data_len;
1032 mad_send_wr->sg_list[1].length = mad_size - hdr_len;
1034 mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey;
1036 mad_send_wr->mad_list.cqe.done = ib_mad_send_done;
1038 mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;
1039 mad_send_wr->send_wr.wr.sg_list = mad_send_wr->sg_list;
1040 mad_send_wr->send_wr.wr.num_sge = 2;
1041 mad_send_wr->send_wr.wr.opcode = IB_WR_SEND;
1042 mad_send_wr->send_wr.wr.send_flags = IB_SEND_SIGNALED;
1043 mad_send_wr->send_wr.remote_qpn = remote_qpn;
1044 mad_send_wr->send_wr.remote_qkey = IB_QP_SET_QKEY;
1045 mad_send_wr->send_wr.pkey_index = pkey_index;
1048 ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask);
1051 return ERR_PTR(ret);
	mad_send_wr->send_buf.mad_agent = mad_agent;
	atomic_inc(&mad_agent_priv->refcount);
	return &mad_send_wr->send_buf;
}
EXPORT_SYMBOL(ib_create_send_mad);
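
/*
 * Usage sketch (illustration only): building a single-segment request.
 * The hdr_len/data_len values assume a plain 256-byte IB MAD; "ah" is an
 * address handle for the destination that the caller must have created
 * (for GSI traffic typically from a path record or a receive completion).
 *
 *	struct ib_mad_send_buf *msg;
 *
 *	msg = ib_create_send_mad(agent, remote_qpn, pkey_index,
 *				 0,			// rmpp_active
 *				 IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
 *				 GFP_KERNEL, IB_MGMT_BASE_VERSION);
 *	if (IS_ERR(msg))
 *		return PTR_ERR(msg);
 *
 *	msg->ah = ah;
 *	msg->timeout_ms = 1000;	// expect a response within a second
 *	msg->retries = 3;
 *	// fill msg->mad (MAD header plus payload) before posting
 */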
int ib_get_mad_data_offset(u8 mgmt_class)
{
	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
		return IB_MGMT_SA_HDR;
	else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
		 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
		 (mgmt_class == IB_MGMT_CLASS_BIS))
		return IB_MGMT_DEVICE_HDR;
	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return IB_MGMT_VENDOR_HDR;
	else
		return IB_MGMT_MAD_HDR;
}
EXPORT_SYMBOL(ib_get_mad_data_offset);
int ib_is_mad_class_rmpp(u8 mgmt_class)
{
	if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_BIS) ||
	    ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
	     (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
		return 1;
	return 0;
}
EXPORT_SYMBOL(ib_is_mad_class_rmpp);
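
/*
 * Usage sketch (illustration only): the two helpers above are normally
 * used together when sizing a send buffer for an arbitrary management
 * class.  "mgmt_class" and "data_len" are hypothetical caller values.
 *
 *	int hdr_len = ib_get_mad_data_offset(mgmt_class);
 *	int rmpp_active = ib_is_mad_class_rmpp(mgmt_class) &&
 *			  hdr_len + data_len > sizeof(struct ib_mad);
 *
 *	// hdr_len and rmpp_active then feed the ib_create_send_mad()
 *	// call shown above
 */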
void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct list_head *list;

	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);
	list = &mad_send_wr->cur_seg->list;

	if (mad_send_wr->cur_seg->num < seg_num) {
		list_for_each_entry(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	} else if (mad_send_wr->cur_seg->num > seg_num) {
		list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	}
	return mad_send_wr->cur_seg->data;
}
EXPORT_SYMBOL(ib_get_rmpp_segment);
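
/*
 * Usage sketch (illustration only): when a send buffer was created with
 * rmpp_active set, the payload lives in seg_count segments of seg_size
 * bytes rather than one flat buffer, so it is filled segment by segment.
 * "payload" is a hypothetical source buffer of msg->data_len bytes.
 *
 *	const u8 *src = payload;
 *	int i, len, left = msg->data_len;
 *
 *	for (i = 1; i <= msg->seg_count; i++) {
 *		len = min_t(int, left, msg->seg_size);
 *		memcpy(ib_get_rmpp_segment(msg, i), src, len);
 *		src += len;
 *		left -= len;
 *	}
 */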
1112 static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
1114 if (mad_send_wr->send_buf.seg_count)
1115 return ib_get_rmpp_segment(&mad_send_wr->send_buf,
1116 mad_send_wr->seg_num);
1118 return mad_send_wr->send_buf.mad +
1119 mad_send_wr->send_buf.hdr_len;
1122 void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
1124 struct ib_mad_agent_private *mad_agent_priv;
1125 struct ib_mad_send_wr_private *mad_send_wr;
1127 mad_agent_priv = container_of(send_buf->mad_agent,
1128 struct ib_mad_agent_private, agent);
1129 mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
1132 free_send_rmpp_list(mad_send_wr);
1133 kfree(send_buf->mad);
1134 deref_mad_agent(mad_agent_priv);
1136 EXPORT_SYMBOL(ib_free_send_mad);
1138 int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
1140 struct ib_mad_qp_info *qp_info;
1141 struct list_head *list;
1142 struct ib_send_wr *bad_send_wr;
1143 struct ib_mad_agent *mad_agent;
1145 unsigned long flags;
1148 /* Set WR ID to find mad_send_wr upon completion */
1149 qp_info = mad_send_wr->mad_agent_priv->qp_info;
1150 mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
1151 mad_send_wr->mad_list.cqe.done = ib_mad_send_done;
1152 mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;
1154 mad_agent = mad_send_wr->send_buf.mad_agent;
1155 sge = mad_send_wr->sg_list;
1156 sge[0].addr = ib_dma_map_single(mad_agent->device,
1157 mad_send_wr->send_buf.mad,
1160 if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))
1163 mad_send_wr->header_mapping = sge[0].addr;
1165 sge[1].addr = ib_dma_map_single(mad_agent->device,
1166 ib_get_payload(mad_send_wr),
1169 if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
1170 ib_dma_unmap_single(mad_agent->device,
1171 mad_send_wr->header_mapping,
1172 sge[0].length, DMA_TO_DEVICE);
1175 mad_send_wr->payload_mapping = sge[1].addr;
1177 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
1178 if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
1179 ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr,
1181 list = &qp_info->send_queue.list;
1184 list = &qp_info->overflow_list;
1188 qp_info->send_queue.count++;
1189 list_add_tail(&mad_send_wr->mad_list.list, list);
1191 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
1193 ib_dma_unmap_single(mad_agent->device,
1194 mad_send_wr->header_mapping,
1195 sge[0].length, DMA_TO_DEVICE);
1196 ib_dma_unmap_single(mad_agent->device,
1197 mad_send_wr->payload_mapping,
1198 sge[1].length, DMA_TO_DEVICE);
/*
 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
 * with the registered client
 */
int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
		     struct ib_mad_send_buf **bad_send_buf)
{
1210 struct ib_mad_agent_private *mad_agent_priv;
1211 struct ib_mad_send_buf *next_send_buf;
1212 struct ib_mad_send_wr_private *mad_send_wr;
1213 unsigned long flags;
1216 /* Walk list of send WRs and post each on send list */
1217 for (; send_buf; send_buf = next_send_buf) {
1219 mad_send_wr = container_of(send_buf,
1220 struct ib_mad_send_wr_private,
1222 mad_agent_priv = mad_send_wr->mad_agent_priv;
1224 if (!send_buf->mad_agent->send_handler ||
1225 (send_buf->timeout_ms &&
1226 !send_buf->mad_agent->recv_handler)) {
1231 if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
1232 if (mad_agent_priv->agent.rmpp_version) {
1239 * Save pointer to next work request to post in case the
1240 * current one completes, and the user modifies the work
1241 * request associated with the completion
1243 next_send_buf = send_buf->next;
1244 mad_send_wr->send_wr.ah = send_buf->ah;
1246 if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
1247 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
1248 ret = handle_outgoing_dr_smp(mad_agent_priv,
1250 if (ret < 0) /* error */
1252 else if (ret == 1) /* locally consumed */
1256 mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
1257 /* Timeout will be updated after send completes */
1258 mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
1259 mad_send_wr->max_retries = send_buf->retries;
1260 mad_send_wr->retries_left = send_buf->retries;
1261 send_buf->retries = 0;
1262 /* Reference for work request to QP + response */
1263 mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
1264 mad_send_wr->status = IB_WC_SUCCESS;
1266 /* Reference MAD agent until send completes */
1267 atomic_inc(&mad_agent_priv->refcount);
1268 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1269 list_add_tail(&mad_send_wr->agent_list,
1270 &mad_agent_priv->send_list);
1271 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1273 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
1274 ret = ib_send_rmpp_mad(mad_send_wr);
1275 if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
1276 ret = ib_send_mad(mad_send_wr);
1278 ret = ib_send_mad(mad_send_wr);
1280 /* Fail send request */
1281 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1282 list_del(&mad_send_wr->agent_list);
1283 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1284 atomic_dec(&mad_agent_priv->refcount);
error:
	if (bad_send_buf)
		*bad_send_buf = send_buf;
	return ret;
}
EXPORT_SYMBOL(ib_post_send_mad);
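
/*
 * Usage sketch (illustration only): posting the request and releasing the
 * buffer from the (hypothetical) send completion handler.  The buffer must
 * not be freed until the send handler has run, even if the response
 * arrives first.
 *
 *	ret = ib_post_send_mad(msg, NULL);
 *	if (ret) {
 *		ib_free_send_mad(msg);	// never posted, free it ourselves
 *		return ret;
 *	}
 *
 *	static void example_send_handler(struct ib_mad_agent *agent,
 *					 struct ib_mad_send_wc *send_wc)
 *	{
 *		// send_wc->status is IB_WC_SUCCESS, a completion error, or
 *		// IB_WC_RESP_TIMEOUT_ERR if no response arrived in time
 *		ib_free_send_mad(send_wc->send_buf);
 *	}
 */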
/*
 * ib_free_recv_mad - Returns data buffers used to receive
 * a MAD to the access layer
 */
void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *priv;
	struct list_head free_list;

	INIT_LIST_HEAD(&free_list);
	list_splice_init(&mad_recv_wc->rmpp_list, &free_list);

	list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
				 &free_list, list) {
		mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
					   recv_buf);
		mad_priv_hdr = container_of(mad_recv_wc,
					    struct ib_mad_private_header,
					    recv_wc);
		priv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);
		kfree(priv);
	}
}
EXPORT_SYMBOL(ib_free_recv_mad);
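
/*
 * Usage sketch (illustration only): a reassembled RMPP receive is handed to
 * the client as a chain of ib_mad_recv_buf segments on wc->rmpp_list, with
 * wc->mad_len giving the total flattened length.  A hypothetical consumer
 * that wants a flat copy (modeled on what ib_umad does) copies the first
 * segment whole and later segments only past the per-segment class header,
 * then returns the buffers:
 *
 *	struct ib_mad_recv_buf *seg = &wc->recv_buf;
 *	int hdr = ib_get_mad_data_offset(seg->mad->mad_hdr.mgmt_class);
 *	u8 *dst = dest;			// hypothetical flat buffer
 *	int left = wc->mad_len;
 *	int len = min_t(int, left, wc->mad_seg_size);
 *
 *	memcpy(dst, seg->mad, len);	// first (or only) segment
 *	for (dst += len, left -= len; left > 0; dst += len, left -= len) {
 *		seg = container_of(seg->list.next, struct ib_mad_recv_buf,
 *				   list);
 *		len = min_t(int, left, wc->mad_seg_size - hdr);
 *		memcpy(dst, (u8 *)seg->mad + hdr, len);
 *	}
 *	ib_free_recv_mad(wc);
 */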
1324 struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
1326 ib_mad_send_handler send_handler,
1327 ib_mad_recv_handler recv_handler,
1330 return ERR_PTR(-EINVAL); /* XXX: for now */
1332 EXPORT_SYMBOL(ib_redirect_mad_qp);
1334 int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
1337 dev_err(&mad_agent->device->dev,
1338 "ib_process_mad_wc() not implemented yet\n");
1341 EXPORT_SYMBOL(ib_process_mad_wc);
1343 static int method_in_use(struct ib_mad_mgmt_method_table **method,
1344 struct ib_mad_reg_req *mad_reg_req)
1348 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
1349 if ((*method)->agent[i]) {
1350 pr_err("Method %d already in use\n", i);
1357 static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
1359 /* Allocate management method table */
1360 *method = kzalloc(sizeof **method, GFP_ATOMIC);
1361 return (*method) ? 0 : (-ENOMEM);
1365 * Check to see if there are any methods still in use
1367 static int check_method_table(struct ib_mad_mgmt_method_table *method)
1371 for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
1372 if (method->agent[i])
1378 * Check to see if there are any method tables for this class still in use
1380 static int check_class_table(struct ib_mad_mgmt_class_table *class)
1384 for (i = 0; i < MAX_MGMT_CLASS; i++)
1385 if (class->method_table[i])
1390 static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
1394 for (i = 0; i < MAX_MGMT_OUI; i++)
1395 if (vendor_class->method_table[i])
1400 static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
1405 for (i = 0; i < MAX_MGMT_OUI; i++)
1406 /* Is there matching OUI for this vendor class ? */
1407 if (!memcmp(vendor_class->oui[i], oui, 3))
1413 static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
1417 for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
1418 if (vendor->vendor_class[i])
1424 static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
1425 struct ib_mad_agent_private *agent)
1429 /* Remove any methods for this mad agent */
1430 for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
1431 if (method->agent[i] == agent) {
1432 method->agent[i] = NULL;
1437 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1438 struct ib_mad_agent_private *agent_priv,
1441 struct ib_mad_port_private *port_priv;
1442 struct ib_mad_mgmt_class_table **class;
1443 struct ib_mad_mgmt_method_table **method;
1446 port_priv = agent_priv->qp_info->port_priv;
1447 class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
1449 /* Allocate management class table for "new" class version */
1450 *class = kzalloc(sizeof **class, GFP_ATOMIC);
1456 /* Allocate method table for this management class */
1457 method = &(*class)->method_table[mgmt_class];
1458 if ((ret = allocate_method_table(method)))
1461 method = &(*class)->method_table[mgmt_class];
1463 /* Allocate method table for this management class */
1464 if ((ret = allocate_method_table(method)))
1469 /* Now, make sure methods are not already in use */
1470 if (method_in_use(method, mad_reg_req))
1473 /* Finally, add in methods being registered */
1474 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1475 (*method)->agent[i] = agent_priv;
1480 /* Remove any methods for this mad agent */
1481 remove_methods_mad_agent(*method, agent_priv);
1482 /* Now, check to see if there are any methods in use */
1483 if (!check_method_table(*method)) {
1484 /* If not, release management method table */
1497 static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1498 struct ib_mad_agent_private *agent_priv)
1500 struct ib_mad_port_private *port_priv;
1501 struct ib_mad_mgmt_vendor_class_table **vendor_table;
1502 struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
1503 struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
1504 struct ib_mad_mgmt_method_table **method;
1505 int i, ret = -ENOMEM;
1508 /* "New" vendor (with OUI) class */
1509 vclass = vendor_class_index(mad_reg_req->mgmt_class);
1510 port_priv = agent_priv->qp_info->port_priv;
1511 vendor_table = &port_priv->version[
1512 mad_reg_req->mgmt_class_version].vendor;
1513 if (!*vendor_table) {
1514 /* Allocate mgmt vendor class table for "new" class version */
1515 vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
1519 *vendor_table = vendor;
1521 if (!(*vendor_table)->vendor_class[vclass]) {
1522 /* Allocate table for this management vendor class */
1523 vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
1527 (*vendor_table)->vendor_class[vclass] = vendor_class;
1529 for (i = 0; i < MAX_MGMT_OUI; i++) {
1530 /* Is there matching OUI for this vendor class ? */
1531 if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
1532 mad_reg_req->oui, 3)) {
1533 method = &(*vendor_table)->vendor_class[
1534 vclass]->method_table[i];
1539 for (i = 0; i < MAX_MGMT_OUI; i++) {
1540 /* OUI slot available ? */
1541 if (!is_vendor_oui((*vendor_table)->vendor_class[
1543 method = &(*vendor_table)->vendor_class[
1544 vclass]->method_table[i];
1546 /* Allocate method table for this OUI */
1547 if ((ret = allocate_method_table(method)))
1549 memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
1550 mad_reg_req->oui, 3);
1554 dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n");
1558 /* Now, make sure methods are not already in use */
1559 if (method_in_use(method, mad_reg_req))
1562 /* Finally, add in methods being registered */
1563 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1564 (*method)->agent[i] = agent_priv;
1569 /* Remove any methods for this mad agent */
1570 remove_methods_mad_agent(*method, agent_priv);
1571 /* Now, check to see if there are any methods in use */
1572 if (!check_method_table(*method)) {
1573 /* If not, release management method table */
1580 (*vendor_table)->vendor_class[vclass] = NULL;
1581 kfree(vendor_class);
1585 *vendor_table = NULL;
1592 static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
1594 struct ib_mad_port_private *port_priv;
1595 struct ib_mad_mgmt_class_table *class;
1596 struct ib_mad_mgmt_method_table *method;
1597 struct ib_mad_mgmt_vendor_class_table *vendor;
1598 struct ib_mad_mgmt_vendor_class *vendor_class;
1603 * Was MAD registration request supplied
1604 * with original registration ?
1606 if (!agent_priv->reg_req) {
1610 port_priv = agent_priv->qp_info->port_priv;
1611 mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
1612 class = port_priv->version[
1613 agent_priv->reg_req->mgmt_class_version].class;
1617 method = class->method_table[mgmt_class];
1619 /* Remove any methods for this mad agent */
1620 remove_methods_mad_agent(method, agent_priv);
1621 /* Now, check to see if there are any methods still in use */
1622 if (!check_method_table(method)) {
1623 /* If not, release management method table */
1625 class->method_table[mgmt_class] = NULL;
1626 /* Any management classes left ? */
1627 if (!check_class_table(class)) {
1628 /* If not, release management class table */
1631 agent_priv->reg_req->
1632 mgmt_class_version].class = NULL;
1638 if (!is_vendor_class(mgmt_class))
1641 /* normalize mgmt_class to vendor range 2 */
1642 mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
1643 vendor = port_priv->version[
1644 agent_priv->reg_req->mgmt_class_version].vendor;
1649 vendor_class = vendor->vendor_class[mgmt_class];
1651 index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
1654 method = vendor_class->method_table[index];
1656 /* Remove any methods for this mad agent */
1657 remove_methods_mad_agent(method, agent_priv);
1659 * Now, check to see if there are
1660 * any methods still in use
1662 if (!check_method_table(method)) {
1663 /* If not, release management method table */
1665 vendor_class->method_table[index] = NULL;
1666 memset(vendor_class->oui[index], 0, 3);
1667 /* Any OUIs left ? */
1668 if (!check_vendor_class(vendor_class)) {
1669 /* If not, release vendor class table */
1670 kfree(vendor_class);
1671 vendor->vendor_class[mgmt_class] = NULL;
1672 /* Any other vendor classes left ? */
1673 if (!check_vendor_table(vendor)) {
1676 agent_priv->reg_req->
1677 mgmt_class_version].
1689 static struct ib_mad_agent_private *
1690 find_mad_agent(struct ib_mad_port_private *port_priv,
1691 const struct ib_mad_hdr *mad_hdr)
1693 struct ib_mad_agent_private *mad_agent = NULL;
1694 unsigned long flags;
1696 spin_lock_irqsave(&port_priv->reg_lock, flags);
1697 if (ib_response_mad(mad_hdr)) {
1699 struct ib_mad_agent_private *entry;
1702 * Routing is based on high 32 bits of transaction ID
1705 hi_tid = be64_to_cpu(mad_hdr->tid) >> 32;
1706 list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
1707 if (entry->agent.hi_tid == hi_tid) {
1713 struct ib_mad_mgmt_class_table *class;
1714 struct ib_mad_mgmt_method_table *method;
1715 struct ib_mad_mgmt_vendor_class_table *vendor;
1716 struct ib_mad_mgmt_vendor_class *vendor_class;
1717 const struct ib_vendor_mad *vendor_mad;
1721 * Routing is based on version, class, and method
1722 * For "newer" vendor MADs, also based on OUI
1724 if (mad_hdr->class_version >= MAX_MGMT_VERSION)
1726 if (!is_vendor_class(mad_hdr->mgmt_class)) {
1727 class = port_priv->version[
1728 mad_hdr->class_version].class;
1731 if (convert_mgmt_class(mad_hdr->mgmt_class) >=
1732 ARRAY_SIZE(class->method_table))
1734 method = class->method_table[convert_mgmt_class(
1735 mad_hdr->mgmt_class)];
1737 mad_agent = method->agent[mad_hdr->method &
1738 ~IB_MGMT_METHOD_RESP];
1740 vendor = port_priv->version[
1741 mad_hdr->class_version].vendor;
1744 vendor_class = vendor->vendor_class[vendor_class_index(
1745 mad_hdr->mgmt_class)];
1748 /* Find matching OUI */
1749 vendor_mad = (const struct ib_vendor_mad *)mad_hdr;
1750 index = find_vendor_oui(vendor_class, vendor_mad->oui);
1753 method = vendor_class->method_table[index];
1755 mad_agent = method->agent[mad_hdr->method &
1756 ~IB_MGMT_METHOD_RESP];
1762 if (mad_agent->agent.recv_handler)
1763 atomic_inc(&mad_agent->refcount);
1765 dev_notice(&port_priv->device->dev,
1766 "No receive handler for client %p on port %d\n",
1767 &mad_agent->agent, port_priv->port_num);
1772 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
1777 static int validate_mad(const struct ib_mad_hdr *mad_hdr,
1778 const struct ib_mad_qp_info *qp_info,
1782 u32 qp_num = qp_info->qp->qp_num;
1784 /* Make sure MAD base version is understood */
1785 if (mad_hdr->base_version != IB_MGMT_BASE_VERSION &&
1786 (!opa || mad_hdr->base_version != OPA_MGMT_BASE_VERSION)) {
1787 pr_err("MAD received with unsupported base version %d %s\n",
1788 mad_hdr->base_version, opa ? "(opa)" : "");
1792 /* Filter SMI packets sent to other than QP0 */
1793 if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
1794 (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
1798 /* CM attributes other than ClassPortInfo only use Send method */
1799 if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_CM) &&
1800 (mad_hdr->attr_id != IB_MGMT_CLASSPORTINFO_ATTR_ID) &&
1801 (mad_hdr->method != IB_MGMT_METHOD_SEND))
1803 /* Filter GSI packets sent to QP0 */
1812 static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv,
1813 const struct ib_mad_hdr *mad_hdr)
1815 struct ib_rmpp_mad *rmpp_mad;
1817 rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
1818 return !mad_agent_priv->agent.rmpp_version ||
1819 !ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) ||
1820 !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
1821 IB_MGMT_RMPP_FLAG_ACTIVE) ||
1822 (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
1825 static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr,
1826 const struct ib_mad_recv_wc *rwc)
1828 return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class ==
1829 rwc->recv_buf.mad->mad_hdr.mgmt_class;
1832 static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
1833 const struct ib_mad_send_wr_private *wr,
1834 const struct ib_mad_recv_wc *rwc )
1836 struct rdma_ah_attr attr;
1837 u8 send_resp, rcv_resp;
1839 struct ib_device *device = mad_agent_priv->agent.device;
1840 u8 port_num = mad_agent_priv->agent.port_num;
1844 send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad);
1845 rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr);
1847 if (send_resp == rcv_resp)
1848 /* both requests, or both responses. GIDs different */
1851 if (rdma_query_ah(wr->send_buf.ah, &attr))
1852 /* Assume not equal, to avoid false positives. */
1855 has_grh = !!(rdma_ah_get_ah_flags(&attr) & IB_AH_GRH);
1856 if (has_grh != !!(rwc->wc->wc_flags & IB_WC_GRH))
1857 /* one has GID, other does not. Assume different */
1860 if (!send_resp && rcv_resp) {
1861 /* is request/response. */
1863 if (ib_get_cached_lmc(device, port_num, &lmc))
1865 return (!lmc || !((rdma_ah_get_path_bits(&attr) ^
1866 rwc->wc->dlid_path_bits) &
1869 const struct ib_global_route *grh =
1870 rdma_ah_read_grh(&attr);
1872 if (ib_get_cached_gid(device, port_num,
1873 grh->sgid_index, &sgid, NULL))
1875 return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
1881 return rdma_ah_get_dlid(&attr) == rwc->wc->slid;
1883 return !memcmp(rdma_ah_read_grh(&attr)->dgid.raw,
1884 rwc->recv_buf.grh->sgid.raw,
1888 static inline int is_direct(u8 class)
1890 return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
1893 struct ib_mad_send_wr_private*
1894 ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
1895 const struct ib_mad_recv_wc *wc)
1897 struct ib_mad_send_wr_private *wr;
1898 const struct ib_mad_hdr *mad_hdr;
1900 mad_hdr = &wc->recv_buf.mad->mad_hdr;
1902 list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
1903 if ((wr->tid == mad_hdr->tid) &&
1904 rcv_has_same_class(wr, wc) &&
1906 * Don't check GID for direct routed MADs.
1907 * These might have permissive LIDs.
1909 (is_direct(mad_hdr->mgmt_class) ||
1910 rcv_has_same_gid(mad_agent_priv, wr, wc)))
1911 return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1915 * It's possible to receive the response before we've
1916 * been notified that the send has completed
1918 list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
1919 if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) &&
1920 wr->tid == mad_hdr->tid &&
1922 rcv_has_same_class(wr, wc) &&
1924 * Don't check GID for direct routed MADs.
1925 * These might have permissive LIDs.
1927 (is_direct(mad_hdr->mgmt_class) ||
1928 rcv_has_same_gid(mad_agent_priv, wr, wc)))
1929 /* Verify request has not been canceled */
1930 return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1935 void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
1937 mad_send_wr->timeout = 0;
1938 if (mad_send_wr->refcount == 1)
1939 list_move_tail(&mad_send_wr->agent_list,
1940 &mad_send_wr->mad_agent_priv->done_list);
1943 static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
1944 struct ib_mad_recv_wc *mad_recv_wc)
1946 struct ib_mad_send_wr_private *mad_send_wr;
1947 struct ib_mad_send_wc mad_send_wc;
1948 unsigned long flags;
1950 INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
1951 list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
1952 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
1953 mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
1956 deref_mad_agent(mad_agent_priv);
1961 /* Complete corresponding request */
1962 if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) {
1963 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1964 mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
1966 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1967 if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)
1968 && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class)
1969 && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr)
1970 & IB_MGMT_RMPP_FLAG_ACTIVE)) {
1971 /* user rmpp is in effect
1972 * and this is an active RMPP MAD
1974 mad_agent_priv->agent.recv_handler(
1975 &mad_agent_priv->agent, NULL,
1977 atomic_dec(&mad_agent_priv->refcount);
1979 /* not user rmpp, revert to normal behavior and
1981 ib_free_recv_mad(mad_recv_wc);
1982 deref_mad_agent(mad_agent_priv);
1986 ib_mark_mad_done(mad_send_wr);
1987 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1989 /* Defined behavior is to complete response before request */
1990 mad_agent_priv->agent.recv_handler(
1991 &mad_agent_priv->agent,
1992 &mad_send_wr->send_buf,
1994 atomic_dec(&mad_agent_priv->refcount);
1996 mad_send_wc.status = IB_WC_SUCCESS;
1997 mad_send_wc.vendor_err = 0;
1998 mad_send_wc.send_buf = &mad_send_wr->send_buf;
1999 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
2002 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, NULL,
2004 deref_mad_agent(mad_agent_priv);
2008 static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv,
2009 const struct ib_mad_qp_info *qp_info,
2010 const struct ib_wc *wc,
2012 struct ib_mad_private *recv,
2013 struct ib_mad_private *response)
2015 enum smi_forward_action retsmi;
2016 struct ib_smp *smp = (struct ib_smp *)recv->mad;
2018 if (smi_handle_dr_smp_recv(smp,
2019 rdma_cap_ib_switch(port_priv->device),
2021 port_priv->device->phys_port_cnt) ==
2023 return IB_SMI_DISCARD;
2025 retsmi = smi_check_forward_dr_smp(smp);
2026 if (retsmi == IB_SMI_LOCAL)
2027 return IB_SMI_HANDLE;
2029 if (retsmi == IB_SMI_SEND) { /* don't forward */
2030 if (smi_handle_dr_smp_send(smp,
2031 rdma_cap_ib_switch(port_priv->device),
2032 port_num) == IB_SMI_DISCARD)
2033 return IB_SMI_DISCARD;
2035 if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD)
2036 return IB_SMI_DISCARD;
2037 } else if (rdma_cap_ib_switch(port_priv->device)) {
2038 /* forward case for switches */
2039 memcpy(response, recv, mad_priv_size(response));
2040 response->header.recv_wc.wc = &response->header.wc;
2041 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
2042 response->header.recv_wc.recv_buf.grh = &response->grh;
2044 agent_send_response((const struct ib_mad_hdr *)response->mad,
2047 smi_get_fwd_port(smp),
2048 qp_info->qp->qp_num,
2052 return IB_SMI_DISCARD;
2054 return IB_SMI_HANDLE;
2057 static bool generate_unmatched_resp(const struct ib_mad_private *recv,
2058 struct ib_mad_private *response,
2059 size_t *resp_len, bool opa)
2061 const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad;
2062 struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad;
2064 if (recv_hdr->method == IB_MGMT_METHOD_GET ||
2065 recv_hdr->method == IB_MGMT_METHOD_SET) {
2066 memcpy(response, recv, mad_priv_size(response));
2067 response->header.recv_wc.wc = &response->header.wc;
2068 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
2069 response->header.recv_wc.recv_buf.grh = &response->grh;
2070 resp_hdr->method = IB_MGMT_METHOD_GET_RESP;
2071 resp_hdr->status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
2072 if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
2073 resp_hdr->status |= IB_SMP_DIRECTION;
2075 if (opa && recv_hdr->base_version == OPA_MGMT_BASE_VERSION) {
2076 if (recv_hdr->mgmt_class ==
2077 IB_MGMT_CLASS_SUBN_LID_ROUTED ||
2078 recv_hdr->mgmt_class ==
2079 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
2080 *resp_len = opa_get_smp_header_size(
2081 (struct opa_smp *)recv->mad);
2083 *resp_len = sizeof(struct ib_mad_hdr);
2092 static enum smi_action
2093 handle_opa_smi(struct ib_mad_port_private *port_priv,
2094 struct ib_mad_qp_info *qp_info,
2097 struct ib_mad_private *recv,
2098 struct ib_mad_private *response)
2100 enum smi_forward_action retsmi;
2101 struct opa_smp *smp = (struct opa_smp *)recv->mad;
2103 if (opa_smi_handle_dr_smp_recv(smp,
2104 rdma_cap_ib_switch(port_priv->device),
2106 port_priv->device->phys_port_cnt) ==
2108 return IB_SMI_DISCARD;
2110 retsmi = opa_smi_check_forward_dr_smp(smp);
2111 if (retsmi == IB_SMI_LOCAL)
2112 return IB_SMI_HANDLE;
2114 if (retsmi == IB_SMI_SEND) { /* don't forward */
2115 if (opa_smi_handle_dr_smp_send(smp,
2116 rdma_cap_ib_switch(port_priv->device),
2117 port_num) == IB_SMI_DISCARD)
2118 return IB_SMI_DISCARD;
2120 if (opa_smi_check_local_smp(smp, port_priv->device) ==
2122 return IB_SMI_DISCARD;
2124 } else if (rdma_cap_ib_switch(port_priv->device)) {
2125 /* forward case for switches */
2126 memcpy(response, recv, mad_priv_size(response));
2127 response->header.recv_wc.wc = &response->header.wc;
2128 response->header.recv_wc.recv_buf.opa_mad =
2129 (struct opa_mad *)response->mad;
2130 response->header.recv_wc.recv_buf.grh = &response->grh;
2132 agent_send_response((const struct ib_mad_hdr *)response->mad,
2135 opa_smi_get_fwd_port(smp),
2136 qp_info->qp->qp_num,
2137 recv->header.wc.byte_len,
2140 return IB_SMI_DISCARD;
2143 return IB_SMI_HANDLE;
2146 static enum smi_action
2147 handle_smi(struct ib_mad_port_private *port_priv,
2148 struct ib_mad_qp_info *qp_info,
2151 struct ib_mad_private *recv,
2152 struct ib_mad_private *response,
2155 struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad;
2157 if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION &&
2158 mad_hdr->class_version == OPA_SM_CLASS_VERSION)
2159 return handle_opa_smi(port_priv, qp_info, wc, port_num, recv,
2160 response);
2162 return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response);
2163 }
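/*
 * Completion handler for MAD receive work requests: unmap the buffer,
 * let the SMI code and then the driver's process_mad hook look at the
 * MAD, hand it to the matching agent (or generate an unmatched
 * response), and finally repost a receive WR on the QP.
 */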
2165 static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
2167 struct ib_mad_port_private *port_priv = cq->cq_context;
2168 struct ib_mad_list_head *mad_list =
2169 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
2170 struct ib_mad_qp_info *qp_info;
2171 struct ib_mad_private_header *mad_priv_hdr;
2172 struct ib_mad_private *recv, *response = NULL;
2173 struct ib_mad_agent_private *mad_agent;
2174 int port_num;
2175 int ret = IB_MAD_RESULT_SUCCESS;
2176 size_t mad_size;
2177 u16 resp_mad_pkey_index = 0;
2178 bool opa;
2180 if (list_empty_careful(&port_priv->port_list))
2181 return;
2183 if (wc->status != IB_WC_SUCCESS) {
2184 /*
2185 * Receive errors indicate that the QP has entered the error
2186 * state - error handling/shutdown code will cleanup
2187 */
2188 return;
2189 }
2191 qp_info = mad_list->mad_queue->qp_info;
2192 dequeue_mad(mad_list);
2194 opa = rdma_cap_opa_mad(qp_info->port_priv->device,
2195 qp_info->port_priv->port_num);
2197 mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
2198 mad_list);
2199 recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
2200 ib_dma_unmap_single(port_priv->device,
2201 recv->header.mapping,
2202 mad_priv_dma_size(recv),
2203 DMA_FROM_DEVICE);
2205 /* Setup MAD receive work completion from "normal" work completion */
2206 recv->header.wc = *wc;
2207 recv->header.recv_wc.wc = &recv->header.wc;
2209 if (opa && ((struct ib_mad_hdr *)(recv->mad))->base_version == OPA_MGMT_BASE_VERSION) {
2210 recv->header.recv_wc.mad_len = wc->byte_len - sizeof(struct ib_grh);
2211 recv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
2212 } else {
2213 recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
2214 recv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
2215 }
2217 recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad;
2218 recv->header.recv_wc.recv_buf.grh = &recv->grh;
2220 if (atomic_read(&qp_info->snoop_count))
2221 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
2223 /* Validate MAD */
2224 if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa))
2225 goto out;
2227 mad_size = recv->mad_size;
2228 response = alloc_mad_private(mad_size, GFP_KERNEL);
2229 if (!response)
2230 goto out;
2232 if (rdma_cap_ib_switch(port_priv->device))
2233 port_num = wc->port_num;
2234 else
2235 port_num = port_priv->port_num;
2237 if (((struct ib_mad_hdr *)recv->mad)->mgmt_class ==
2238 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
2239 if (handle_smi(port_priv, qp_info, wc, port_num, recv,
2240 response, opa)
2241 == IB_SMI_DISCARD)
2242 goto out;
2243 }
2245 /* Give driver "right of first refusal" on incoming MAD */
2246 if (port_priv->device->process_mad) {
2247 ret = port_priv->device->process_mad(port_priv->device, 0,
2248 port_priv->port_num,
2249 wc, &recv->grh,
2250 (const struct ib_mad_hdr *)recv->mad,
2251 recv->mad_size,
2252 (struct ib_mad_hdr *)response->mad,
2253 &mad_size, &resp_mad_pkey_index);
2255 if (opa)
2256 wc->pkey_index = resp_mad_pkey_index;
2258 if (ret & IB_MAD_RESULT_SUCCESS) {
2259 if (ret & IB_MAD_RESULT_CONSUMED)
2260 goto out;
2261 if (ret & IB_MAD_RESULT_REPLY) {
2262 agent_send_response((const struct ib_mad_hdr *)response->mad,
2263 &recv->grh, wc,
2264 port_priv->device,
2265 port_num,
2266 qp_info->qp->qp_num,
2267 mad_size, opa);
2268 goto out;
2269 }
2270 }
2271 }
2273 mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad);
2274 if (mad_agent) {
2275 ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
2276 /*
2277 * recv is freed up in error cases in ib_mad_complete_recv
2278 * or via recv_handler in ib_mad_complete_recv()
2279 */
2280 recv = NULL;
2281 } else if ((ret & IB_MAD_RESULT_SUCCESS) &&
2282 generate_unmatched_resp(recv, response, &mad_size, opa)) {
2283 agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc,
2284 port_priv->device, port_num,
2285 qp_info->qp->qp_num, mad_size, opa);
2286 }
2288 out:
2289 /* Post another receive request for this QP */
2290 if (response) {
2291 ib_mad_post_receive_mads(qp_info, response);
2292 kfree(recv);
2293 } else
2294 ib_mad_post_receive_mads(qp_info, recv);
2295 }
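/*
 * Re-arm or cancel the agent's delayed timeout work so that it fires
 * when the request at the head of the wait list is due to expire.
 */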
2297 static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
2299 struct ib_mad_send_wr_private *mad_send_wr;
2300 unsigned long delay;
2302 if (list_empty(&mad_agent_priv->wait_list)) {
2303 cancel_delayed_work(&mad_agent_priv->timed_work);
2304 } else {
2305 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2306 struct ib_mad_send_wr_private,
2307 agent_list);
2309 if (time_after(mad_agent_priv->timeout,
2310 mad_send_wr->timeout)) {
2311 mad_agent_priv->timeout = mad_send_wr->timeout;
2312 delay = mad_send_wr->timeout - jiffies;
2313 if ((long)delay <= 0)
2314 delay = 1;
2315 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2316 &mad_agent_priv->timed_work, delay);
2317 }
2318 }
2319 }
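/*
 * Queue a sent request on the agent's wait list, sorted by absolute
 * timeout, and reschedule the timeout work if this request now expires
 * first.
 */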
2321 static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
2322 {
2323 struct ib_mad_agent_private *mad_agent_priv;
2324 struct ib_mad_send_wr_private *temp_mad_send_wr;
2325 struct list_head *list_item;
2326 unsigned long delay;
2328 mad_agent_priv = mad_send_wr->mad_agent_priv;
2329 list_del(&mad_send_wr->agent_list);
2331 delay = mad_send_wr->timeout;
2332 mad_send_wr->timeout += jiffies;
2334 if (delay) {
2335 list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
2336 temp_mad_send_wr = list_entry(list_item,
2337 struct ib_mad_send_wr_private,
2338 agent_list);
2339 if (time_after(mad_send_wr->timeout,
2340 temp_mad_send_wr->timeout))
2341 break;
2342 }
2343 }
2344 else
2345 list_item = &mad_agent_priv->wait_list;
2346 list_add(&mad_send_wr->agent_list, list_item);
2348 /* Reschedule a work item if we have a shorter timeout */
2349 if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
2350 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2351 &mad_agent_priv->timed_work, delay);
2352 }
2354 void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
2355 int timeout_ms)
2356 {
2357 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2358 wait_for_response(mad_send_wr);
2359 }
2361 /*
2362 * Process a send work completion
2363 */
2364 void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
2365 struct ib_mad_send_wc *mad_send_wc)
2366 {
2367 struct ib_mad_agent_private *mad_agent_priv;
2368 unsigned long flags;
2369 int ret;
2371 mad_agent_priv = mad_send_wr->mad_agent_priv;
2372 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2373 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
2374 ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
2375 if (ret == IB_RMPP_RESULT_CONSUMED)
2376 goto done;
2377 } else
2378 ret = IB_RMPP_RESULT_UNHANDLED;
2380 if (mad_send_wc->status != IB_WC_SUCCESS &&
2381 mad_send_wr->status == IB_WC_SUCCESS) {
2382 mad_send_wr->status = mad_send_wc->status;
2383 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2384 }
2386 if (--mad_send_wr->refcount > 0) {
2387 if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
2388 mad_send_wr->status == IB_WC_SUCCESS) {
2389 wait_for_response(mad_send_wr);
2390 }
2391 goto done;
2392 }
2394 /* Remove send from MAD agent and notify client of completion */
2395 list_del(&mad_send_wr->agent_list);
2396 adjust_timeout(mad_agent_priv);
2397 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2399 if (mad_send_wr->status != IB_WC_SUCCESS )
2400 mad_send_wc->status = mad_send_wr->status;
2401 if (ret == IB_RMPP_RESULT_INTERNAL)
2402 ib_rmpp_send_handler(mad_send_wc);
2403 else
2404 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2405 mad_send_wc);
2407 /* Release reference on agent taken when sending */
2408 deref_mad_agent(mad_agent_priv);
2409 return;
2410 done:
2411 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2412 }
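/*
 * Completion handler for MAD send work requests: unmap the headers and
 * payload, complete the finished send, and move the next entry from the
 * overflow list onto the hardware send queue if one is waiting.
 */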
2414 static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc)
2415 {
2416 struct ib_mad_port_private *port_priv = cq->cq_context;
2417 struct ib_mad_list_head *mad_list =
2418 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
2419 struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
2420 struct ib_mad_qp_info *qp_info;
2421 struct ib_mad_queue *send_queue;
2422 struct ib_send_wr *bad_send_wr;
2423 struct ib_mad_send_wc mad_send_wc;
2424 unsigned long flags;
2425 int ret;
2427 if (list_empty_careful(&port_priv->port_list))
2428 return;
2430 if (wc->status != IB_WC_SUCCESS) {
2431 if (!ib_mad_send_error(port_priv, wc))
2432 return;
2433 }
2435 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2436 mad_list);
2437 send_queue = mad_list->mad_queue;
2438 qp_info = send_queue->qp_info;
2440 retry:
2441 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2442 mad_send_wr->header_mapping,
2443 mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
2444 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2445 mad_send_wr->payload_mapping,
2446 mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
2447 queued_send_wr = NULL;
2448 spin_lock_irqsave(&send_queue->lock, flags);
2449 list_del(&mad_list->list);
2451 /* Move queued send to the send queue */
2452 if (send_queue->count-- > send_queue->max_active) {
2453 mad_list = container_of(qp_info->overflow_list.next,
2454 struct ib_mad_list_head, list);
2455 queued_send_wr = container_of(mad_list,
2456 struct ib_mad_send_wr_private,
2457 mad_list);
2458 list_move_tail(&mad_list->list, &send_queue->list);
2459 }
2460 spin_unlock_irqrestore(&send_queue->lock, flags);
2462 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2463 mad_send_wc.status = wc->status;
2464 mad_send_wc.vendor_err = wc->vendor_err;
2465 if (atomic_read(&qp_info->snoop_count))
2466 snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
2467 IB_MAD_SNOOP_SEND_COMPLETIONS);
2468 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
2470 if (queued_send_wr) {
2471 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr,
2472 &bad_send_wr);
2473 if (ret) {
2474 dev_err(&port_priv->device->dev,
2475 "ib_post_send failed: %d\n", ret);
2476 mad_send_wr = queued_send_wr;
2477 wc->status = IB_WC_LOC_QP_OP_ERR;
2478 goto retry;
2479 }
2480 }
2481 }
2483 static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
2484 {
2485 struct ib_mad_send_wr_private *mad_send_wr;
2486 struct ib_mad_list_head *mad_list;
2487 unsigned long flags;
2489 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
2490 list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
2491 mad_send_wr = container_of(mad_list,
2492 struct ib_mad_send_wr_private,
2493 mad_list);
2494 mad_send_wr->retry = 1;
2495 }
2496 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
2497 }
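/*
 * Handle a send completion error. A flushed send that is marked for
 * retry is simply reposted; for other errors the QP is moved from SQE
 * back to RTS and the queued sends are flagged for retry. Returns true
 * if the caller should still complete the failed send.
 */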
2499 static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
2500 struct ib_wc *wc)
2501 {
2502 struct ib_mad_list_head *mad_list =
2503 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
2504 struct ib_mad_qp_info *qp_info = mad_list->mad_queue->qp_info;
2505 struct ib_mad_send_wr_private *mad_send_wr;
2506 int ret;
2508 /*
2509 * Send errors will transition the QP to SQE - move
2510 * QP to RTS and repost flushed work requests
2511 */
2512 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2513 mad_list);
2514 if (wc->status == IB_WC_WR_FLUSH_ERR) {
2515 if (mad_send_wr->retry) {
2516 /* Repost send */
2517 struct ib_send_wr *bad_send_wr;
2519 mad_send_wr->retry = 0;
2520 ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr,
2521 &bad_send_wr);
2522 if (!ret)
2523 return false;
2524 }
2525 } else {
2526 struct ib_qp_attr *attr;
2528 /* Transition QP to RTS and fail offending send */
2529 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2530 if (attr) {
2531 attr->qp_state = IB_QPS_RTS;
2532 attr->cur_qp_state = IB_QPS_SQE;
2533 ret = ib_modify_qp(qp_info->qp, attr,
2534 IB_QP_STATE | IB_QP_CUR_STATE);
2535 kfree(attr);
2536 if (ret)
2537 dev_err(&port_priv->device->dev,
2538 "%s - ib_modify_qp to RTS: %d\n",
2539 __func__, ret);
2540 else
2541 mark_sends_for_retry(qp_info);
2542 }
2543 }
2545 return true;
2546 }
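/*
 * Flush every outstanding send and waiting request of an agent that is
 * going away, reporting each one to the client with IB_WC_WR_FLUSH_ERR.
 */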
2548 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
2549 {
2550 unsigned long flags;
2551 struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
2552 struct ib_mad_send_wc mad_send_wc;
2553 struct list_head cancel_list;
2555 INIT_LIST_HEAD(&cancel_list);
2557 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2558 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2559 &mad_agent_priv->send_list, agent_list) {
2560 if (mad_send_wr->status == IB_WC_SUCCESS) {
2561 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2562 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2563 }
2564 }
2566 /* Empty wait list to prevent receives from finding a request */
2567 list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
2568 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2570 /* Report all cancelled requests */
2571 mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
2572 mad_send_wc.vendor_err = 0;
2574 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2575 &cancel_list, agent_list) {
2576 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2577 list_del(&mad_send_wr->agent_list);
2578 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2579 &mad_send_wc);
2580 atomic_dec(&mad_agent_priv->refcount);
2581 }
2582 }
2584 static struct ib_mad_send_wr_private*
2585 find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
2586 struct ib_mad_send_buf *send_buf)
2587 {
2588 struct ib_mad_send_wr_private *mad_send_wr;
2590 list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
2591 agent_list) {
2592 if (&mad_send_wr->send_buf == send_buf)
2593 return mad_send_wr;
2594 }
2596 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
2597 agent_list) {
2598 if (is_rmpp_data_mad(mad_agent_priv,
2599 mad_send_wr->send_buf.mad) &&
2600 &mad_send_wr->send_buf == send_buf)
2601 return mad_send_wr;
2602 }
2603 return NULL;
2604 }
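/*
 * Adjust the timeout of an outstanding send identified by send_buf, or
 * cancel it when timeout_ms is 0; returns -EINVAL if the send has
 * already completed or cannot be found. Illustrative use by a client
 * that registered "agent" and posted "send_buf" (names are placeholders):
 *
 *	ib_modify_mad(agent, send_buf, 2000);	// extend timeout to 2 s
 *	ib_cancel_mad(agent, send_buf);		// or give up on the reply
 */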
2606 int ib_modify_mad(struct ib_mad_agent *mad_agent,
2607 struct ib_mad_send_buf *send_buf, u32 timeout_ms)
2608 {
2609 struct ib_mad_agent_private *mad_agent_priv;
2610 struct ib_mad_send_wr_private *mad_send_wr;
2611 unsigned long flags;
2612 int active;
2614 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
2615 agent);
2616 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2617 mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
2618 if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
2619 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2620 return -EINVAL;
2621 }
2623 active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
2624 if (!timeout_ms) {
2625 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2626 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2627 }
2629 mad_send_wr->send_buf.timeout_ms = timeout_ms;
2630 if (active)
2631 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2632 else
2633 ib_reset_mad_timeout(mad_send_wr, timeout_ms);
2635 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2636 return 0;
2637 }
2638 EXPORT_SYMBOL(ib_modify_mad);
2640 void ib_cancel_mad(struct ib_mad_agent *mad_agent,
2641 struct ib_mad_send_buf *send_buf)
2642 {
2643 ib_modify_mad(mad_agent, send_buf, 0);
2644 }
2645 EXPORT_SYMBOL(ib_cancel_mad);
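/*
 * Work handler for MADs that were answered locally without touching the
 * wire: fabricate a receive completion for the responding agent and a
 * send completion for the originating agent.
 */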
2647 static void local_completions(struct work_struct *work)
2648 {
2649 struct ib_mad_agent_private *mad_agent_priv;
2650 struct ib_mad_local_private *local;
2651 struct ib_mad_agent_private *recv_mad_agent;
2652 unsigned long flags;
2653 int free_mad;
2654 struct ib_wc wc;
2655 struct ib_mad_send_wc mad_send_wc;
2656 bool opa;
2658 mad_agent_priv =
2659 container_of(work, struct ib_mad_agent_private, local_work);
2661 opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
2662 mad_agent_priv->qp_info->port_priv->port_num);
2664 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2665 while (!list_empty(&mad_agent_priv->local_list)) {
2666 local = list_entry(mad_agent_priv->local_list.next,
2667 struct ib_mad_local_private,
2668 completion_list);
2669 list_del(&local->completion_list);
2670 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2671 free_mad = 0;
2672 if (local->mad_priv) {
2673 u8 base_version;
2674 recv_mad_agent = local->recv_mad_agent;
2675 if (!recv_mad_agent) {
2676 dev_err(&mad_agent_priv->agent.device->dev,
2677 "No receive MAD agent for local completion\n");
2678 free_mad = 1;
2679 goto local_send_completion;
2680 }
2682 /*
2683 * Defined behavior is to complete response
2684 * before request
2685 */
2686 build_smp_wc(recv_mad_agent->agent.qp,
2687 local->mad_send_wr->send_wr.wr.wr_cqe,
2688 be16_to_cpu(IB_LID_PERMISSIVE),
2689 local->mad_send_wr->send_wr.pkey_index,
2690 recv_mad_agent->agent.port_num, &wc);
2692 local->mad_priv->header.recv_wc.wc = &wc;
2694 base_version = ((struct ib_mad_hdr *)(local->mad_priv->mad))->base_version;
2695 if (opa && base_version == OPA_MGMT_BASE_VERSION) {
2696 local->mad_priv->header.recv_wc.mad_len = local->return_wc_byte_len;
2697 local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
2698 } else {
2699 local->mad_priv->header.recv_wc.mad_len = sizeof(struct ib_mad);
2700 local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
2701 }
2703 INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
2704 list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
2705 &local->mad_priv->header.recv_wc.rmpp_list);
2706 local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
2707 local->mad_priv->header.recv_wc.recv_buf.mad =
2708 (struct ib_mad *)local->mad_priv->mad;
2709 if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
2710 snoop_recv(recv_mad_agent->qp_info,
2711 &local->mad_priv->header.recv_wc,
2712 IB_MAD_SNOOP_RECVS);
2713 recv_mad_agent->agent.recv_handler(
2714 &recv_mad_agent->agent,
2715 &local->mad_send_wr->send_buf,
2716 &local->mad_priv->header.recv_wc);
2717 spin_lock_irqsave(&recv_mad_agent->lock, flags);
2718 atomic_dec(&recv_mad_agent->refcount);
2719 spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
2720 }
2722 local_send_completion:
2723 /* Complete send */
2724 mad_send_wc.status = IB_WC_SUCCESS;
2725 mad_send_wc.vendor_err = 0;
2726 mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
2727 if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
2728 snoop_send(mad_agent_priv->qp_info,
2729 &local->mad_send_wr->send_buf,
2730 &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
2731 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2732 &mad_send_wc);
2734 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2735 atomic_dec(&mad_agent_priv->refcount);
2736 if (free_mad)
2737 kfree(local->mad_priv);
2738 kfree(local);
2739 }
2740 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2741 }
2743 static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
2744 {
2745 int ret;
2747 if (!mad_send_wr->retries_left)
2748 return -ETIMEDOUT;
2750 mad_send_wr->retries_left--;
2751 mad_send_wr->send_buf.retries++;
2753 mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
2755 if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) {
2756 ret = ib_retry_rmpp(mad_send_wr);
2757 switch (ret) {
2758 case IB_RMPP_RESULT_UNHANDLED:
2759 ret = ib_send_mad(mad_send_wr);
2760 break;
2761 case IB_RMPP_RESULT_CONSUMED:
2762 ret = 0;
2763 break;
2764 default:
2765 ret = -ECOMM;
2766 break;
2767 }
2768 } else
2769 ret = ib_send_mad(mad_send_wr);
2771 if (!ret) {
2772 mad_send_wr->refcount++;
2773 list_add_tail(&mad_send_wr->agent_list,
2774 &mad_send_wr->mad_agent_priv->send_list);
2775 }
2776 return ret;
2777 }
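/*
 * Delayed-work handler that retries requests on the wait list whose
 * timeout has expired and reports IB_WC_RESP_TIMEOUT_ERR to the client
 * once the retries are used up.
 */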
2779 static void timeout_sends(struct work_struct *work)
2780 {
2781 struct ib_mad_agent_private *mad_agent_priv;
2782 struct ib_mad_send_wr_private *mad_send_wr;
2783 struct ib_mad_send_wc mad_send_wc;
2784 unsigned long flags, delay;
2786 mad_agent_priv = container_of(work, struct ib_mad_agent_private,
2787 timed_work.work);
2788 mad_send_wc.vendor_err = 0;
2790 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2791 while (!list_empty(&mad_agent_priv->wait_list)) {
2792 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2793 struct ib_mad_send_wr_private,
2794 agent_list);
2796 if (time_after(mad_send_wr->timeout, jiffies)) {
2797 delay = mad_send_wr->timeout - jiffies;
2798 if ((long)delay <= 0)
2799 delay = 1;
2800 queue_delayed_work(mad_agent_priv->qp_info->
2801 port_priv->wq,
2802 &mad_agent_priv->timed_work, delay);
2803 break;
2804 }
2806 list_del(&mad_send_wr->agent_list);
2807 if (mad_send_wr->status == IB_WC_SUCCESS &&
2808 !retry_send(mad_send_wr))
2809 continue;
2811 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2813 if (mad_send_wr->status == IB_WC_SUCCESS)
2814 mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
2816 mad_send_wc.status = mad_send_wr->status;
2817 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2818 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2819 &mad_send_wc);
2821 atomic_dec(&mad_agent_priv->refcount);
2822 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2823 }
2824 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2825 }
2827 /*
2828 * Allocate receive MADs and post receive WRs for them
2829 */
2830 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
2831 struct ib_mad_private *mad)
2832 {
2833 unsigned long flags;
2834 int post, ret;
2835 struct ib_mad_private *mad_priv;
2836 struct ib_sge sg_list;
2837 struct ib_recv_wr recv_wr, *bad_recv_wr;
2838 struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
2840 /* Initialize common scatter list fields */
2841 sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey;
2843 /* Initialize common receive WR fields */
2844 recv_wr.next = NULL;
2845 recv_wr.sg_list = &sg_list;
2846 recv_wr.num_sge = 1;
2848 do {
2849 /* Allocate and map receive buffer */
2850 if (mad) {
2851 mad_priv = mad;
2852 mad = NULL;
2853 } else {
2854 mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
2855 GFP_ATOMIC);
2856 if (!mad_priv) {
2857 ret = -ENOMEM;
2858 break;
2859 }
2860 }
2861 sg_list.length = mad_priv_dma_size(mad_priv);
2862 sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
2863 &mad_priv->grh,
2864 mad_priv_dma_size(mad_priv),
2865 DMA_FROM_DEVICE);
2866 if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
2867 sg_list.addr))) {
2868 ret = -ENOMEM;
2869 break;
2870 }
2871 mad_priv->header.mapping = sg_list.addr;
2872 mad_priv->header.mad_list.mad_queue = recv_queue;
2873 mad_priv->header.mad_list.cqe.done = ib_mad_recv_done;
2874 recv_wr.wr_cqe = &mad_priv->header.mad_list.cqe;
2876 /* Post receive WR */
2877 spin_lock_irqsave(&recv_queue->lock, flags);
2878 post = (++recv_queue->count < recv_queue->max_active);
2879 list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
2880 spin_unlock_irqrestore(&recv_queue->lock, flags);
2881 ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
2882 if (ret) {
2883 spin_lock_irqsave(&recv_queue->lock, flags);
2884 list_del(&mad_priv->header.mad_list.list);
2885 recv_queue->count--;
2886 spin_unlock_irqrestore(&recv_queue->lock, flags);
2887 ib_dma_unmap_single(qp_info->port_priv->device,
2888 mad_priv->header.mapping,
2889 mad_priv_dma_size(mad_priv),
2890 DMA_FROM_DEVICE);
2891 kfree(mad_priv);
2892 dev_err(&qp_info->port_priv->device->dev,
2893 "ib_post_recv failed: %d\n", ret);
2894 break;
2895 }
2896 } while (post);
2898 return ret;
2899 }
2901 /*
2902 * Return all the posted receive MADs
2903 */
2904 static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
2905 {
2906 struct ib_mad_private_header *mad_priv_hdr;
2907 struct ib_mad_private *recv;
2908 struct ib_mad_list_head *mad_list;
2910 if (!qp_info->qp)
2911 return;
2913 while (!list_empty(&qp_info->recv_queue.list)) {
2915 mad_list = list_entry(qp_info->recv_queue.list.next,
2916 struct ib_mad_list_head, list);
2917 mad_priv_hdr = container_of(mad_list,
2918 struct ib_mad_private_header,
2919 mad_list);
2920 recv = container_of(mad_priv_hdr, struct ib_mad_private,
2921 header);
2923 /* Remove from posted receive MAD list */
2924 list_del(&mad_list->list);
2926 ib_dma_unmap_single(qp_info->port_priv->device,
2927 recv->header.mapping,
2928 mad_priv_dma_size(recv),
2929 DMA_FROM_DEVICE);
2930 kfree(recv);
2931 }
2933 qp_info->recv_queue.count = 0;
2934 }
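/*
 * Start the port: move both MAD QPs through the INIT, RTR and RTS
 * states and post the initial receive work requests.
 */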
2939 static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
2940 {
2941 int ret, i;
2942 struct ib_qp_attr *attr;
2943 struct ib_qp *qp;
2944 u16 pkey_index;
2946 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2947 if (!attr)
2948 return -ENOMEM;
2950 ret = ib_find_pkey(port_priv->device, port_priv->port_num,
2951 IB_DEFAULT_PKEY_FULL, &pkey_index);
2952 if (ret)
2953 pkey_index = 0;
2955 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
2956 qp = port_priv->qp_info[i].qp;
2957 if (!qp)
2958 continue;
2960 /*
2961 * PKey index for QP1 is irrelevant but
2962 * one is needed for the Reset to Init transition
2963 */
2964 attr->qp_state = IB_QPS_INIT;
2965 attr->pkey_index = pkey_index;
2966 attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
2967 ret = ib_modify_qp(qp, attr, IB_QP_STATE |
2968 IB_QP_PKEY_INDEX | IB_QP_QKEY);
2969 if (ret) {
2970 dev_err(&port_priv->device->dev,
2971 "Couldn't change QP%d state to INIT: %d\n",
2972 i, ret);
2973 goto out;
2974 }
2976 attr->qp_state = IB_QPS_RTR;
2977 ret = ib_modify_qp(qp, attr, IB_QP_STATE);
2978 if (ret) {
2979 dev_err(&port_priv->device->dev,
2980 "Couldn't change QP%d state to RTR: %d\n",
2981 i, ret);
2982 goto out;
2983 }
2985 attr->qp_state = IB_QPS_RTS;
2986 attr->sq_psn = IB_MAD_SEND_Q_PSN;
2987 ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
2988 if (ret) {
2989 dev_err(&port_priv->device->dev,
2990 "Couldn't change QP%d state to RTS: %d\n",
2991 i, ret);
2992 goto out;
2993 }
2994 }
2996 ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2997 if (ret) {
2998 dev_err(&port_priv->device->dev,
2999 "Failed to request completion notification: %d\n",
3000 ret);
3001 goto out;
3002 }
3004 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
3005 if (!port_priv->qp_info[i].qp)
3006 continue;
3008 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
3009 if (ret) {
3010 dev_err(&port_priv->device->dev,
3011 "Couldn't post receive WRs\n");
3012 goto out;
3013 }
3014 }
3015 out:
3016 kfree(attr);
3017 return ret;
3018 }
3020 static void qp_event_handler(struct ib_event *event, void *qp_context)
3021 {
3022 struct ib_mad_qp_info *qp_info = qp_context;
3024 /* It's worse than that! He's dead, Jim! */
3025 dev_err(&qp_info->port_priv->device->dev,
3026 "Fatal error (%d) on MAD QP (%d)\n",
3027 event->event, qp_info->qp->qp_num);
3028 }
3030 static void init_mad_queue(struct ib_mad_qp_info *qp_info,
3031 struct ib_mad_queue *mad_queue)
3032 {
3033 mad_queue->qp_info = qp_info;
3034 mad_queue->count = 0;
3035 spin_lock_init(&mad_queue->lock);
3036 INIT_LIST_HEAD(&mad_queue->list);
3037 }
3039 static void init_mad_qp(struct ib_mad_port_private *port_priv,
3040 struct ib_mad_qp_info *qp_info)
3041 {
3042 qp_info->port_priv = port_priv;
3043 init_mad_queue(qp_info, &qp_info->send_queue);
3044 init_mad_queue(qp_info, &qp_info->recv_queue);
3045 INIT_LIST_HEAD(&qp_info->overflow_list);
3046 spin_lock_init(&qp_info->snoop_lock);
3047 qp_info->snoop_table = NULL;
3048 qp_info->snoop_table_size = 0;
3049 atomic_set(&qp_info->snoop_count, 0);
3050 }
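/*
 * Create one of the special MAD QPs (SMI for QP0, GSI for QP1) on the
 * port's shared CQ, sized according to the module parameters.
 */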
3052 static int create_mad_qp(struct ib_mad_qp_info *qp_info,
3053 enum ib_qp_type qp_type)
3054 {
3055 struct ib_qp_init_attr qp_init_attr;
3056 int ret;
3058 memset(&qp_init_attr, 0, sizeof qp_init_attr);
3059 qp_init_attr.send_cq = qp_info->port_priv->cq;
3060 qp_init_attr.recv_cq = qp_info->port_priv->cq;
3061 qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
3062 qp_init_attr.cap.max_send_wr = mad_sendq_size;
3063 qp_init_attr.cap.max_recv_wr = mad_recvq_size;
3064 qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
3065 qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
3066 qp_init_attr.qp_type = qp_type;
3067 qp_init_attr.port_num = qp_info->port_priv->port_num;
3068 qp_init_attr.qp_context = qp_info;
3069 qp_init_attr.event_handler = qp_event_handler;
3070 qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
3071 if (IS_ERR(qp_info->qp)) {
3072 dev_err(&qp_info->port_priv->device->dev,
3073 "Couldn't create ib_mad QP%d\n",
3074 get_spl_qp_index(qp_type));
3075 ret = PTR_ERR(qp_info->qp);
3076 goto error;
3077 }
3078 /* Use minimum queue sizes unless the CQ is resized */
3079 qp_info->send_queue.max_active = mad_sendq_size;
3080 qp_info->recv_queue.max_active = mad_recvq_size;
3081 return 0;
3083 error:
3084 return ret;
3085 }
3087 static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
3088 {
3089 if (!qp_info->qp)
3090 return;
3092 ib_destroy_qp(qp_info->qp);
3093 kfree(qp_info->snoop_table);
3094 }
3096 /*
3097 * Open the port
3098 * Create the QP, PD, MR, and CQ if needed
3099 */
3100 static int ib_mad_port_open(struct ib_device *device,
3101 int port_num)
3102 {
3103 int ret, cq_size;
3104 struct ib_mad_port_private *port_priv;
3105 unsigned long flags;
3106 char name[sizeof "ib_mad123"];
3107 int has_smi;
3109 if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE))
3110 return -EFAULT;
3112 if (WARN_ON(rdma_cap_opa_mad(device, port_num) &&
3113 rdma_max_mad_size(device, port_num) < OPA_MGMT_MAD_SIZE))
3114 return -EFAULT;
3116 /* Create new device info */
3117 port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
3118 if (!port_priv)
3119 return -ENOMEM;
3121 port_priv->device = device;
3122 port_priv->port_num = port_num;
3123 spin_lock_init(&port_priv->reg_lock);
3124 INIT_LIST_HEAD(&port_priv->agent_list);
3125 init_mad_qp(port_priv, &port_priv->qp_info[0]);
3126 init_mad_qp(port_priv, &port_priv->qp_info[1]);
3128 cq_size = mad_sendq_size + mad_recvq_size;
3129 has_smi = rdma_cap_ib_smi(device, port_num);
3130 if (has_smi)
3131 cq_size *= 2;
3133 port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
3134 IB_POLL_WORKQUEUE);
3135 if (IS_ERR(port_priv->cq)) {
3136 dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
3137 ret = PTR_ERR(port_priv->cq);
3138 goto error3;
3139 }
3141 port_priv->pd = ib_alloc_pd(device, 0);
3142 if (IS_ERR(port_priv->pd)) {
3143 dev_err(&device->dev, "Couldn't create ib_mad PD\n");
3144 ret = PTR_ERR(port_priv->pd);
3145 goto error4;
3146 }
3148 if (has_smi) {
3149 ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
3150 if (ret)
3151 goto error6;
3152 }
3153 ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
3154 if (ret)
3155 goto error7;
3157 snprintf(name, sizeof name, "ib_mad%d", port_num);
3158 port_priv->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
3159 if (!port_priv->wq) {
3160 ret = -ENOMEM;
3161 goto error8;
3162 }
3164 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3165 list_add_tail(&port_priv->port_list, &ib_mad_port_list);
3166 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3168 ret = ib_mad_port_start(port_priv);
3169 if (ret) {
3170 dev_err(&device->dev, "Couldn't start port\n");
3171 goto error9;
3172 }
3174 return 0;
3176 error9:
3177 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3178 list_del_init(&port_priv->port_list);
3179 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3181 destroy_workqueue(port_priv->wq);
3182 error8:
3183 destroy_mad_qp(&port_priv->qp_info[1]);
3184 error7:
3185 destroy_mad_qp(&port_priv->qp_info[0]);
3186 error6:
3187 ib_dealloc_pd(port_priv->pd);
3188 error4:
3189 ib_free_cq(port_priv->cq);
3190 cleanup_recv_queue(&port_priv->qp_info[1]);
3191 cleanup_recv_queue(&port_priv->qp_info[0]);
3192 error3:
3193 kfree(port_priv);
3195 return ret;
3196 }
3199 /*
3200 * If there are no classes using the port, free the port
3201 * resources (CQ, MR, PD, QP) and remove the port's info structure
3202 */
3203 static int ib_mad_port_close(struct ib_device *device, int port_num)
3204 {
3205 struct ib_mad_port_private *port_priv;
3206 unsigned long flags;
3208 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3209 port_priv = __ib_get_mad_port(device, port_num);
3210 if (port_priv == NULL) {
3211 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3212 dev_err(&device->dev, "Port %d not found\n", port_num);
3213 return -ENODEV;
3214 }
3215 list_del_init(&port_priv->port_list);
3216 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3218 destroy_workqueue(port_priv->wq);
3219 destroy_mad_qp(&port_priv->qp_info[1]);
3220 destroy_mad_qp(&port_priv->qp_info[0]);
3221 ib_dealloc_pd(port_priv->pd);
3222 ib_free_cq(port_priv->cq);
3223 cleanup_recv_queue(&port_priv->qp_info[1]);
3224 cleanup_recv_queue(&port_priv->qp_info[0]);
3225 /* XXX: Handle deallocation of MAD registration tables */
3227 kfree(port_priv);
3229 return 0;
3230 }
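/*
 * Client "add" callback: open the MAD and agent services on every port
 * that supports IB management datagrams, unwinding already opened ports
 * if any of them fails.
 */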
3232 static void ib_mad_init_device(struct ib_device *device)
3233 {
3234 int start, i;
3236 start = rdma_start_port(device);
3238 for (i = start; i <= rdma_end_port(device); i++) {
3239 if (!rdma_cap_ib_mad(device, i))
3240 continue;
3242 if (ib_mad_port_open(device, i)) {
3243 dev_err(&device->dev, "Couldn't open port %d\n", i);
3244 goto error;
3245 }
3246 if (ib_agent_port_open(device, i)) {
3247 dev_err(&device->dev,
3248 "Couldn't open port %d for agents\n", i);
3255 if (ib_mad_port_close(device, i))
3256 dev_err(&device->dev, "Couldn't close port %d\n", i);
3258 error:
3259 while (--i >= start) {
3260 if (!rdma_cap_ib_mad(device, i))
3261 continue;
3263 if (ib_agent_port_close(device, i))
3263 if (ib_agent_port_close(device, i))
3264 dev_err(&device->dev,
3265 "Couldn't close port %d for agents\n", i);
3266 if (ib_mad_port_close(device, i))
3267 dev_err(&device->dev, "Couldn't close port %d\n", i);
3268 }
3269 }
3271 static void ib_mad_remove_device(struct ib_device *device, void *client_data)
3272 {
3273 int i;
3275 for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
3276 if (!rdma_cap_ib_mad(device, i))
3277 continue;
3279 if (ib_agent_port_close(device, i))
3280 dev_err(&device->dev,
3281 "Couldn't close port %d for agents\n", i);
3282 if (ib_mad_port_close(device, i))
3283 dev_err(&device->dev, "Couldn't close port %d\n", i);
3284 }
3285 }
3287 static struct ib_client mad_client = {
3288 .name = "mad",
3289 .add = ib_mad_init_device,
3290 .remove = ib_mad_remove_device
3291 };
3293 int ib_mad_init(void)
3294 {
3295 mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
3296 mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);
3298 mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
3299 mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);
3301 INIT_LIST_HEAD(&ib_mad_port_list);
3303 if (ib_register_client(&mad_client)) {
3304 pr_err("Couldn't register ib_mad client\n");
3305 return -EINVAL;
3306 }
3308 return 0;
3309 }
3311 void ib_mad_cleanup(void)
3312 {
3313 ib_unregister_client(&mad_client);
3314 }