/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>

#include <linux/mlx4/cmd.h>
#include <linux/gfp.h>
#include <rdma/ib_pma.h>

#include "mlx4_ib.h"

enum {
	MLX4_IB_VENDOR_CLASS1 = 0x9,
	MLX4_IB_VENDOR_CLASS2 = 0xa
};
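
/*
 * Post a MAD to the device's embedded management agent via the MAD_IFC
 * firmware command: copy the 256-byte request into a command mailbox,
 * execute the command, and copy the 256-byte response back out.
 */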
int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int ignore_mkey, int ignore_bkey,
		 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
		 void *in_mad, void *response_mad)
{
	struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
	void *inbox;
	int err;
	u32 in_modifier = port;
	u8 op_modifier = 0;
	inmailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(inmailbox))
		return PTR_ERR(inmailbox);
	inbox = inmailbox->buf;

	outmailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(outmailbox)) {
		mlx4_free_cmd_mailbox(dev->dev, inmailbox);
		return PTR_ERR(outmailbox);
	}

	memcpy(inbox, in_mad, 256);
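
	/*
	 * Mailbox layout: the 256-byte request MAD comes first, followed,
	 * when a work completion is supplied, by a 256-byte extended info
	 * block describing the receive context (filled in below).
	 */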

	/*
	 * Key check traps can't be generated unless we have in_wc to
	 * tell us where to send the trap.
	 */
	if (ignore_mkey || !in_wc)
		op_modifier |= 0x1;
	if (ignore_bkey || !in_wc)
		op_modifier |= 0x2;

	if (in_wc) {
		struct {
			__be32		my_qpn;
			u32		reserved1;
			__be32		rqpn;
			u8		sl;
			u8		g_path;
			u16		reserved2[2];
			__be16		pkey;
			u32		reserved3[11];
			u8		grh[40];
		} *ext_info;

		memset(inbox + 256, 0, 256);
		ext_info = inbox + 256;

		ext_info->my_qpn = cpu_to_be32(in_wc->qp->qp_num);
		ext_info->rqpn   = cpu_to_be32(in_wc->src_qp);
		ext_info->sl     = in_wc->sl << 4;
		ext_info->g_path = in_wc->dlid_path_bits |
			(in_wc->wc_flags & IB_WC_GRH ? 0x80 : 0);
		ext_info->pkey   = cpu_to_be16(in_wc->pkey_index);

		if (in_grh)
			memcpy(ext_info->grh, in_grh, 40);

		op_modifier |= 0x4;

		in_modifier |= in_wc->slid << 16;
	}
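
	/*
	 * in_modifier carries the port number in its low 16 bits and the
	 * source LID in its high 16 bits; op_modifier bits 0 and 1 suppress
	 * M_Key and B_Key validation, and bit 2 marks the extended info
	 * block at inbox + 256 as valid.
	 */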
	err = mlx4_cmd_box(dev->dev, inmailbox->dma, outmailbox->dma,
			   in_modifier, op_modifier,
			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_NATIVE);

	if (!err)
		memcpy(response_mad, outmailbox->buf, 256);

	mlx4_free_cmd_mailbox(dev->dev, inmailbox);
	mlx4_free_cmd_mailbox(dev->dev, outmailbox);

	return err;
}
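
/*
 * Cache an address handle for the (possibly new) subnet manager so that
 * traps can be sent to it; see forward_trap() below.
 */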
static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
{
	struct ib_ah *new_ah;
	struct ib_ah_attr ah_attr;

	if (!dev->send_agent[port_num - 1][0])
		return;

	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.dlid     = lid;
	ah_attr.sl       = sl;
	ah_attr.port_num = port_num;

	new_ah = ib_create_ah(dev->send_agent[port_num - 1][0]->qp->pd,
			      &ah_attr);
	if (IS_ERR(new_ah))
		return;

	spin_lock(&dev->sm_lock);
	if (dev->sm_ah[port_num - 1])
		ib_destroy_ah(dev->sm_ah[port_num - 1]);
	dev->sm_ah[port_num - 1] = new_ah;
	spin_unlock(&dev->sm_lock);
}

/*
 * Snoop SM MADs for port info, GUID info, and P_Key table sets, so we can
 * synthesize LID change, Client-Rereg, GID change, and P_Key change events.
 */
static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad,
		      u16 prev_lid)
{
	struct ib_port_info *pinfo;
	u16 lid;

	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    mad->mad_hdr.method == IB_MGMT_METHOD_SET)
		switch (mad->mad_hdr.attr_id) {
		case IB_SMP_ATTR_PORT_INFO:
			pinfo = (struct ib_port_info *) ((struct ib_smp *) mad)->data;
			lid = be16_to_cpu(pinfo->lid);

			update_sm_ah(dev, port_num,
				     be16_to_cpu(pinfo->sm_lid),
				     pinfo->neighbormtu_mastersmsl & 0xf);

			if (pinfo->clientrereg_resv_subnetto & 0x80)
				mlx4_ib_dispatch_event(dev, port_num,
						       IB_EVENT_CLIENT_REREGISTER);

			if (prev_lid != lid)
				mlx4_ib_dispatch_event(dev, port_num,
						       IB_EVENT_LID_CHANGE);
			break;

		case IB_SMP_ATTR_PKEY_TABLE:
			mlx4_ib_dispatch_event(dev, port_num,
					       IB_EVENT_PKEY_CHANGE);
			break;

		case IB_SMP_ATTR_GUID_INFO:
			/* paravirtualized master's guid is guid 0 -- does not change */
			if (!mlx4_is_master(dev->dev))
				mlx4_ib_dispatch_event(dev, port_num,
						       IB_EVENT_GID_CHANGE);
			break;

		default:
			break;
		}
}
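
/*
 * Overwrite the NodeDescription in SMA GetResp MADs with the node_desc
 * kept in the ib_device, so the name visible to management tools matches
 * the (possibly user-modified) one the core reports.
 */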
static void node_desc_override(struct ib_device *dev,
			       struct ib_mad *mad)
{
	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP &&
	    mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) {
		spin_lock(&to_mdev(dev)->sm_lock);
		memcpy(((struct ib_smp *) mad)->data, dev->node_desc, 64);
		spin_unlock(&to_mdev(dev)->sm_lock);
	}
}
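
/*
 * Resend an unsolicited trap to the current subnet manager using the
 * cached SM address handle: QP0 for LID-routed SMPs, QP1 for everything
 * else.
 */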
static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *mad)
{
	int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED;
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn];
	int ret;

	if (agent) {
		send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
					      IB_MGMT_MAD_DATA, GFP_ATOMIC);
		if (IS_ERR(send_buf))
			return;
		/*
		 * We rely here on the fact that MLX QPs don't use the
		 * address handle after the send is posted (this is
		 * wrong following the IB spec strictly, but we know
		 * it's OK for our devices).
		 */
		spin_lock(&dev->sm_lock);
		memcpy(send_buf->mad, mad, sizeof *mad);
		if ((send_buf->ah = dev->sm_ah[port_num - 1]))
			ret = ib_post_send_mad(send_buf, NULL);
		else
			ret = -EINVAL;
		spin_unlock(&dev->sm_lock);

		if (ret)
			ib_free_send_mad(send_buf);
	}
}
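
/*
 * Handle a MAD arriving on an InfiniBand link-layer port: filter out
 * methods the firmware agent cannot process, pass the rest through
 * mlx4_MAD_IFC(), and post-process the response.
 */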
static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			  struct ib_wc *in_wc, struct ib_grh *in_grh,
			  struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	u16 slid, prev_lid = 0;
	int err;
	struct ib_port_attr pattr;
	if (in_wc && in_wc->qp->qp_num) {
		pr_debug("received MAD: slid:%d sqpn:%d "
			"dlid_bits:%d dqpn:%d wc_flags:0x%x, cls %x, mtd %x, atr %x\n",
			in_wc->slid, in_wc->src_qp,
			in_wc->dlid_path_bits,
			in_wc->qp->qp_num,
			in_wc->wc_flags,
			in_mad->mad_hdr.mgmt_class, in_mad->mad_hdr.method,
			be16_to_cpu(in_mad->mad_hdr.attr_id));
		if (in_wc->wc_flags & IB_WC_GRH) {
			pr_debug("sgid_hi:0x%016llx sgid_lo:0x%016llx\n",
				 be64_to_cpu(in_grh->sgid.global.subnet_prefix),
				 be64_to_cpu(in_grh->sgid.global.interface_id));
			pr_debug("dgid_hi:0x%016llx dgid_lo:0x%016llx\n",
				 be64_to_cpu(in_grh->dgid.global.subnet_prefix),
				 be64_to_cpu(in_grh->dgid.global.interface_id));
		}
	}
	slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0) {
		forward_trap(to_mdev(ibdev), port_num, in_mad);
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
	}
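
	/*
	 * Only forward methods the firmware agent implements; anything
	 * else is reported as handled with no reply generated.
	 */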
	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	    in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_SET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS)
			return IB_MAD_RESULT_SUCCESS;

		/*
		 * Don't process SMInfo queries -- the SMA can't handle them.
		 */
		if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
			return IB_MAD_RESULT_SUCCESS;
	} else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
		   in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS1 ||
		   in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS2 ||
		   in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CONG_MGMT) {
		if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_SET)
			return IB_MAD_RESULT_SUCCESS;
	} else
		return IB_MAD_RESULT_SUCCESS;
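
	/*
	 * Snapshot the current LID before a PortInfo Set is executed so
	 * that smp_snoop() can tell whether it actually changed.
	 */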
	if ((in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    in_mad->mad_hdr.method == IB_MGMT_METHOD_SET &&
	    in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
	    !ib_query_port(ibdev, port_num, &pattr))
		prev_lid = pattr.lid;
	err = mlx4_MAD_IFC(to_mdev(ibdev),
			   mad_flags & IB_MAD_IGNORE_MKEY,
			   mad_flags & IB_MAD_IGNORE_BKEY,
			   port_num, in_wc, in_grh, in_mad, out_mad);
	if (err)
		return IB_MAD_RESULT_FAILURE;
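
	/*
	 * Devices that generate port management change events report LID,
	 * GID, P_Key, and client-reregister changes through the EQ (see
	 * handle_port_mgmt_change_event() below), so SMP snooping is
	 * needed only when MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV is not set.
	 */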
	if (!out_mad->mad_hdr.status) {
		if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV))
			smp_snoop(ibdev, port_num, in_mad, prev_lid);
		node_desc_override(ibdev, out_mad);
	}

	/* set return bit in status of directed route responses */
	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
		/* no response for trap repress */
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
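
/*
 * Translate a hardware mlx4_counter block into the PMA PortCounters
 * layout.  PortCounters data counters are in units of 32-bit words,
 * hence the divide-by-four (>> 2) on the byte counts.
 */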
static void edit_counter(struct mlx4_counter *cnt,
			 struct ib_pma_portcounters *pma_cnt)
{
	pma_cnt->port_xmit_data = cpu_to_be32(be64_to_cpu(cnt->tx_bytes) >> 2);
	pma_cnt->port_rcv_data = cpu_to_be32(be64_to_cpu(cnt->rx_bytes) >> 2);
	pma_cnt->port_xmit_packets = cpu_to_be32(be64_to_cpu(cnt->tx_frames));
	pma_cnt->port_rcv_packets = cpu_to_be32(be64_to_cpu(cnt->rx_frames));
}
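
/*
 * On Ethernet (RoCE) ports there is no firmware SMA/GSA, so only PMA
 * PortCounters queries are served, backed by the per-port flow counter
 * read via the QUERY_IF_STAT command.
 */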
static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			    struct ib_wc *in_wc, struct ib_grh *in_grh,
			    struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	int err;
	u32 inmod = dev->counters[port_num - 1] & 0xffff;
	u8 mode;

	if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
		return -EINVAL;

	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(mailbox))
		return IB_MAD_RESULT_FAILURE;
	err = mlx4_cmd_box(dev->dev, 0, mailbox->dma, inmod, 0,
			   MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_WRAPPED);
	if (err)
		err = IB_MAD_RESULT_FAILURE;
	else {
		memset(out_mad->data, 0, sizeof out_mad->data);
		mode = ((struct mlx4_counter *)mailbox->buf)->counter_mode;
		switch (mode & 0xf) {
		case 0:
			edit_counter(mailbox->buf,
				     (void *)(out_mad->data + 40));
			err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
			break;
		default:
			err = IB_MAD_RESULT_FAILURE;
		}
	}

	mlx4_free_cmd_mailbox(dev->dev, mailbox);

	return err;
}
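
/*
 * Top-level MAD handler: dispatch on the port's link layer, since IB
 * and Ethernet (RoCE) ports support different management classes.
 */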
int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			struct ib_wc *in_wc, struct ib_grh *in_grh,
			struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	switch (rdma_port_get_link_layer(ibdev, port_num)) {
	case IB_LINK_LAYER_INFINIBAND:
		return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
				      in_grh, in_mad, out_mad);
	case IB_LINK_LAYER_ETHERNET:
		return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
					in_grh, in_mad, out_mad);
	default:
		return -EINVAL;
	}
}

static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc)
{
	ib_free_send_mad(mad_send_wc->send_buf);
}
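
/*
 * Register send-only MAD agents for QP0 (SMI) and QP1 (GSI) on every
 * InfiniBand port; Ethernet ports get no agents, so no traps are
 * forwarded there.
 */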
int mlx4_ib_mad_init(struct mlx4_ib_dev *dev)
{
	struct ib_mad_agent *agent;
	int p, q;
	int ret;
	enum rdma_link_layer ll;

	for (p = 0; p < dev->num_ports; ++p) {
		ll = rdma_port_get_link_layer(&dev->ib_dev, p + 1);
		for (q = 0; q <= 1; ++q) {
			if (ll == IB_LINK_LAYER_INFINIBAND) {
				agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
							      q ? IB_QPT_GSI : IB_QPT_SMI,
							      NULL, 0, send_handler,
							      NULL, NULL);
				if (IS_ERR(agent)) {
					ret = PTR_ERR(agent);
					goto err;
				}
				dev->send_agent[p][q] = agent;
			} else
				dev->send_agent[p][q] = NULL;
		}
	}

	return 0;

err:
	for (p = 0; p < dev->num_ports; ++p)
		for (q = 0; q <= 1; ++q)
			if (dev->send_agent[p][q])
				ib_unregister_mad_agent(dev->send_agent[p][q]);

	return ret;
}

void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev)
{
	struct ib_mad_agent *agent;
	int p, q;

	for (p = 0; p < dev->num_ports; ++p) {
		for (q = 0; q <= 1; ++q) {
			agent = dev->send_agent[p][q];
			if (agent) {
				dev->send_agent[p][q] = NULL;
				ib_unregister_mad_agent(agent);
			}
		}

		if (dev->sm_ah[p])
			ib_destroy_ah(dev->sm_ah[p]);
	}
}
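
/*
 * Deferred handler for PORT_MGMT_CHANGE EQEs: translate each subtype
 * into the corresponding ib_event, so consumers see the same events a
 * device without this capability would synthesize via smp_snoop().
 */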
void handle_port_mgmt_change_event(struct work_struct *work)
{
	struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
	struct mlx4_ib_dev *dev = ew->ib_dev;
	struct mlx4_eqe *eqe = &(ew->ib_eqe);
	u8 port = eqe->event.port_mgmt_change.port;
	u32 changed_attr;
	switch (eqe->subtype) {
	case MLX4_DEV_PMC_SUBTYPE_PORT_INFO:
		changed_attr = be32_to_cpu(eqe->event.port_mgmt_change.params.port_info.changed_attr);

		/*
		 * Update the SM address handle first, so that MADs can be
		 * sent to the (possibly new) SM while handling the other
		 * changed attributes.
		 */
		if (changed_attr & MSTR_SM_CHANGE_MASK) {
			u16 lid = be16_to_cpu(eqe->event.port_mgmt_change.params.port_info.mstr_sm_lid);
			u8 sl = eqe->event.port_mgmt_change.params.port_info.mstr_sm_sl & 0xf;
			update_sm_ah(dev, port, lid, sl);
		}

		/* Check if it is a lid change event */
		if (changed_attr & MLX4_EQ_PORT_INFO_LID_CHANGE_MASK)
			mlx4_ib_dispatch_event(dev, port, IB_EVENT_LID_CHANGE);

		/* Generate GUID changed event */
		if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK)
			mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);

		if (changed_attr & MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK)
			mlx4_ib_dispatch_event(dev, port,
					       IB_EVENT_CLIENT_REREGISTER);
		break;

	case MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE:
		mlx4_ib_dispatch_event(dev, port, IB_EVENT_PKEY_CHANGE);
		break;

	case MLX4_DEV_PMC_SUBTYPE_GUID_INFO:
		/* paravirtualized master's guid is guid 0 -- does not change */
		if (!mlx4_is_master(dev->dev))
			mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
		break;

	default:
		pr_warn("Unsupported subtype 0x%x for "
			"Port Management Change event\n", eqe->subtype);
	}

	kfree(ew);
}

void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num,
			    enum ib_event_type type)
{
	struct ib_event event;

	event.device		= &dev->ib_dev;
	event.element.port_num	= port_num;
	event.event		= type;

	ib_dispatch_event(&event);
}