/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/delay.h>

#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>

#include "mlx4.h"
#include "fw.h"
#include "icm.h"
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

struct workqueue_struct *mlx4_wq;
#ifdef CONFIG_MLX4_DEBUG

int mlx4_debug_level = 0;
module_param_named(debug_level, mlx4_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");

#endif /* CONFIG_MLX4_DEBUG */
#ifdef CONFIG_PCI_MSI

static int msi_x = 1;
module_param(msi_x, int, 0444);
MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");

#else /* CONFIG_PCI_MSI */

#define msi_x (0)

#endif /* CONFIG_PCI_MSI */
static int num_vfs;
module_param(num_vfs, int, 0444);
MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0");

static int probe_vf;
module_param(probe_vf, int, 0644);
MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)");
int mlx4_log_num_mgm_entry_size = 10;
module_param_named(log_num_mgm_entry_size,
		   mlx4_log_num_mgm_entry_size, int, 0444);
MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num"
					 " of qp per mcg, for example:"
					 " 10 gives 248. Range: 9 <="
					 " log_num_mgm_entry_size <= 12");
#define MLX4_VF					(1 << 0)

#define HCA_GLOBAL_CAP_MASK		0
#define PF_CONTEXT_BEHAVIOUR_MASK	0
static char mlx4_version[] __devinitdata =
	DRV_NAME ": Mellanox ConnectX core driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";
static struct mlx4_profile default_profile = {
	.num_qp		= 1 << 17,
	.num_srq	= 1 << 16,
	.rdmarc_per_qp	= 1 << 4,
	.num_cq		= 1 << 16,
	.num_mcg	= 1 << 13,
	.num_mpt	= 1 << 19,
	.num_mtt	= 1 << 20, /* It is really num mtt segments */
};
static int log_num_mac = 7;
module_param_named(log_num_mac, log_num_mac, int, 0444);
MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");

static int log_num_vlan;
module_param_named(log_num_vlan, log_num_vlan, int, 0444);
MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
/* Log2 max number of VLANs per ETH port (0-7) */
#define MLX4_LOG_NUM_VLANS 7
static bool use_prio;
module_param_named(use_prio, use_prio, bool, 0444);
MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
		  "(0/1, default 0)");
int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");
static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE};
static int arr_argc = 2;
module_param_array(port_type_array, int, &arr_argc, 0444);
MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default, "
				  "1 for IB, 2 for Ethernet");
struct mlx4_port_config {
	struct list_head list;
	enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
	struct pci_dev *pdev;
};
static inline int mlx4_master_get_num_eqs(struct mlx4_dev *dev)
{
	return dev->caps.reserved_eqs +
		MLX4_MFUNC_EQ_NUM * (dev->num_slaves + 1);
}
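/*
 * Worked example (illustration only; taking MLX4_MFUNC_EQ_NUM as 4
 * purely for the arithmetic): with 4 reserved EQs and 2 slaves, the
 * master provisions 4 + 4 * (2 + 1) = 16 event queues.
 */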
int mlx4_check_port_params(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_type)
{
	int i;

	for (i = 0; i < dev->caps.num_ports - 1; i++) {
		if (port_type[i] != port_type[i + 1]) {
			if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
				mlx4_err(dev, "Only same port types supported "
					 "on this HCA, aborting.\n");
				return -EINVAL;
			}
			if (port_type[i] == MLX4_PORT_TYPE_ETH &&
			    port_type[i + 1] == MLX4_PORT_TYPE_IB)
				return -EINVAL;
		}
	}

	for (i = 0; i < dev->caps.num_ports; i++) {
		if (!(port_type[i] & dev->caps.supported_type[i+1])) {
			mlx4_err(dev, "Requested port type for port %d is not "
				 "supported on this HCA\n", i + 1);
			return -EINVAL;
		}
	}
	return 0;
}
static void mlx4_set_port_mask(struct mlx4_dev *dev)
{
	int i;

	for (i = 1; i <= dev->caps.num_ports; ++i)
		dev->caps.port_mask[i] = dev->caps.port_type[i];
}
static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	int err;
	int i;

	err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
		return err;
	}

	if (dev_cap->min_page_sz > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than "
			 "kernel PAGE_SIZE of %ld, aborting.\n",
			 dev_cap->min_page_sz, PAGE_SIZE);
		return -ENODEV;
	}
	if (dev_cap->num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, "
			 "aborting.\n",
			 dev_cap->num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}

	if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than "
			 "PCI resource 2 size of 0x%llx, aborting.\n",
			 dev_cap->uar_size,
			 (unsigned long long) pci_resource_len(dev->pdev, 2));
		return -ENODEV;
	}
	dev->caps.num_ports	     = dev_cap->num_ports;
	for (i = 1; i <= dev->caps.num_ports; ++i) {
		dev->caps.vl_cap[i]	    = dev_cap->max_vl[i];
		dev->caps.ib_mtu_cap[i]	    = dev_cap->ib_mtu[i];
		dev->caps.gid_table_len[i]  = dev_cap->max_gids[i];
		dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i];
		dev->caps.port_width_cap[i] = dev_cap->max_port_width[i];
		dev->caps.eth_mtu_cap[i]    = dev_cap->eth_mtu[i];
		dev->caps.def_mac[i]	    = dev_cap->def_mac[i];
		dev->caps.supported_type[i] = dev_cap->supported_port_types[i];
		dev->caps.suggested_type[i] = dev_cap->suggested_type[i];
		dev->caps.default_sense[i]  = dev_cap->default_sense[i];
		dev->caps.trans_type[i]	    = dev_cap->trans_type[i];
		dev->caps.vendor_oui[i]	    = dev_cap->vendor_oui[i];
		dev->caps.wavelength[i]	    = dev_cap->wavelength[i];
		dev->caps.trans_code[i]	    = dev_cap->trans_code[i];
	}

	dev->caps.uar_page_size	     = PAGE_SIZE;
	dev->caps.num_uars	     = dev_cap->uar_size / PAGE_SIZE;
	dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
	dev->caps.bf_reg_size	     = dev_cap->bf_reg_size;
	dev->caps.bf_regs_per_page   = dev_cap->bf_regs_per_page;
	dev->caps.max_sq_sg	     = dev_cap->max_sq_sg;
	dev->caps.max_rq_sg	     = dev_cap->max_rq_sg;
	dev->caps.max_wqes	     = dev_cap->max_qp_sz;
	dev->caps.max_qp_init_rdma   = dev_cap->max_requester_per_qp;
	dev->caps.max_srq_wqes	     = dev_cap->max_srq_sz;
	dev->caps.max_srq_sge	     = dev_cap->max_rq_sg - 1;
	dev->caps.reserved_srqs	     = dev_cap->reserved_srqs;
	dev->caps.max_sq_desc_sz     = dev_cap->max_sq_desc_sz;
	dev->caps.max_rq_desc_sz     = dev_cap->max_rq_desc_sz;
	dev->caps.num_qp_per_mgm     = mlx4_get_qp_per_mgm(dev);
	/*
	 * Subtract 1 from the limit because we need to allocate a
	 * spare CQE so the HCA HW can tell the difference between an
	 * empty CQ and a full CQ.
	 */
	dev->caps.max_cqes	     = dev_cap->max_cq_sz - 1;
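	/*
	 * Worked example (illustration only): if the firmware reports
	 * max_cq_sz = 4096, the driver advertises max_cqes = 4095 so
	 * that one CQE slot always stays spare and "producer ==
	 * consumer" can only mean an empty CQ, never a full one.
	 */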
	dev->caps.reserved_cqs	     = dev_cap->reserved_cqs;
	dev->caps.reserved_eqs	     = dev_cap->reserved_eqs;
	dev->caps.reserved_mtts      = dev_cap->reserved_mtts;
	dev->caps.reserved_mrws	     = dev_cap->reserved_mrws;

	/* The first 128 UARs are used for EQ doorbells */
	dev->caps.reserved_uars	     = max_t(int, 128, dev_cap->reserved_uars);
	dev->caps.reserved_pds	     = dev_cap->reserved_pds;
	dev->caps.reserved_xrcds     = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
					dev_cap->reserved_xrcds : 0;
	dev->caps.max_xrcds	     = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
					dev_cap->max_xrcds : 0;
	dev->caps.mtt_entry_sz	     = dev_cap->mtt_entry_sz;

	dev->caps.max_msg_sz	     = dev_cap->max_msg_sz;
	dev->caps.page_size_cap	     = ~(u32) (dev_cap->min_page_sz - 1);
	dev->caps.flags		     = dev_cap->flags;
	dev->caps.bmme_flags	     = dev_cap->bmme_flags;
	dev->caps.reserved_lkey	     = dev_cap->reserved_lkey;
	dev->caps.stat_rate_support  = dev_cap->stat_rate_support;
	dev->caps.max_gso_sz	     = dev_cap->max_gso_sz;
	/* Sense port is always allowed on supported devices for ConnectX-1 and -2 */
	if (dev->pdev->device != 0x1003)
		dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;

	dev->caps.log_num_macs  = log_num_mac;
	dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
	dev->caps.log_num_prios = use_prio ? 3 : 0;
	for (i = 1; i <= dev->caps.num_ports; ++i) {
		dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
		if (dev->caps.supported_type[i]) {
			/* if only ETH is supported - assign ETH */
			if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH)
				dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
			/* if only IB is supported,
			 * assign IB only if SRIOV is off */
			else if (dev->caps.supported_type[i] ==
				 MLX4_PORT_TYPE_IB) {
				if (dev->flags & MLX4_FLAG_SRIOV)
					dev->caps.port_type[i] =
						MLX4_PORT_TYPE_NONE;
				else
					dev->caps.port_type[i] =
						MLX4_PORT_TYPE_IB;
			/* if IB and ETH are supported,
			 * first of all check if SRIOV is on */
			} else if (dev->flags & MLX4_FLAG_SRIOV)
				dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
			else {
				/* In non-SRIOV mode, we set the port type
				 * according to user selection of port type;
				 * if the user selected none, take the FW hint */
				if (port_type_array[i-1] == MLX4_PORT_TYPE_NONE)
					dev->caps.port_type[i] = dev->caps.suggested_type[i] ?
						MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB;
				else
					dev->caps.port_type[i] = port_type_array[i-1];
			}
		}
		/*
		 * Link sensing is allowed on the port if 3 conditions are true:
		 * 1. Both protocols are supported on the port.
		 * 2. Different types are supported on the port
		 * 3. FW declared that it supports link sensing
		 */
		mlx4_priv(dev)->sense.sense_allowed[i] =
			((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) &&
			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT));

		/*
		 * If "default_sense" bit is set, we move the port to "AUTO" mode
		 * and perform sense_port FW command to try and set the correct
		 * port type from beginning
		 */
		if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) {
			enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE;
			dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO;
			mlx4_SENSE_PORT(dev, i, &sensed_port);
			if (sensed_port != MLX4_PORT_TYPE_NONE)
				dev->caps.port_type[i] = sensed_port;
		} else
			dev->caps.possible_type[i] = dev->caps.port_type[i];

		if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
			dev->caps.log_num_macs = dev_cap->log_max_macs[i];
			mlx4_warn(dev, "Requested number of MACs is too much "
				  "for port %d, reducing to %d.\n",
				  i, 1 << dev->caps.log_num_macs);
		}
		if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) {
			dev->caps.log_num_vlans = dev_cap->log_max_vlans[i];
			mlx4_warn(dev, "Requested number of VLANs is too much "
				  "for port %d, reducing to %d.\n",
				  i, 1 << dev->caps.log_num_vlans);
		}
	}
	dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters);

	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
		(1 << dev->caps.log_num_macs) *
		(1 << dev->caps.log_num_vlans) *
		(1 << dev->caps.log_num_prios) *
		dev->caps.num_ports;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;

	dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];

	return 0;
}
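/*
 * Worked example (illustration only, ignoring the per-port clamping
 * above): with the defaults log_num_macs = 7, log_num_vlans =
 * MLX4_LOG_NUM_VLANS = 7 and use_prio off (log_num_prios = 0), a
 * dual-port HCA reserves 128 * 128 * 1 * 2 = 32768 QPs each for the
 * ETH_ADDR and FC_ADDR regions.
 */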
/* The function checks if there are live vf, return the num of them */
static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_state;
	int i;
	int ret = 0;

	for (i = 1/*the ppf is 0*/; i < dev->num_slaves; ++i) {
		s_state = &priv->mfunc.master.slave_state[i];
		if (s_state->active && s_state->last_cmd !=
		    MLX4_COMM_CMD_RESET) {
			mlx4_warn(dev, "%s: slave: %d is still active\n",
				  __func__, i);
			ret++;
		}
	}
	return ret;
}
int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_slave;

	if (!mlx4_is_master(dev))
		return 0;

	s_slave = &priv->mfunc.master.slave_state[slave];
	return !!s_slave->active;
}
EXPORT_SYMBOL(mlx4_is_slave_active);
static int mlx4_slave_cap(struct mlx4_dev *dev)
{
	int err;
	u32 page_size;
	int i;
	struct mlx4_dev_cap dev_cap;
	struct mlx4_func_cap func_cap;
	struct mlx4_init_hca_param hca_param;

	memset(&hca_param, 0, sizeof(hca_param));
	err = mlx4_QUERY_HCA(dev, &hca_param);
	if (err) {
		mlx4_err(dev, "QUERY_HCA command failed, aborting.\n");
		return err;
	}

	/* fail if the hca has an unknown capability */
	if ((hca_param.global_caps | HCA_GLOBAL_CAP_MASK) !=
	    HCA_GLOBAL_CAP_MASK) {
		mlx4_err(dev, "Unknown hca global capabilities\n");
		return -ENOSYS;
	}

	mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;

	memset(&dev_cap, 0, sizeof(dev_cap));
	err = mlx4_dev_cap(dev, &dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
		return err;
	}

	page_size = ~dev->caps.page_size_cap + 1;
	mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
	if (page_size > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than "
			 "kernel PAGE_SIZE of %ld, aborting.\n",
			 page_size, PAGE_SIZE);
		return -ENODEV;
	}

	/* slave gets uar page size from QUERY_HCA fw command */
	dev->caps.uar_page_size = 1 << (hca_param.uar_page_sz + 12);

	/* TODO: relax this assumption */
	if (dev->caps.uar_page_size != PAGE_SIZE) {
		mlx4_err(dev, "UAR size:%d != kernel PAGE_SIZE of %ld\n",
			 dev->caps.uar_page_size, PAGE_SIZE);
		return -ENODEV;
	}

	memset(&func_cap, 0, sizeof(func_cap));
	err = mlx4_QUERY_FUNC_CAP(dev, &func_cap);
	if (err) {
		mlx4_err(dev, "QUERY_FUNC_CAP command failed, aborting.\n");
		return err;
	}

	if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) !=
	    PF_CONTEXT_BEHAVIOUR_MASK) {
		mlx4_err(dev, "Unknown pf context behaviour\n");
		return -ENOSYS;
	}

	dev->caps.num_ports	= func_cap.num_ports;
	dev->caps.num_qps	= func_cap.qp_quota;
	dev->caps.num_srqs	= func_cap.srq_quota;
	dev->caps.num_cqs	= func_cap.cq_quota;
	dev->caps.num_eqs	= func_cap.max_eq;
	dev->caps.reserved_eqs	= func_cap.reserved_eq;
	dev->caps.num_mpts	= func_cap.mpt_quota;
	dev->caps.num_mtts	= func_cap.mtt_quota;
	dev->caps.num_pds	= MLX4_NUM_PDS;
	dev->caps.num_mgms	= 0;
	dev->caps.num_amgms	= 0;

	for (i = 1; i <= dev->caps.num_ports; ++i)
		dev->caps.port_mask[i] = dev->caps.port_type[i];

	if (dev->caps.num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, "
			 "aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}

	if (dev->caps.uar_page_size * (dev->caps.num_uars -
				       dev->caps.reserved_uars) >
	    pci_resource_len(dev->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than "
			 "PCI resource 2 size of 0x%llx, aborting.\n",
			 dev->caps.uar_page_size * dev->caps.num_uars,
			 (unsigned long long) pci_resource_len(dev->pdev, 2));
		return -ENODEV;
	}

	mlx4_warn(dev, "sqp_demux:%d\n", dev->caps.sqp_demux);
	mlx4_warn(dev, "num_uars:%d reserved_uars:%d uar region:0x%x bar2:0x%llx\n",
		  dev->caps.num_uars, dev->caps.reserved_uars,
		  dev->caps.uar_page_size * dev->caps.num_uars,
		  pci_resource_len(dev->pdev, 2));
	mlx4_warn(dev, "num_eqs:%d reserved_eqs:%d\n", dev->caps.num_eqs,
		  dev->caps.reserved_eqs);
	mlx4_warn(dev, "num_pds:%d reserved_pds:%d slave_pd_shift:%d pd_base:%d\n",
		  dev->caps.num_pds, dev->caps.reserved_pds,
		  dev->caps.slave_pd_shift, dev->caps.pd_base);

	return 0;
}
/*
 * Change the port configuration of the device.
 * Every user of this function must hold the port mutex.
 */
int mlx4_change_port_types(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_types)
{
	int err = 0;
	int change = 0;
	int port;

	for (port = 0; port < dev->caps.num_ports; port++) {
		/* Change the port type only if the new type is different
		 * from the current, and not set to Auto */
		if (port_types[port] != dev->caps.port_type[port + 1]) {
			change = 1;
			dev->caps.port_type[port + 1] = port_types[port];
		}
	}
	if (change) {
		mlx4_unregister_device(dev);
		for (port = 1; port <= dev->caps.num_ports; port++) {
			mlx4_CLOSE_PORT(dev, port);
			err = mlx4_SET_PORT(dev, port);
			if (err) {
				mlx4_err(dev, "Failed to set port %d, "
					 "aborting\n", port);
				goto out;
			}
		}
		mlx4_set_port_mask(dev);
		err = mlx4_register_device(dev);
	}

out:
	return err;
}
static ssize_t show_port_type(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	char type[8];

	sprintf(type, "%s",
		(mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ?
		"ib" : "eth");
	if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO)
		sprintf(buf, "auto (%s)\n", type);
	else
		sprintf(buf, "%s\n", type);

	return strlen(buf);
}
static ssize_t set_port_type(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	struct mlx4_priv *priv = mlx4_priv(mdev);
	enum mlx4_port_type types[MLX4_MAX_PORTS];
	enum mlx4_port_type new_types[MLX4_MAX_PORTS];
	int i;
	int err = 0;

	if (!strcmp(buf, "ib\n"))
		info->tmp_type = MLX4_PORT_TYPE_IB;
	else if (!strcmp(buf, "eth\n"))
		info->tmp_type = MLX4_PORT_TYPE_ETH;
	else if (!strcmp(buf, "auto\n"))
		info->tmp_type = MLX4_PORT_TYPE_AUTO;
	else {
		mlx4_err(mdev, "%s is not a supported port type\n", buf);
		return -EINVAL;
	}

	mlx4_stop_sense(mdev);
	mutex_lock(&priv->port_mutex);
	/* Possible type is always the one that was delivered */
	mdev->caps.possible_type[info->port] = info->tmp_type;

	for (i = 0; i < mdev->caps.num_ports; i++) {
		types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
					mdev->caps.possible_type[i+1];
		if (types[i] == MLX4_PORT_TYPE_AUTO)
			types[i] = mdev->caps.port_type[i+1];
	}

	if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
	    !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) {
		for (i = 1; i <= mdev->caps.num_ports; i++) {
			if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
				mdev->caps.possible_type[i] = mdev->caps.port_type[i];
				err = -EINVAL;
			}
		}
	}
	if (err) {
		mlx4_err(mdev, "Auto sensing is not supported on this HCA. "
			 "Set only 'eth' or 'ib' for both ports "
			 "(should be the same)\n");
		goto out;
	}

	mlx4_do_sense_ports(mdev, new_types, types);

	err = mlx4_check_port_params(mdev, new_types);
	if (err)
		goto out;

	/* We are about to apply the changes after the configuration
	 * was verified, no need to remember the temporary types
	 * any more */
	for (i = 0; i < mdev->caps.num_ports; i++)
		priv->port[i + 1].tmp_type = 0;

	err = mlx4_change_port_types(mdev, new_types);

out:
	mlx4_start_sense(mdev);
	mutex_unlock(&priv->port_mutex);
	return err ? err : count;
}
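/*
 * Example usage (hypothetical PCI address; the attribute itself is
 * real and created per device in mlx4_init_port_info() below):
 *
 *	echo eth > /sys/bus/pci/devices/0000:04:00.0/mlx4_port1
 */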
static int mlx4_load_fw(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
					 GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.fw_icm) {
		mlx4_err(dev, "Couldn't allocate FW area, aborting.\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
	if (err) {
		mlx4_err(dev, "MAP_FA command failed, aborting.\n");
		goto err_free;
	}

	err = mlx4_RUN_FW(dev);
	if (err) {
		mlx4_err(dev, "RUN_FW command failed, aborting.\n");
		goto err_unmap_fa;
	}

	return 0;

err_unmap_fa:
	mlx4_UNMAP_FA(dev);

err_free:
	mlx4_free_icm(dev, priv->fw.fw_icm, 0);
	return err;
}
static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
				int cmpt_entry_sz)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int num_eqs;

	err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_QP *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err)
		goto err;

	err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_SRQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err)
		goto err_qp;

	err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_CQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err)
		goto err_srq;

	num_eqs = (mlx4_is_master(dev)) ?
		roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) :
		dev->caps.num_eqs;
	err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_EQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, num_eqs, num_eqs, 0, 0);
	if (err)
		goto err_cq;

	return 0;

err_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
err_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
err_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
err:
	return err;
}
static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
			 struct mlx4_init_hca_param *init_hca, u64 icm_size)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 aux_pages;
	int num_eqs;
	int err;

	err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
	if (err) {
		mlx4_err(dev, "SET_ICM_SIZE command failed, aborting.\n");
		return err;
	}

	mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory.\n",
		 (unsigned long long) icm_size >> 10,
		 (unsigned long long) aux_pages << 2);

	priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
					  GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.aux_icm) {
		mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
	if (err) {
		mlx4_err(dev, "MAP_ICM_AUX command failed, aborting.\n");
		goto err_free_aux;
	}

	err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
	if (err) {
		mlx4_err(dev, "Failed to map cMPT context memory, aborting.\n");
		goto err_unmap_aux;
	}

	num_eqs = (mlx4_is_master(dev)) ?
		roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) :
		dev->caps.num_eqs;
	err = mlx4_init_icm_table(dev, &priv->eq_table.table,
				  init_hca->eqc_base, dev_cap->eqc_entry_sz,
				  num_eqs, num_eqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
		goto err_unmap_cmpt;
	}

	/*
	 * Reserved MTT entries must be aligned up to a cacheline
	 * boundary, since the FW will write to them, while the driver
	 * writes to all other MTT entries. (The variable
	 * dev->caps.mtt_entry_sz below is really the MTT segment
	 * size, not the raw entry size)
	 */
	dev->caps.reserved_mtts =
		ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz,
		      dma_get_cache_alignment()) / dev->caps.mtt_entry_sz;

	err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
				  init_hca->mtt_base,
				  dev->caps.mtt_entry_sz,
				  dev->caps.num_mtts,
				  dev->caps.reserved_mtts, 1, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MTT context memory, aborting.\n");
		goto err_unmap_eq;
	}

	err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table,
				  init_hca->dmpt_base,
				  dev_cap->dmpt_entry_sz,
				  dev->caps.num_mpts,
				  dev->caps.reserved_mrws, 1, 1);
	if (err) {
		mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n");
		goto err_unmap_mtt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,
				  init_hca->qpc_base,
				  dev_cap->qpc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map QP context memory, aborting.\n");
		goto err_unmap_dmpt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,
				  init_hca->auxc_base,
				  dev_cap->aux_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n");
		goto err_unmap_qp;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,
				  init_hca->altc_base,
				  dev_cap->altc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n");
		goto err_unmap_auxc;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,
				  init_hca->rdmarc_base,
				  dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
		goto err_unmap_altc;
	}

	err = mlx4_init_icm_table(dev, &priv->cq_table.table,
				  init_hca->cqc_base,
				  dev_cap->cqc_entry_sz,
				  dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map CQ context memory, aborting.\n");
		goto err_unmap_rdmarc;
	}

	err = mlx4_init_icm_table(dev, &priv->srq_table.table,
				  init_hca->srqc_base,
				  dev_cap->srq_entry_sz,
				  dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n");
		goto err_unmap_cq;
	}

	/*
	 * It's not strictly required, but for simplicity just map the
	 * whole multicast group table now.  The table isn't very big
	 * and it's a lot easier than trying to track ref counts.
	 */
	err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
				  init_hca->mc_base,
				  mlx4_get_mgm_entry_size(dev),
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MCG context memory, aborting.\n");
		goto err_unmap_srq;
	}

	return 0;

err_unmap_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
err_unmap_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
err_unmap_rdmarc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
err_unmap_altc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
err_unmap_auxc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
err_unmap_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
err_unmap_dmpt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
err_unmap_mtt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
err_unmap_eq:
	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
err_unmap_cmpt:
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
err_unmap_aux:
	mlx4_UNMAP_ICM_AUX(dev);
err_free_aux:
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);

	return err;
}
static void mlx4_free_icms(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

	mlx4_UNMAP_ICM_AUX(dev);
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);
}
static void mlx4_slave_exit(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	down(&priv->cmd.slave_sem);
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME))
		mlx4_warn(dev, "Failed to close slave function.\n");
	up(&priv->cmd.slave_sem);
}
static int map_bf_area(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	resource_size_t bf_start;
	resource_size_t bf_len;
	int err = 0;

	bf_start = pci_resource_start(dev->pdev, 2) +
			(dev->caps.num_uars << PAGE_SHIFT);
	bf_len = pci_resource_len(dev->pdev, 2) -
			(dev->caps.num_uars << PAGE_SHIFT);
	priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
	if (!priv->bf_mapping)
		err = -ENOMEM;

	return err;
}

static void unmap_bf_area(struct mlx4_dev *dev)
{
	if (mlx4_priv(dev)->bf_mapping)
		io_mapping_free(mlx4_priv(dev)->bf_mapping);
}
static void mlx4_close_hca(struct mlx4_dev *dev)
{
	unmap_bf_area(dev);
	if (mlx4_is_slave(dev))
		mlx4_slave_exit(dev);
	else {
		mlx4_CLOSE_HCA(dev, 0);
		mlx4_free_icms(dev);
		mlx4_UNMAP_FA(dev);
		mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
	}
}
static int mlx4_init_slave(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 dma = (u64) priv->mfunc.vhcr_dma;
	int num_of_reset_retries = NUM_OF_RESET_RETRIES;
	int ret_from_reset = 0;
	u32 slave_read;
	u32 cmd_channel_ver;

	down(&priv->cmd.slave_sem);
	priv->cmd.max_cmds = 1;
	mlx4_warn(dev, "Sending reset\n");
	ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0,
				       MLX4_COMM_TIME);
	/* if we are in the middle of flr the slave will try
	 * NUM_OF_RESET_RETRIES times before leaving.*/
	if (ret_from_reset) {
		if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) {
			msleep(SLEEP_TIME_IN_RESET);
			while (ret_from_reset && num_of_reset_retries) {
				mlx4_warn(dev, "slave is currently in the "
					  "middle of FLR. retrying..."
					  "(try num:%d)\n",
					  (NUM_OF_RESET_RETRIES -
					   num_of_reset_retries + 1));
				ret_from_reset =
					mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET,
						      0, MLX4_COMM_TIME);
				num_of_reset_retries = num_of_reset_retries - 1;
				msleep(SLEEP_TIME_IN_RESET);
			}
		} else
			goto err;
	}

	/* check the driver version - the slave I/F revision
	 * must match the master's */
	slave_read = swab32(readl(&priv->mfunc.comm->slave_read));
	cmd_channel_ver = mlx4_comm_get_version();

	if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) !=
	    MLX4_COMM_GET_IF_REV(slave_read)) {
		mlx4_err(dev, "slave driver version is not supported"
			 " by the master\n");
		goto err;
	}

	mlx4_warn(dev, "Sending vhcr0\n");
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48,
			  MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32,
			  MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16,
			  MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, MLX4_COMM_TIME))
		goto err;
	up(&priv->cmd.slave_sem);
	return 0;

err:
	mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 0);
	up(&priv->cmd.slave_sem);
	return -EIO;
}
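/*
 * Editorial note on the sequence above (assumes the comm-channel
 * parameter field is 16 bits wide): the 64-bit VHCR DMA address is
 * handed to the master in four 16-bit pieces -- VHCR0 carries bits
 * 63:48, VHCR1 bits 47:32, VHCR2 bits 31:16, and VHCR_EN bits 15:0,
 * with the final command also enabling the VHCR.
 */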
static int mlx4_init_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv	  *priv = mlx4_priv(dev);
	struct mlx4_adapter	   adapter;
	struct mlx4_dev_cap	   dev_cap;
	struct mlx4_mod_stat_cfg   mlx4_cfg;
	struct mlx4_profile	   profile;
	struct mlx4_init_hca_param init_hca;
	u64 icm_size;
	int err;

	if (!mlx4_is_slave(dev)) {
		err = mlx4_QUERY_FW(dev);
		if (err) {
			if (err == -EACCES)
				mlx4_info(dev, "non-primary physical function, skipping.\n");
			else
				mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
			return err;
		}

		err = mlx4_load_fw(dev);
		if (err) {
			mlx4_err(dev, "Failed to start FW, aborting.\n");
			return err;
		}

		mlx4_cfg.log_pg_sz_m = 1;
		mlx4_cfg.log_pg_sz = 0;
		err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
		if (err)
			mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");

		err = mlx4_dev_cap(dev, &dev_cap);
		if (err) {
			mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
			goto err_stop_fw;
		}

		profile = default_profile;

		icm_size = mlx4_make_profile(dev, &profile, &dev_cap,
					     &init_hca);
		if ((long long) icm_size < 0) {
			err = icm_size;
			goto err_stop_fw;
		}

		init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
		init_hca.uar_page_sz = PAGE_SHIFT - 12;

		err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
		if (err)
			goto err_stop_fw;

		err = mlx4_INIT_HCA(dev, &init_hca);
		if (err) {
			mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
			goto err_free_icm;
		}
	} else {
		err = mlx4_init_slave(dev);
		if (err) {
			mlx4_err(dev, "Failed to initialize slave\n");
			return err;
		}

		err = mlx4_slave_cap(dev);
		if (err) {
			mlx4_err(dev, "Failed to obtain slave caps\n");
			goto err_close;
		}
	}

	if (map_bf_area(dev))
		mlx4_dbg(dev, "Failed to map blue flame area\n");

	/* Only the master sets the ports; everyone else gets them from it. */
	if (!mlx4_is_slave(dev))
		mlx4_set_port_mask(dev);

	err = mlx4_QUERY_ADAPTER(dev, &adapter);
	if (err) {
		mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n");
		goto unmap_bf;
	}

	priv->eq_table.inta_pin = adapter.inta_pin;
	memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id);

	return 0;

unmap_bf:
	unmap_bf_area(dev);

err_close:
	mlx4_close_hca(dev);

err_free_icm:
	if (!mlx4_is_slave(dev))
		mlx4_free_icms(dev);

err_stop_fw:
	if (!mlx4_is_slave(dev)) {
		mlx4_UNMAP_FA(dev);
		mlx4_free_icm(dev, priv->fw.fw_icm, 0);
	}
	return err;
}
static int mlx4_init_counters_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int nent;

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
		return -ENOENT;

	nent = dev->caps.max_counters;
	return mlx4_bitmap_init(&priv->counters_bitmap, nent, nent - 1, 0, 0);
}
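/*
 * Note (editorial): nent is a power of two here -- max_counters is
 * computed as 1 << ilog2(...) in mlx4_dev_cap() above -- so nent - 1
 * is a valid all-ones index mask for the bitmap allocator.
 */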
static void mlx4_cleanup_counters_table(struct mlx4_dev *dev)
{
	mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap);
}

int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
		return -ENOENT;

	*idx = mlx4_bitmap_alloc(&priv->counters_bitmap);
	if (*idx == -1)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_counter_alloc);

void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
{
	mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx);
}
EXPORT_SYMBOL_GPL(mlx4_counter_free);
static int mlx4_setup_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int port;
	__be32 ib_port_default_caps;

	err = mlx4_init_uar_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "user access region table, aborting.\n");
		return err;
	}

	err = mlx4_uar_alloc(dev, &priv->driver_uar);
	if (err) {
		mlx4_err(dev, "Failed to allocate driver access region, "
			 "aborting.\n");
		goto err_uar_table_free;
	}

	priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!priv->kar) {
		mlx4_err(dev, "Couldn't map kernel access region, "
			 "aborting.\n");
		err = -ENOMEM;
		goto err_uar_free;
	}

	err = mlx4_init_pd_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "protection domain table, aborting.\n");
		goto err_kar_unmap;
	}

	err = mlx4_init_xrcd_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "reliable connection domain table, aborting.\n");
		goto err_pd_table_free;
	}

	err = mlx4_init_mr_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "memory region table, aborting.\n");
		goto err_xrcd_table_free;
	}

	err = mlx4_init_eq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "event queue table, aborting.\n");
		goto err_mr_table_free;
	}

	err = mlx4_cmd_use_events(dev);
	if (err) {
		mlx4_err(dev, "Failed to switch to event-driven "
			 "firmware commands, aborting.\n");
		goto err_eq_table_free;
	}

	err = mlx4_NOP(dev);
	if (err) {
		if (dev->flags & MLX4_FLAG_MSI_X) {
			mlx4_warn(dev, "NOP command failed to generate MSI-X "
				  "interrupt (IRQ %d).\n",
				  priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
			mlx4_warn(dev, "Trying again without MSI-X.\n");
		} else {
			mlx4_err(dev, "NOP command failed to generate interrupt "
				 "(IRQ %d), aborting.\n",
				 priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
			mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
		}

		goto err_cmd_poll;
	}

	mlx4_dbg(dev, "NOP command IRQ test passed\n");

	err = mlx4_init_cq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "completion queue table, aborting.\n");
		goto err_cmd_poll;
	}

	err = mlx4_init_srq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "shared receive queue table, aborting.\n");
		goto err_cq_table_free;
	}

	err = mlx4_init_qp_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "queue pair table, aborting.\n");
		goto err_srq_table_free;
	}

	if (!mlx4_is_slave(dev)) {
		err = mlx4_init_mcg_table(dev);
		if (err) {
			mlx4_err(dev, "Failed to initialize "
				 "multicast group table, aborting.\n");
			goto err_qp_table_free;
		}
	}

	err = mlx4_init_counters_table(dev);
	if (err && err != -ENOENT) {
		mlx4_err(dev, "Failed to initialize counters table, aborting.\n");
		goto err_mcg_table_free;
	}

	if (!mlx4_is_slave(dev)) {
		for (port = 1; port <= dev->caps.num_ports; port++) {
			ib_port_default_caps = 0;
			err = mlx4_get_port_ib_caps(dev, port,
						    &ib_port_default_caps);
			if (err)
				mlx4_warn(dev, "failed to get port %d default "
					  "ib capabilities (%d). Continuing "
					  "with caps = 0\n", port, err);
			dev->caps.ib_port_def_cap[port] = ib_port_default_caps;

			err = mlx4_SET_PORT(dev, port);
			if (err) {
				mlx4_err(dev, "Failed to set port %d, aborting\n",
					 port);
				goto err_counters_table_free;
			}
		}
	}

	return 0;

err_counters_table_free:
	mlx4_cleanup_counters_table(dev);

err_mcg_table_free:
	mlx4_cleanup_mcg_table(dev);

err_qp_table_free:
	mlx4_cleanup_qp_table(dev);

err_srq_table_free:
	mlx4_cleanup_srq_table(dev);

err_cq_table_free:
	mlx4_cleanup_cq_table(dev);

err_cmd_poll:
	mlx4_cmd_use_polling(dev);

err_eq_table_free:
	mlx4_cleanup_eq_table(dev);

err_mr_table_free:
	mlx4_cleanup_mr_table(dev);

err_xrcd_table_free:
	mlx4_cleanup_xrcd_table(dev);

err_pd_table_free:
	mlx4_cleanup_pd_table(dev);

err_kar_unmap:
	iounmap(priv->kar);

err_uar_free:
	mlx4_uar_free(dev, &priv->driver_uar);

err_uar_table_free:
	mlx4_cleanup_uar_table(dev);
	return err;
}
static void mlx4_enable_msi_x(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct msix_entry *entries;
	int nreq = min_t(int, dev->caps.num_ports *
			 min_t(int, num_online_cpus() + 1, MAX_MSIX_P_PORT)
				+ MSIX_LEGACY_SZ, MAX_MSIX);
	int err;
	int i;

	if (msi_x) {
		/* In multifunction mode each function gets 2 MSI-X vectors,
		 * one for data path completions and the other for async events
		 * or command completions */
		if (mlx4_is_mfunc(dev)) {
			nreq = 2;
		} else {
			nreq = min_t(int, dev->caps.num_eqs -
				     dev->caps.reserved_eqs, nreq);
		}

		entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
		if (!entries)
			goto no_msi;

		for (i = 0; i < nreq; ++i)
			entries[i].entry = i;

	retry:
		err = pci_enable_msix(dev->pdev, entries, nreq);
		if (err) {
			/* Try again if at least 2 vectors are available */
			if (err > 1) {
				mlx4_info(dev, "Requested %d vectors, "
					  "but only %d MSI-X vectors available, "
					  "trying again\n", nreq, err);
				nreq = err;
				goto retry;
			}
			kfree(entries);
			goto no_msi;
		}

		if (nreq <
		    MSIX_LEGACY_SZ + dev->caps.num_ports * MIN_MSIX_P_PORT) {
			/* Working in legacy mode, all EQs shared */
			dev->caps.comp_pool	   = 0;
			dev->caps.num_comp_vectors = nreq - 1;
		} else {
			dev->caps.comp_pool	   = nreq - MSIX_LEGACY_SZ;
			dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1;
		}
		for (i = 0; i < nreq; ++i)
			priv->eq_table.eq[i].irq = entries[i].vector;

		dev->flags |= MLX4_FLAG_MSI_X;

		kfree(entries);
		return;
	}

no_msi:
	dev->caps.num_comp_vectors = 1;
	dev->caps.comp_pool	   = 0;

	for (i = 0; i < 2; ++i)
		priv->eq_table.eq[i].irq = dev->pdev->irq;
}
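/*
 * Worked example (illustration only): on a dual-port HCA with 8
 * online CPUs, the initial request is
 *
 *	nreq = 2 * min(8 + 1, MAX_MSIX_P_PORT) + MSIX_LEGACY_SZ
 *
 * capped at MAX_MSIX. One vector of whatever is finally granted is
 * kept for async events, which is why the legacy-mode branch above
 * sets num_comp_vectors = nreq - 1.
 */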
static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	int err = 0;

	info->dev = dev;
	info->port = port;
	if (!mlx4_is_slave(dev)) {
		INIT_RADIX_TREE(&info->mac_tree, GFP_KERNEL);
		mlx4_init_mac_table(dev, &info->mac_table);
		mlx4_init_vlan_table(dev, &info->vlan_table);
		info->base_qpn =
			dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
			(port - 1) * (1 << log_num_mac);
	}

	sprintf(info->dev_name, "mlx4_port%d", port);
	info->port_attr.attr.name = info->dev_name;
	if (mlx4_is_mfunc(dev))
		info->port_attr.attr.mode = S_IRUGO;
	else {
		info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
		info->port_attr.store     = set_port_type;
	}
	info->port_attr.show      = show_port_type;
	sysfs_attr_init(&info->port_attr.attr);

	err = device_create_file(&dev->pdev->dev, &info->port_attr);
	if (err) {
		mlx4_err(dev, "Failed to create file for port %d\n", port);
		info->port = -1;
	}

	return err;
}

static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
{
	if (info->port < 0)
		return;

	device_remove_file(&info->dev->pdev->dev, &info->port_attr);
}
static int mlx4_init_steering(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int num_entries = dev->caps.num_ports;
	int i, j;

	priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL);
	if (!priv->steer)
		return -ENOMEM;

	for (i = 0; i < num_entries; i++) {
		for (j = 0; j < MLX4_NUM_STEERS; j++) {
			INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]);
			INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]);
		}
		INIT_LIST_HEAD(&priv->steer[i].high_prios);
	}
	return 0;
}

static void mlx4_clear_steering(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_steer_index *entry, *tmp_entry;
	struct mlx4_promisc_qp *pqp, *tmp_pqp;
	int num_entries = dev->caps.num_ports;
	int i, j;

	for (i = 0; i < num_entries; i++) {
		for (j = 0; j < MLX4_NUM_STEERS; j++) {
			list_for_each_entry_safe(pqp, tmp_pqp,
						 &priv->steer[i].promisc_qps[j],
						 list) {
				list_del(&pqp->list);
				kfree(pqp);
			}
			list_for_each_entry_safe(entry, tmp_entry,
						 &priv->steer[i].steer_entries[j],
						 list) {
				list_del(&entry->list);
				list_for_each_entry_safe(pqp, tmp_pqp,
							 &entry->duplicates,
							 list) {
					list_del(&pqp->list);
					kfree(pqp);
				}
				kfree(entry);
			}
		}
	}
	kfree(priv->steer);
}
static int extended_func_num(struct pci_dev *pdev)
{
	return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn);
}
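/*
 * Worked example (illustration only): a function at 00:04.2 has
 * PCI_SLOT = 4 and PCI_FUNC = 2, so extended_func_num() returns
 * 4 * 8 + 2 = 34 -- i.e. the raw devfn value.
 */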
#define MLX4_OWNER_BASE	0x8069c
#define MLX4_OWNER_SIZE	4

static int mlx4_get_ownership(struct mlx4_dev *dev)
{
	void __iomem *owner;
	u32 ret;

	owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
			MLX4_OWNER_SIZE);
	if (!owner) {
		mlx4_err(dev, "Failed to obtain ownership bit\n");
		return -ENOMEM;
	}

	ret = readl(owner);
	iounmap(owner);
	return (int) !!ret;
}

static void mlx4_free_ownership(struct mlx4_dev *dev)
{
	void __iomem *owner;

	owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
			MLX4_OWNER_SIZE);
	if (!owner) {
		mlx4_err(dev, "Failed to obtain ownership bit\n");
		return;
	}

	writel(0, owner);
	msleep(1000);
	iounmap(owner);
}
static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mlx4_priv *priv;
	struct mlx4_dev *dev;
	int err;
	int port;

	pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, "
			"aborting.\n");
		return err;
	}
	if (num_vfs > MLX4_MAX_NUM_VF) {
		printk(KERN_ERR "There are more VFs (%d) than allowed (%d)\n",
		       num_vfs, MLX4_MAX_NUM_VF);
		return -EINVAL;
	}
	/*
	 * Check for BARs.
	 */
	if (((id == NULL) || !(id->driver_data & MLX4_VF)) &&
	    !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing DCS, aborting. "
			"(id == 0X%p, id->driver_data: 0x%lx, "
			"pci_resource_flags(pdev, 0): 0x%lx)\n", id,
			id ? id->driver_data : 0, pci_resource_flags(pdev, 0));
		err = -ENODEV;
		goto err_disable_pdev;
	}
	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing UAR, aborting.\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
			goto err_release_regions;
		}
	}
	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
			 "consistent PCI DMA mask.\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
				"aborting.\n");
			goto err_release_regions;
		}
	}

	/* Allow large DMA segments, up to the firmware limit of 1 GB */
	dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
	priv = kzalloc(sizeof *priv, GFP_KERNEL);
	if (!priv) {
		dev_err(&pdev->dev, "Device struct alloc failed, "
			"aborting.\n");
		err = -ENOMEM;
		goto err_release_regions;
	}

	dev       = &priv->dev;
	dev->pdev = pdev;
	INIT_LIST_HEAD(&priv->ctx_list);
	spin_lock_init(&priv->ctx_lock);

	mutex_init(&priv->port_mutex);

	INIT_LIST_HEAD(&priv->pgdir_list);
	mutex_init(&priv->pgdir_mutex);

	INIT_LIST_HEAD(&priv->bf_list);
	mutex_init(&priv->bf_mutex);

	dev->rev_id = pdev->revision;
	/* Detect if this device is a virtual function */
	if (id && id->driver_data & MLX4_VF) {
		/* When acting as pf, we normally skip vfs unless explicitly
		 * requested to probe them. */
		if (num_vfs && extended_func_num(pdev) > probe_vf) {
			mlx4_warn(dev, "Skipping virtual function:%d\n",
				  extended_func_num(pdev));
			err = -ENODEV;
			goto err_free_dev;
		}
		mlx4_warn(dev, "Detected virtual function - running in slave mode\n");
		dev->flags |= MLX4_FLAG_SLAVE;
	} else {
		/* We reset the device and enable SRIOV only for physical
		 * devices.  Try to claim ownership on the device;
		 * if already taken, skip -- do not allow multiple PFs */
		err = mlx4_get_ownership(dev);
		if (err) {
			if (err < 0)
				goto err_free_dev;
			else {
				mlx4_warn(dev, "Multiple PFs not yet supported. "
					  "Skipping PF.\n");
				err = -EINVAL;
				goto err_free_dev;
			}
		}

		if (num_vfs) {
			mlx4_warn(dev, "Enabling sriov with:%d vfs\n", num_vfs);
			err = pci_enable_sriov(pdev, num_vfs);
			if (err) {
				mlx4_err(dev, "Failed to enable sriov, "
					 "continuing without sriov enabled "
					 "(err = %d).\n", err);
				err = 0;
			} else {
				mlx4_warn(dev, "Running in master mode\n");
				dev->flags |= MLX4_FLAG_SRIOV |
					      MLX4_FLAG_MASTER;
				dev->num_vfs = num_vfs;
			}
		}

		/*
		 * Now reset the HCA before we touch the PCI capabilities or
		 * attempt a firmware command, since a boot ROM may have left
		 * the HCA in an undefined state.
		 */
		err = mlx4_reset(dev);
		if (err) {
			mlx4_err(dev, "Failed to reset HCA, aborting.\n");
			goto err_sriov;
		}
	}
1766 mlx4_err(dev, "Failed to reset HCA, aborting.\n");
1772 if (mlx4_cmd_init(dev)) {
1773 mlx4_err(dev, "Failed to init command interface, aborting.\n");
1777 /* In slave functions, the communication channel must be initialized
1778 * before posting commands. Also, init num_slaves before calling
1780 if (mlx4_is_mfunc(dev)) {
1781 if (mlx4_is_master(dev))
1782 dev->num_slaves = MLX4_MAX_NUM_SLAVES;
1784 dev->num_slaves = 0;
1785 if (mlx4_multi_func_init(dev)) {
1786 mlx4_err(dev, "Failed to init slave mfunc"
1787 " interface, aborting.\n");
1793 err = mlx4_init_hca(dev);
1795 if (err == -EACCES) {
1796 /* Not primary Physical function
1797 * Running in slave mode */
1798 mlx4_cmd_cleanup(dev);
1799 dev->flags |= MLX4_FLAG_SLAVE;
1800 dev->flags &= ~MLX4_FLAG_MASTER;
1806 /* In master functions, the communication channel must be initialized
1807 * after obtaining its address from fw */
1808 if (mlx4_is_master(dev)) {
1809 if (mlx4_multi_func_init(dev)) {
1810 mlx4_err(dev, "Failed to init master mfunc"
1811 "interface, aborting.\n");
1816 err = mlx4_alloc_eq_table(dev);
1818 goto err_master_mfunc;
1820 priv->msix_ctl.pool_bm = 0;
1821 spin_lock_init(&priv->msix_ctl.pool_lock);
1823 mlx4_enable_msi_x(dev);
1824 if ((mlx4_is_mfunc(dev)) &&
1825 !(dev->flags & MLX4_FLAG_MSI_X)) {
1826 mlx4_err(dev, "INTx is not supported in multi-function mode."
1831 if (!mlx4_is_slave(dev)) {
1832 err = mlx4_init_steering(dev);
1837 err = mlx4_setup_hca(dev);
1838 if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) &&
1839 !mlx4_is_mfunc(dev)) {
1840 dev->flags &= ~MLX4_FLAG_MSI_X;
1841 pci_disable_msix(pdev);
1842 err = mlx4_setup_hca(dev);
1848 for (port = 1; port <= dev->caps.num_ports; port++) {
1849 err = mlx4_init_port_info(dev, port);
1854 err = mlx4_register_device(dev);
1858 mlx4_sense_init(dev);
1859 mlx4_start_sense(dev);
1861 pci_set_drvdata(pdev, dev);
err_port:
	for (--port; port >= 1; --port)
		mlx4_cleanup_port_info(&priv->port[port]);

	mlx4_cleanup_counters_table(dev);
	mlx4_cleanup_mcg_table(dev);
	mlx4_cleanup_qp_table(dev);
	mlx4_cleanup_srq_table(dev);
	mlx4_cleanup_cq_table(dev);
	mlx4_cmd_use_polling(dev);
	mlx4_cleanup_eq_table(dev);
	mlx4_cleanup_mr_table(dev);
	mlx4_cleanup_xrcd_table(dev);
	mlx4_cleanup_pd_table(dev);
	mlx4_cleanup_uar_table(dev);

err_steer:
	if (!mlx4_is_slave(dev))
		mlx4_clear_steering(dev);

err_free_eq:
	mlx4_free_eq_table(dev);

err_master_mfunc:
	if (mlx4_is_master(dev))
		mlx4_multi_func_cleanup(dev);

err_close:
	if (dev->flags & MLX4_FLAG_MSI_X)
		pci_disable_msix(pdev);

	mlx4_close_hca(dev);

err_mfunc:
	if (mlx4_is_slave(dev))
		mlx4_multi_func_cleanup(dev);

err_cmd:
	mlx4_cmd_cleanup(dev);

err_sriov:
	if (num_vfs && (dev->flags & MLX4_FLAG_SRIOV))
		pci_disable_sriov(pdev);

	if (!mlx4_is_slave(dev))
		mlx4_free_ownership(dev);

err_free_dev:
	kfree(priv);

err_release_regions:
	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
static int __devinit mlx4_init_one(struct pci_dev *pdev,
				   const struct pci_device_id *id)
{
	printk_once(KERN_INFO "%s", mlx4_version);

	return __mlx4_init_one(pdev, id);
}
static void mlx4_remove_one(struct pci_dev *pdev)
{
	struct mlx4_dev  *dev  = pci_get_drvdata(pdev);
	struct mlx4_priv *priv = mlx4_priv(dev);
	int p;

	if (dev) {
		/* in SRIOV it is not allowed to unload the pf's
		 * driver while there are live vf's */
		if (mlx4_is_master(dev)) {
			if (mlx4_how_many_lives_vf(dev))
				printk(KERN_ERR "Removing PF when there are assigned VF's !!!\n");
		}
		mlx4_stop_sense(dev);
		mlx4_unregister_device(dev);

		for (p = 1; p <= dev->caps.num_ports; p++) {
			mlx4_cleanup_port_info(&priv->port[p]);
			mlx4_CLOSE_PORT(dev, p);
		}

		mlx4_cleanup_counters_table(dev);
		mlx4_cleanup_mcg_table(dev);
		mlx4_cleanup_qp_table(dev);
		mlx4_cleanup_srq_table(dev);
		mlx4_cleanup_cq_table(dev);
		mlx4_cmd_use_polling(dev);
		mlx4_cleanup_eq_table(dev);
		mlx4_cleanup_mr_table(dev);
		mlx4_cleanup_xrcd_table(dev);
		mlx4_cleanup_pd_table(dev);

		if (mlx4_is_master(dev))
			mlx4_free_resource_tracker(dev);

		iounmap(priv->kar);
		mlx4_uar_free(dev, &priv->driver_uar);
		mlx4_cleanup_uar_table(dev);
		if (!mlx4_is_slave(dev))
			mlx4_clear_steering(dev);
		mlx4_free_eq_table(dev);
		if (mlx4_is_master(dev))
			mlx4_multi_func_cleanup(dev);
		mlx4_close_hca(dev);
		if (mlx4_is_slave(dev))
			mlx4_multi_func_cleanup(dev);
		mlx4_cmd_cleanup(dev);

		if (dev->flags & MLX4_FLAG_MSI_X)
			pci_disable_msix(pdev);
		if (num_vfs && (dev->flags & MLX4_FLAG_SRIOV)) {
			mlx4_warn(dev, "Disabling sriov\n");
			pci_disable_sriov(pdev);
		}

		if (!mlx4_is_slave(dev))
			mlx4_free_ownership(dev);
		kfree(priv);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
int mlx4_restart_one(struct pci_dev *pdev)
{
	mlx4_remove_one(pdev);
	return __mlx4_init_one(pdev, NULL);
}
static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
	/* MT25408 "Hermon" SDR */
	{ PCI_VDEVICE(MELLANOX, 0x6340), 0 },
	/* MT25408 "Hermon" DDR */
	{ PCI_VDEVICE(MELLANOX, 0x634a), 0 },
	/* MT25408 "Hermon" QDR */
	{ PCI_VDEVICE(MELLANOX, 0x6354), 0 },
	/* MT25408 "Hermon" DDR PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6732), 0 },
	/* MT25408 "Hermon" QDR PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x673c), 0 },
	/* MT25408 "Hermon" EN 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x6368), 0 },
	/* MT25408 "Hermon" EN 10GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6750), 0 },
	/* MT25458 ConnectX EN 10GBASE-T 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x6372), 0 },
	/* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x675a), 0 },
	/* MT26468 ConnectX EN 10GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6764), 0 },
	/* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
	{ PCI_VDEVICE(MELLANOX, 0x6746), 0 },
	/* MT26478 ConnectX2 40GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x676e), 0 },
	/* MT25400 Family [ConnectX-2 Virtual Function] */
	{ PCI_VDEVICE(MELLANOX, 0x1002), MLX4_VF },
	/* MT27500 Family [ConnectX-3] */
	{ PCI_VDEVICE(MELLANOX, 0x1003), 0 },
	/* MT27500 Family [ConnectX-3 Virtual Function] */
	{ PCI_VDEVICE(MELLANOX, 0x1004), MLX4_VF },
	{ PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1008), 0 }, /* MT27521 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1009), 0 }, /* MT27530 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100a), 0 }, /* MT27531 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100b), 0 }, /* MT27540 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100c), 0 }, /* MT27541 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100d), 0 }, /* MT27550 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100e), 0 }, /* MT27551 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100f), 0 }, /* MT27560 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1010), 0 }, /* MT27561 Family */
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx4_pci_table);
static struct pci_driver mlx4_driver = {
	.name		= DRV_NAME,
	.id_table	= mlx4_pci_table,
	.probe		= mlx4_init_one,
	.remove		= __devexit_p(mlx4_remove_one)
};
static int __init mlx4_verify_params(void)
{
	if ((log_num_mac < 0) || (log_num_mac > 7)) {
		pr_warning("mlx4_core: bad num_mac: %d\n", log_num_mac);
		return -1;
	}

	if (log_num_vlan != 0)
		pr_warning("mlx4_core: log_num_vlan - obsolete module param, using %d\n",
			   MLX4_LOG_NUM_VLANS);

	if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
		pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg);
		return -1;
	}

	/* Check if module param for ports type has legal combination */
	if (port_type_array[0] == false && port_type_array[1] == true) {
		printk(KERN_WARNING "Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
		port_type_array[0] = true;
	}

	return 0;
}
static int __init mlx4_init(void)
{
	int ret;

	if (mlx4_verify_params())
		return -EINVAL;

	mlx4_catas_init();

	mlx4_wq = create_singlethread_workqueue("mlx4");
	if (!mlx4_wq)
		return -ENOMEM;

	ret = pci_register_driver(&mlx4_driver);
	return ret < 0 ? ret : 0;
}
static void __exit mlx4_cleanup(void)
{
	pci_unregister_driver(&mlx4_driver);
	destroy_workqueue(mlx4_wq);
}

module_init(mlx4_init);
module_exit(mlx4_cleanup);