/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/delay.h>
#include <linux/netdevice.h>

#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>

#include "mlx4.h"
#include "fw.h"
#include "icm.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

struct workqueue_struct *mlx4_wq;

#ifdef CONFIG_MLX4_DEBUG

int mlx4_debug_level = 0;
module_param_named(debug_level, mlx4_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");

#endif /* CONFIG_MLX4_DEBUG */

#ifdef CONFIG_PCI_MSI

static int msi_x = 1;
module_param(msi_x, int, 0444);
MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");

#else /* CONFIG_PCI_MSI */

#define msi_x (0)

#endif /* CONFIG_PCI_MSI */

static int num_vfs;
module_param(num_vfs, int, 0444);
MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0");

static int probe_vf;
module_param(probe_vf, int, 0644);
MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)");

int mlx4_log_num_mgm_entry_size = 10;
module_param_named(log_num_mgm_entry_size,
                   mlx4_log_num_mgm_entry_size, int, 0444);
MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, which defines the number"
                                         " of QPs per MCG, for example:"
                                         " 10 gives 248. Range: 9 <="
                                         " log_num_mgm_entry_size <= 12");

#define MLX4_VF				(1 << 0)

#define HCA_GLOBAL_CAP_MASK		0
#define PF_CONTEXT_BEHAVIOUR_MASK	0

static char mlx4_version[] __devinitdata =
	DRV_NAME ": Mellanox ConnectX core driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

static struct mlx4_profile default_profile = {
	.rdmarc_per_qp	= 1 << 4,
	.num_mtt	= 1 << 20, /* It is really the number of MTT segments */
};

static int log_num_mac = 7;
module_param_named(log_num_mac, log_num_mac, int, 0444);
MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");

static int log_num_vlan;
module_param_named(log_num_vlan, log_num_vlan, int, 0444);
MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
/* Log2 max number of VLANs per ETH port (0-7) */
#define MLX4_LOG_NUM_VLANS 7

static bool use_prio;
module_param_named(use_prio, use_prio, bool, 0444);
MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
		  "(0/1, default 0)");

int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");

static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE};
static int arr_argc = 2;
module_param_array(port_type_array, int, &arr_argc, 0444);
MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default, "
				"1 for IB, 2 for Ethernet");

struct mlx4_port_config {
	struct list_head list;
	enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
	struct pci_dev *pdev;
};

int mlx4_check_port_params(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_type)
{
	int i;

	for (i = 0; i < dev->caps.num_ports - 1; i++) {
		if (port_type[i] != port_type[i + 1]) {
			if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
				mlx4_err(dev, "Only same port types supported "
					 "on this HCA, aborting.\n");
				return -EINVAL;
			}
			if (port_type[i] == MLX4_PORT_TYPE_ETH &&
			    port_type[i + 1] == MLX4_PORT_TYPE_IB)
				return -EINVAL;
		}
	}

	for (i = 0; i < dev->caps.num_ports; i++) {
		if (!(port_type[i] & dev->caps.supported_type[i+1])) {
			mlx4_err(dev, "Requested port type for port %d is not "
				 "supported on this HCA\n", i + 1);
			return -EINVAL;
		}
	}
	return 0;
}

static void mlx4_set_port_mask(struct mlx4_dev *dev)
{
	int i;

	for (i = 1; i <= dev->caps.num_ports; ++i)
		dev->caps.port_mask[i] = dev->caps.port_type[i];
}

static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	int err;
	int i;

	err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
		return err;
	}

	if (dev_cap->min_page_sz > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than "
			 "kernel PAGE_SIZE of %ld, aborting.\n",
			 dev_cap->min_page_sz, PAGE_SIZE);
		return -ENODEV;
	}
	if (dev_cap->num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, "
			 "aborting.\n",
			 dev_cap->num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}

	if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than "
			 "PCI resource 2 size of 0x%llx, aborting.\n",
			 dev_cap->uar_size,
			 (unsigned long long) pci_resource_len(dev->pdev, 2));
		return -ENODEV;
	}

	dev->caps.num_ports = dev_cap->num_ports;
	dev->phys_caps.num_phys_eqs = MLX4_MAX_EQ_NUM;
	for (i = 1; i <= dev->caps.num_ports; ++i) {
		dev->caps.vl_cap[i] = dev_cap->max_vl[i];
		dev->caps.ib_mtu_cap[i] = dev_cap->ib_mtu[i];
		dev->caps.gid_table_len[i] = dev_cap->max_gids[i];
		dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i];
		dev->caps.port_width_cap[i] = dev_cap->max_port_width[i];
		dev->caps.eth_mtu_cap[i] = dev_cap->eth_mtu[i];
		dev->caps.def_mac[i] = dev_cap->def_mac[i];
		dev->caps.supported_type[i] = dev_cap->supported_port_types[i];
		dev->caps.suggested_type[i] = dev_cap->suggested_type[i];
		dev->caps.default_sense[i] = dev_cap->default_sense[i];
		dev->caps.trans_type[i] = dev_cap->trans_type[i];
		dev->caps.vendor_oui[i] = dev_cap->vendor_oui[i];
		dev->caps.wavelength[i] = dev_cap->wavelength[i];
		dev->caps.trans_code[i] = dev_cap->trans_code[i];
	}

	dev->caps.uar_page_size = PAGE_SIZE;
	dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE;
	dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
	dev->caps.bf_reg_size = dev_cap->bf_reg_size;
	dev->caps.bf_regs_per_page = dev_cap->bf_regs_per_page;
	dev->caps.max_sq_sg = dev_cap->max_sq_sg;
	dev->caps.max_rq_sg = dev_cap->max_rq_sg;
	dev->caps.max_wqes = dev_cap->max_qp_sz;
	dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp;
	dev->caps.max_srq_wqes = dev_cap->max_srq_sz;
	dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1;
	dev->caps.reserved_srqs = dev_cap->reserved_srqs;
	dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz;
	dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz;
	dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
	/*
	 * Subtract 1 from the limit because we need to allocate a
	 * spare CQE so the HCA HW can tell the difference between an
	 * empty CQ and a full CQ.
	 */
	dev->caps.max_cqes = dev_cap->max_cq_sz - 1;
	dev->caps.reserved_cqs = dev_cap->reserved_cqs;
	dev->caps.reserved_eqs = dev_cap->reserved_eqs;
	dev->caps.reserved_mtts = dev_cap->reserved_mtts;
	dev->caps.reserved_mrws = dev_cap->reserved_mrws;

	/* The first 128 UARs are used for EQ doorbells */
	dev->caps.reserved_uars = max_t(int, 128, dev_cap->reserved_uars);
	dev->caps.reserved_pds = dev_cap->reserved_pds;
	dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
					dev_cap->reserved_xrcds : 0;
	dev->caps.max_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
					dev_cap->max_xrcds : 0;
	dev->caps.mtt_entry_sz = dev_cap->mtt_entry_sz;

	dev->caps.max_msg_sz = dev_cap->max_msg_sz;
	dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);

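	/*
	 * Example: min_page_sz = 4096 gives page_size_cap = 0xfffff000,
	 * i.e. every page size of 4 KB and up is usable by the HCA.
	 */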
	dev->caps.flags = dev_cap->flags;
	dev->caps.flags2 = dev_cap->flags2;
	dev->caps.bmme_flags = dev_cap->bmme_flags;
	dev->caps.reserved_lkey = dev_cap->reserved_lkey;
	dev->caps.stat_rate_support = dev_cap->stat_rate_support;
	dev->caps.max_gso_sz = dev_cap->max_gso_sz;
	dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;

	/* Sense port is always allowed on supported ConnectX-1 and -2 devices */
	if (dev->pdev->device != 0x1003)
		dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;

	dev->caps.log_num_macs = log_num_mac;
	dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
	dev->caps.log_num_prios = use_prio ? 3 : 0;

	for (i = 1; i <= dev->caps.num_ports; ++i) {
		dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
		if (dev->caps.supported_type[i]) {
			/* if only ETH is supported - assign ETH */
			if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH)
				dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
			/* if only IB is supported,
			 * assign IB only if SRIOV is off */
			else if (dev->caps.supported_type[i] ==
				 MLX4_PORT_TYPE_IB) {
				if (dev->flags & MLX4_FLAG_SRIOV)
					dev->caps.port_type[i] =
						MLX4_PORT_TYPE_NONE;
				else
					dev->caps.port_type[i] =
						MLX4_PORT_TYPE_IB;
			/* if IB and ETH are supported,
			 * first check if SRIOV is on */
			} else if (dev->flags & MLX4_FLAG_SRIOV)
				dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
			else {
				/* In non-SRIOV mode, we set the port type
				 * according to user selection of port type;
				 * if the user selected none, take the FW hint */
				if (port_type_array[i-1] == MLX4_PORT_TYPE_NONE)
					dev->caps.port_type[i] = dev->caps.suggested_type[i] ?
						MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB;
				else
					dev->caps.port_type[i] = port_type_array[i-1];
			}
		}
		/*
		 * Link sensing is allowed on the port if 3 conditions are true:
		 * 1. Both protocols are supported on the port.
		 * 2. Different types are supported on the port.
		 * 3. FW declared that it supports link sensing.
		 */
		mlx4_priv(dev)->sense.sense_allowed[i] =
			((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) &&
			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT));

		/*
		 * If the "default_sense" bit is set, we move the port to "AUTO" mode
		 * and perform the sense_port FW command to try and set the correct
		 * port type from the beginning.
		 */
		if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) {
			enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE;
			dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO;
			mlx4_SENSE_PORT(dev, i, &sensed_port);
			if (sensed_port != MLX4_PORT_TYPE_NONE)
				dev->caps.port_type[i] = sensed_port;
		} else
			dev->caps.possible_type[i] = dev->caps.port_type[i];

		if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
			dev->caps.log_num_macs = dev_cap->log_max_macs[i];
			mlx4_warn(dev, "Requested number of MACs is too high "
				  "for port %d, reducing to %d.\n",
				  i, 1 << dev->caps.log_num_macs);
		}
		if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) {
			dev->caps.log_num_vlans = dev_cap->log_max_vlans[i];
			mlx4_warn(dev, "Requested number of VLANs is too high "
				  "for port %d, reducing to %d.\n",
				  i, 1 << dev->caps.log_num_vlans);
		}
	}

	dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters);

	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
		(1 << dev->caps.log_num_macs) *
		(1 << dev->caps.log_num_vlans) *
		(1 << dev->caps.log_num_prios) *
		dev->caps.num_ports;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;

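	/*
	 * Worked example with the driver defaults (log_num_macs = 7,
	 * log_num_vlans = 7, log_num_prios = 0) on a 2-port HCA:
	 * each of the ETH_ADDR and FC_ADDR regions reserves
	 * 128 * 128 * 1 * 2 = 32768 QPs.
	 */
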
	dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];

	return 0;
}

/* Check whether there are live VFs and return how many there are. */
static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_state;
	int i;
	int ret = 0;

	for (i = 1; i < dev->num_slaves; ++i) { /* slave 0 is the PPF */
		s_state = &priv->mfunc.master.slave_state[i];
		if (s_state->active && s_state->last_cmd !=
		    MLX4_COMM_CMD_RESET) {
			mlx4_warn(dev, "%s: slave: %d is still active\n",
				  __func__, i);
			ret++;
		}
	}
	return ret;
}

int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_slave;

	if (!mlx4_is_master(dev))
		return 0;

	s_slave = &priv->mfunc.master.slave_state[slave];
	return !!s_slave->active;
}
EXPORT_SYMBOL(mlx4_is_slave_active);

static int mlx4_slave_cap(struct mlx4_dev *dev)
{
	int err;
	u32 page_size;
	struct mlx4_dev_cap dev_cap;
	struct mlx4_func_cap func_cap;
	struct mlx4_init_hca_param hca_param;
	int i;

	memset(&hca_param, 0, sizeof(hca_param));
	err = mlx4_QUERY_HCA(dev, &hca_param);
	if (err) {
		mlx4_err(dev, "QUERY_HCA command failed, aborting.\n");
		return err;
	}

	/* Fail if the HCA has an unknown capability */
	if ((hca_param.global_caps | HCA_GLOBAL_CAP_MASK) !=
	    HCA_GLOBAL_CAP_MASK) {
		mlx4_err(dev, "Unknown HCA global capabilities\n");
		return -ENOSYS;
	}

	mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;

	memset(&dev_cap, 0, sizeof(dev_cap));
	dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp;
	err = mlx4_dev_cap(dev, &dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
		return err;
	}

	err = mlx4_QUERY_FW(dev);
	if (err)
		mlx4_err(dev, "QUERY_FW command failed: could not get FW version.\n");

	page_size = ~dev->caps.page_size_cap + 1;
	mlx4_warn(dev, "HCA minimum page size: %d\n", page_size);
	if (page_size > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than "
			 "kernel PAGE_SIZE of %ld, aborting.\n",
			 page_size, PAGE_SIZE);
		return -ENODEV;
	}

	/* slave gets uar page size from QUERY_HCA fw command */
	dev->caps.uar_page_size = 1 << (hca_param.uar_page_sz + 12);

	/* TODO: relax this assumption */
	if (dev->caps.uar_page_size != PAGE_SIZE) {
		mlx4_err(dev, "UAR size: %d != kernel PAGE_SIZE of %ld\n",
			 dev->caps.uar_page_size, PAGE_SIZE);
		return -ENODEV;
	}

	memset(&func_cap, 0, sizeof(func_cap));
	err = mlx4_QUERY_FUNC_CAP(dev, &func_cap);
	if (err) {
		mlx4_err(dev, "QUERY_FUNC_CAP command failed, aborting.\n");
		return err;
	}

	if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) !=
	    PF_CONTEXT_BEHAVIOUR_MASK) {
		mlx4_err(dev, "Unknown pf context behaviour\n");
		return -ENOSYS;
	}

	dev->caps.num_ports = func_cap.num_ports;
	dev->caps.num_qps = func_cap.qp_quota;
	dev->caps.num_srqs = func_cap.srq_quota;
	dev->caps.num_cqs = func_cap.cq_quota;
	dev->caps.num_eqs = func_cap.max_eq;
	dev->caps.reserved_eqs = func_cap.reserved_eq;
	dev->caps.num_mpts = func_cap.mpt_quota;
	dev->caps.num_mtts = func_cap.mtt_quota;
	dev->caps.num_pds = MLX4_NUM_PDS;
	dev->caps.num_mgms = 0;
	dev->caps.num_amgms = 0;

	if (dev->caps.num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, "
			 "aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}

	for (i = 1; i <= dev->caps.num_ports; ++i)
		dev->caps.port_mask[i] = dev->caps.port_type[i];

	if (dev->caps.uar_page_size * (dev->caps.num_uars -
				       dev->caps.reserved_uars) >
	    pci_resource_len(dev->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than "
			 "PCI resource 2 size of 0x%llx, aborting.\n",
			 dev->caps.uar_page_size * dev->caps.num_uars,
			 (unsigned long long) pci_resource_len(dev->pdev, 2));
		return -ENODEV;
	}

	return 0;
}

/*
 * Change the port configuration of the device.
 * Every user of this function must hold the port mutex.
 */
int mlx4_change_port_types(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_types)
{
	int err = 0;
	int change = 0;
	int port;

	for (port = 0; port < dev->caps.num_ports; port++) {
		/* Change the port type only if the new type is different
		 * from the current, and not set to Auto */
		if (port_types[port] != dev->caps.port_type[port + 1])
			change = 1;
	}
	if (change) {
		mlx4_unregister_device(dev);
		for (port = 1; port <= dev->caps.num_ports; port++) {
			mlx4_CLOSE_PORT(dev, port);
			dev->caps.port_type[port] = port_types[port - 1];
			err = mlx4_SET_PORT(dev, port);
			if (err) {
				mlx4_err(dev, "Failed to set port %d, "
					 "aborting\n", port);
				goto out;
			}
		}
		mlx4_set_port_mask(dev);
		err = mlx4_register_device(dev);
	}

out:
	return err;
}

static ssize_t show_port_type(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	char type[8];

	sprintf(type, "%s",
		(mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ?
		"ib" : "eth");
	if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO)
		sprintf(buf, "auto (%s)\n", type);
	else
		sprintf(buf, "%s\n", type);

	return strlen(buf);
}

static ssize_t set_port_type(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	struct mlx4_priv *priv = mlx4_priv(mdev);
	enum mlx4_port_type types[MLX4_MAX_PORTS];
	enum mlx4_port_type new_types[MLX4_MAX_PORTS];
	int i;
	int err = 0;

	if (!strcmp(buf, "ib\n"))
		info->tmp_type = MLX4_PORT_TYPE_IB;
	else if (!strcmp(buf, "eth\n"))
		info->tmp_type = MLX4_PORT_TYPE_ETH;
	else if (!strcmp(buf, "auto\n"))
		info->tmp_type = MLX4_PORT_TYPE_AUTO;
	else {
		mlx4_err(mdev, "%s is not a supported port type\n", buf);
		return -EINVAL;
	}

	mlx4_stop_sense(mdev);
	mutex_lock(&priv->port_mutex);
	/* Possible type is always the one that was delivered */
	mdev->caps.possible_type[info->port] = info->tmp_type;

	for (i = 0; i < mdev->caps.num_ports; i++) {
		types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
					mdev->caps.possible_type[i+1];
		if (types[i] == MLX4_PORT_TYPE_AUTO)
			types[i] = mdev->caps.port_type[i+1];
	}

	if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
	    !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) {
		for (i = 1; i <= mdev->caps.num_ports; i++) {
			if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
				mdev->caps.possible_type[i] = mdev->caps.port_type[i];
				err = -EINVAL;
			}
		}
	}
	if (err) {
		mlx4_err(mdev, "Auto sensing is not supported on this HCA. "
			 "Set only 'eth' or 'ib' for both ports "
			 "(should be the same)\n");
		goto out;
	}

	mlx4_do_sense_ports(mdev, new_types, types);

	err = mlx4_check_port_params(mdev, new_types);
	if (err)
		goto out;

	/* We are about to apply the changes after the configuration
	 * was verified; no need to remember the temporary types
	 * any more */
	for (i = 0; i < mdev->caps.num_ports; i++)
		priv->port[i + 1].tmp_type = 0;

	err = mlx4_change_port_types(mdev, new_types);

out:
	mlx4_start_sense(mdev);
	mutex_unlock(&priv->port_mutex);
	return err ? err : count;
}

static inline int int_to_ibta_mtu(int mtu)
{
	switch (mtu) {
	case 256:  return IB_MTU_256;
	case 512:  return IB_MTU_512;
	case 1024: return IB_MTU_1024;
	case 2048: return IB_MTU_2048;
	case 4096: return IB_MTU_4096;
	default: return -1;
	}
}

static inline int ibta_mtu_to_int(enum ibta_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return  256;
	case IB_MTU_512:  return  512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default: return -1;
	}
}

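/*
 * Example: writing "2048" to the per-port mtu sysfs file maps to
 * IB_MTU_2048 via int_to_ibta_mtu(); anything other than the five IBTA
 * MTUs maps to -1 and is rejected by set_port_ib_mtu() below.
 */
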
static ssize_t show_port_ib_mtu(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_mtu_attr);
	struct mlx4_dev *mdev = info->dev;

	if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH)
		mlx4_warn(mdev, "port level mtu is only used for IB ports\n");

	sprintf(buf, "%d\n",
		ibta_mtu_to_int(mdev->caps.port_ib_mtu[info->port]));
	return strlen(buf);
}

static ssize_t set_port_ib_mtu(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_mtu_attr);
	struct mlx4_dev *mdev = info->dev;
	struct mlx4_priv *priv = mlx4_priv(mdev);
	int err, port, mtu, ibta_mtu = -1;

	if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) {
		mlx4_warn(mdev, "port level mtu is only used for IB ports\n");
		return -EINVAL;
	}

	err = sscanf(buf, "%d", &mtu);
	if (err > 0)
		ibta_mtu = int_to_ibta_mtu(mtu);

	if (err <= 0 || ibta_mtu < 0) {
		mlx4_err(mdev, "%s is an invalid IBTA mtu\n", buf);
		return -EINVAL;
	}

	mdev->caps.port_ib_mtu[info->port] = ibta_mtu;

	mlx4_stop_sense(mdev);
	mutex_lock(&priv->port_mutex);
	mlx4_unregister_device(mdev);
	for (port = 1; port <= mdev->caps.num_ports; port++) {
		mlx4_CLOSE_PORT(mdev, port);
		err = mlx4_SET_PORT(mdev, port);
		if (err) {
			mlx4_err(mdev, "Failed to set port %d, "
				 "aborting\n", port);
			goto err_set_port;
		}
	}
	err = mlx4_register_device(mdev);
err_set_port:
	mutex_unlock(&priv->port_mutex);
	mlx4_start_sense(mdev);
	return err ? err : count;
}

static int mlx4_load_fw(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
					 GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.fw_icm) {
		mlx4_err(dev, "Couldn't allocate FW area, aborting.\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
	if (err) {
		mlx4_err(dev, "MAP_FA command failed, aborting.\n");
		goto err_free;
	}

	err = mlx4_RUN_FW(dev);
	if (err) {
		mlx4_err(dev, "RUN_FW command failed, aborting.\n");
		goto err_unmap_fa;
	}

	return 0;

err_unmap_fa:
	mlx4_UNMAP_FA(dev);

err_free:
	mlx4_free_icm(dev, priv->fw.fw_icm, 0);
	return err;
}

static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
				int cmpt_entry_sz)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int num_eqs;

	err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_QP *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err)
		goto err;

	err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_SRQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err)
		goto err_qp;

	err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_CQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err)
		goto err_srq;

	num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs :
					  dev->caps.num_eqs;
	err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_EQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, num_eqs, num_eqs, 0, 0);
	if (err)
		goto err_cq;

	return 0;

err_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);

err_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);

err_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err:
	return err;
}

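/*
 * Layout note: the address arithmetic above gives each object type its
 * own slice of the cMPT area, at
 * cmpt_base + ((type * cmpt_entry_sz) << MLX4_CMPT_SHIFT)
 * for type = MLX4_CMPT_TYPE_QP, _SRQ, _CQ and _EQ respectively.
 */
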
static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
			 struct mlx4_init_hca_param *init_hca, u64 icm_size)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 aux_pages;
	int num_eqs;
	int err;

	err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
	if (err) {
		mlx4_err(dev, "SET_ICM_SIZE command failed, aborting.\n");
		return err;
	}

	mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory.\n",
		 (unsigned long long) icm_size >> 10,
		 (unsigned long long) aux_pages << 2);

	priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
					  GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.aux_icm) {
		mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
	if (err) {
		mlx4_err(dev, "MAP_ICM_AUX command failed, aborting.\n");
		goto err_free_aux;
	}

	err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
	if (err) {
		mlx4_err(dev, "Failed to map cMPT context memory, aborting.\n");
		goto err_unmap_aux;
	}

	num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs :
					  dev->caps.num_eqs;
	err = mlx4_init_icm_table(dev, &priv->eq_table.table,
				  init_hca->eqc_base, dev_cap->eqc_entry_sz,
				  num_eqs, num_eqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
		goto err_unmap_cmpt;
	}

	/*
	 * Reserved MTT entries must be aligned up to a cacheline
	 * boundary, since the FW will write to them, while the driver
	 * writes to all other MTT entries. (The variable
	 * dev->caps.mtt_entry_sz below is really the MTT segment
	 * size, not the raw entry size)
	 */
	dev->caps.reserved_mtts =
		ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz,
		      dma_get_cache_alignment()) / dev->caps.mtt_entry_sz;

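	/*
	 * Example (assuming a 64-byte cacheline and an 8-byte entry):
	 * reserved_mtts is rounded up to the next multiple of 8 entries,
	 * so FW-owned and driver-owned entries never share a cacheline.
	 */
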
	err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
				  init_hca->mtt_base,
				  dev->caps.mtt_entry_sz,
				  dev->caps.num_mtts,
				  dev->caps.reserved_mtts, 1, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MTT context memory, aborting.\n");
		goto err_unmap_eq;
	}

	err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table,
				  init_hca->dmpt_base,
				  dev_cap->dmpt_entry_sz,
				  dev->caps.num_mpts,
				  dev->caps.reserved_mrws, 1, 1);
	if (err) {
		mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n");
		goto err_unmap_mtt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,
				  init_hca->qpc_base,
				  dev_cap->qpc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map QP context memory, aborting.\n");
		goto err_unmap_dmpt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,
				  init_hca->auxc_base,
				  dev_cap->aux_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n");
		goto err_unmap_qp;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,
				  init_hca->altc_base,
				  dev_cap->altc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n");
		goto err_unmap_auxc;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,
				  init_hca->rdmarc_base,
				  dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
		goto err_unmap_altc;
	}

	err = mlx4_init_icm_table(dev, &priv->cq_table.table,
				  init_hca->cqc_base,
				  dev_cap->cqc_entry_sz,
				  dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map CQ context memory, aborting.\n");
		goto err_unmap_rdmarc;
	}

	err = mlx4_init_icm_table(dev, &priv->srq_table.table,
				  init_hca->srqc_base,
				  dev_cap->srq_entry_sz,
				  dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n");
		goto err_unmap_cq;
	}

	/*
	 * It's not strictly required, but for simplicity just map the
	 * whole multicast group table now. The table isn't very big
	 * and it's a lot easier than trying to track ref counts.
	 */
	err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
				  init_hca->mc_base,
				  mlx4_get_mgm_entry_size(dev),
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MCG context memory, aborting.\n");
		goto err_unmap_srq;
	}

	return 0;

err_unmap_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);

err_unmap_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);

err_unmap_rdmarc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);

err_unmap_altc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);

err_unmap_auxc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);

err_unmap_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);

err_unmap_dmpt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);

err_unmap_mtt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);

err_unmap_eq:
	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);

err_unmap_cmpt:
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err_unmap_aux:
	mlx4_UNMAP_ICM_AUX(dev);

err_free_aux:
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);

	return err;
}

static void mlx4_free_icms(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

	mlx4_UNMAP_ICM_AUX(dev);
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);
}

static void mlx4_slave_exit(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	down(&priv->cmd.slave_sem);
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME))
		mlx4_warn(dev, "Failed to close slave function.\n");
	up(&priv->cmd.slave_sem);
}

static int map_bf_area(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	resource_size_t bf_start;
	resource_size_t bf_len;
	int err = 0;

	if (!dev->caps.bf_reg_size)
		return -ENXIO;

	bf_start = pci_resource_start(dev->pdev, 2) +
			(dev->caps.num_uars << PAGE_SHIFT);
	bf_len = pci_resource_len(dev->pdev, 2) -
			(dev->caps.num_uars << PAGE_SHIFT);
	priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
	if (!priv->bf_mapping)
		err = -ENOMEM;

	return err;
}

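/*
 * BAR 2 layout behind the mapping above: the first num_uars pages hold
 * the UAR doorbell pages; the BlueFlame registers occupy the remainder
 * of the BAR and are mapped write-combining for low-latency sends.
 */
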
static void unmap_bf_area(struct mlx4_dev *dev)
{
	if (mlx4_priv(dev)->bf_mapping)
		io_mapping_free(mlx4_priv(dev)->bf_mapping);
}

static void mlx4_close_hca(struct mlx4_dev *dev)
{
	unmap_bf_area(dev);
	if (mlx4_is_slave(dev))
		mlx4_slave_exit(dev);
	else {
		mlx4_CLOSE_HCA(dev, 0);
		mlx4_free_icms(dev);
		mlx4_UNMAP_FA(dev);
		mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
	}
}

static int mlx4_init_slave(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 dma = (u64) priv->mfunc.vhcr_dma;
	int num_of_reset_retries = NUM_OF_RESET_RETRIES;
	int ret_from_reset = 0;
	u32 slave_read;
	u32 cmd_channel_ver;

	down(&priv->cmd.slave_sem);
	priv->cmd.max_cmds = 1;
	mlx4_warn(dev, "Sending reset\n");
	ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0,
				       MLX4_COMM_TIME);
	/* if we are in the middle of FLR, the slave will try
	 * NUM_OF_RESET_RETRIES times before leaving. */
	if (ret_from_reset) {
		if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) {
			msleep(SLEEP_TIME_IN_RESET);
			while (ret_from_reset && num_of_reset_retries) {
				mlx4_warn(dev, "slave is currently in the "
					  "middle of FLR. Retrying... "
					  "(try %d)\n",
					  (NUM_OF_RESET_RETRIES -
					   num_of_reset_retries + 1));
				ret_from_reset =
					mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET,
						      0, MLX4_COMM_TIME);
				num_of_reset_retries = num_of_reset_retries - 1;
			}
		} else
			goto err;
	}

	/* check the driver version - the slave I/F revision
	 * must match the master's */
	slave_read = swab32(readl(&priv->mfunc.comm->slave_read));
	cmd_channel_ver = mlx4_comm_get_version();

	if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) !=
	    MLX4_COMM_GET_IF_REV(slave_read)) {
		mlx4_err(dev, "slave driver version is not supported"
			 " by the master\n");
		goto err;
	}

	mlx4_warn(dev, "Sending vhcr0\n");
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48,
			  MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32,
			  MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16,
			  MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, MLX4_COMM_TIME))
		goto err;
	up(&priv->cmd.slave_sem);
	return 0;

err:
	mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 0);
	up(&priv->cmd.slave_sem);
	return -EIO;
}

static int mlx4_init_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_adapter adapter;
	struct mlx4_dev_cap dev_cap;
	struct mlx4_mod_stat_cfg mlx4_cfg;
	struct mlx4_profile profile;
	struct mlx4_init_hca_param init_hca;
	u64 icm_size;
	int err;

	if (!mlx4_is_slave(dev)) {
		err = mlx4_QUERY_FW(dev);
		if (err) {
			if (err == -EACCES)
				mlx4_info(dev, "non-primary physical function, skipping.\n");
			else
				mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
			return err;
		}

		err = mlx4_load_fw(dev);
		if (err) {
			mlx4_err(dev, "Failed to start FW, aborting.\n");
			return err;
		}

		mlx4_cfg.log_pg_sz_m = 1;
		mlx4_cfg.log_pg_sz = 0;
		err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
		if (err)
			mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");

		err = mlx4_dev_cap(dev, &dev_cap);
		if (err) {
			mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
			goto err_stop_fw;
		}

		profile = default_profile;

		icm_size = mlx4_make_profile(dev, &profile, &dev_cap,
					     &init_hca);
		if ((long long) icm_size < 0) {
			err = icm_size;
			goto err_stop_fw;
		}

		dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;

		init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
		init_hca.uar_page_sz = PAGE_SHIFT - 12;

		err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
		if (err)
			goto err_stop_fw;

		err = mlx4_INIT_HCA(dev, &init_hca);
		if (err) {
			mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
			goto err_free_icm;
		}
	} else {
		err = mlx4_init_slave(dev);
		if (err) {
			mlx4_err(dev, "Failed to initialize slave\n");
			return err;
		}

		err = mlx4_slave_cap(dev);
		if (err) {
			mlx4_err(dev, "Failed to obtain slave caps\n");
			goto err_close;
		}
	}

	if (map_bf_area(dev))
		mlx4_dbg(dev, "Failed to map blue flame area\n");

	/* Only the master sets the ports; everyone else gets them from it. */
	if (!mlx4_is_slave(dev))
		mlx4_set_port_mask(dev);

	err = mlx4_QUERY_ADAPTER(dev, &adapter);
	if (err) {
		mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n");
		goto unmap_bf;
	}

	priv->eq_table.inta_pin = adapter.inta_pin;
	memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id);

	return 0;

unmap_bf:
	unmap_bf_area(dev);

err_close:
	mlx4_close_hca(dev);

err_free_icm:
	if (!mlx4_is_slave(dev))
		mlx4_free_icms(dev);

err_stop_fw:
	if (!mlx4_is_slave(dev)) {
		mlx4_UNMAP_FA(dev);
		mlx4_free_icm(dev, priv->fw.fw_icm, 0);
	}
	return err;
}

static int mlx4_init_counters_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int nent;

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
		return -ENOENT;

	nent = dev->caps.max_counters;
	return mlx4_bitmap_init(&priv->counters_bitmap, nent, nent - 1, 0, 0);
}

static void mlx4_cleanup_counters_table(struct mlx4_dev *dev)
{
	mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap);
}

int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
		return -ENOENT;

	*idx = mlx4_bitmap_alloc(&priv->counters_bitmap);
	if (*idx == -1)
		return -ENOMEM;

	return 0;
}

int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
{
	u64 out_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		err = mlx4_cmd_imm(dev, 0, &out_param, RES_COUNTER,
				   RES_OP_RESERVE, MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (!err)
			*idx = get_param_l(&out_param);

		return err;
	}
	return __mlx4_counter_alloc(dev, idx);
}
EXPORT_SYMBOL_GPL(mlx4_counter_alloc);

void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
{
	mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx);
}

void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
{
	u64 in_param;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, idx);
		mlx4_cmd(dev, in_param, RES_COUNTER, RES_OP_RESERVE,
			 MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
			 MLX4_CMD_WRAPPED);
		return;
	}
	__mlx4_counter_free(dev, idx);
}
EXPORT_SYMBOL_GPL(mlx4_counter_free);

static int mlx4_setup_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int port;
	__be32 ib_port_default_caps;

	err = mlx4_init_uar_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "user access region table, aborting.\n");
		return err;
	}

	err = mlx4_uar_alloc(dev, &priv->driver_uar);
	if (err) {
		mlx4_err(dev, "Failed to allocate driver access region, "
			 "aborting.\n");
		goto err_uar_table_free;
	}

	priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!priv->kar) {
		mlx4_err(dev, "Couldn't map kernel access region, "
			 "aborting.\n");
		err = -ENOMEM;
		goto err_uar_free;
	}

	err = mlx4_init_pd_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "protection domain table, aborting.\n");
		goto err_kar_unmap;
	}

	err = mlx4_init_xrcd_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "reliable connection domain table, aborting.\n");
		goto err_pd_table_free;
	}

	err = mlx4_init_mr_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "memory region table, aborting.\n");
		goto err_xrcd_table_free;
	}

	err = mlx4_init_eq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "event queue table, aborting.\n");
		goto err_mr_table_free;
	}

	err = mlx4_cmd_use_events(dev);
	if (err) {
		mlx4_err(dev, "Failed to switch to event-driven "
			 "firmware commands, aborting.\n");
		goto err_eq_table_free;
	}

	err = mlx4_NOP(dev);
	if (err) {
		if (dev->flags & MLX4_FLAG_MSI_X) {
			mlx4_warn(dev, "NOP command failed to generate MSI-X "
				  "interrupt (IRQ %d).\n",
				  priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
			mlx4_warn(dev, "Trying again without MSI-X.\n");
		} else {
			mlx4_err(dev, "NOP command failed to generate interrupt "
				 "(IRQ %d), aborting.\n",
				 priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
			mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
		}

		goto err_cmd_poll;
	}

	mlx4_dbg(dev, "NOP command IRQ test passed\n");

	err = mlx4_init_cq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "completion queue table, aborting.\n");
		goto err_cmd_poll;
	}

	err = mlx4_init_srq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "shared receive queue table, aborting.\n");
		goto err_cq_table_free;
	}

	err = mlx4_init_qp_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "queue pair table, aborting.\n");
		goto err_srq_table_free;
	}

	if (!mlx4_is_slave(dev)) {
		err = mlx4_init_mcg_table(dev);
		if (err) {
			mlx4_err(dev, "Failed to initialize "
				 "multicast group table, aborting.\n");
			goto err_qp_table_free;
		}
	}

	err = mlx4_init_counters_table(dev);
	if (err && err != -ENOENT) {
		mlx4_err(dev, "Failed to initialize counters table, aborting.\n");
		goto err_mcg_table_free;
	}

	if (!mlx4_is_slave(dev)) {
		for (port = 1; port <= dev->caps.num_ports; port++) {
			ib_port_default_caps = 0;
			err = mlx4_get_port_ib_caps(dev, port,
						    &ib_port_default_caps);
			if (err)
				mlx4_warn(dev, "failed to get port %d default "
					  "ib capabilities (%d). Continuing "
					  "with caps = 0\n", port, err);
			dev->caps.ib_port_def_cap[port] = ib_port_default_caps;

			if (mlx4_is_mfunc(dev))
				dev->caps.port_ib_mtu[port] = IB_MTU_2048;
			else
				dev->caps.port_ib_mtu[port] = IB_MTU_4096;

			err = mlx4_SET_PORT(dev, port);
			if (err) {
				mlx4_err(dev, "Failed to set port %d, aborting\n",
					 port);
				goto err_counters_table_free;
			}
		}
	}

	return 0;

err_counters_table_free:
	mlx4_cleanup_counters_table(dev);

err_mcg_table_free:
	mlx4_cleanup_mcg_table(dev);

err_qp_table_free:
	mlx4_cleanup_qp_table(dev);

err_srq_table_free:
	mlx4_cleanup_srq_table(dev);

err_cq_table_free:
	mlx4_cleanup_cq_table(dev);

err_cmd_poll:
	mlx4_cmd_use_polling(dev);

err_eq_table_free:
	mlx4_cleanup_eq_table(dev);

err_mr_table_free:
	mlx4_cleanup_mr_table(dev);

err_xrcd_table_free:
	mlx4_cleanup_xrcd_table(dev);

err_pd_table_free:
	mlx4_cleanup_pd_table(dev);

err_kar_unmap:
	iounmap(priv->kar);

err_uar_free:
	mlx4_uar_free(dev, &priv->driver_uar);

err_uar_table_free:
	mlx4_cleanup_uar_table(dev);
	return err;
}

static void mlx4_enable_msi_x(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct msix_entry *entries;
	int nreq = min_t(int, dev->caps.num_ports *
			 min_t(int, netif_get_num_default_rss_queues() + 1,
			       MAX_MSIX_P_PORT) + MSIX_LEGACY_SZ, MAX_MSIX);
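	/*
	 * Worked example: a 2-port HCA with the default of 8 RSS queues
	 * asks for 2 * min(8 + 1, MAX_MSIX_P_PORT) + MSIX_LEGACY_SZ
	 * vectors, capped at MAX_MSIX, before the clamping below.
	 */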
	int err;
	int i;

	if (msi_x) {
		/* In multifunction mode each function gets 2 MSI-X vectors:
		 * one for data path completions and the other for async events
		 * or command completions */
		if (mlx4_is_mfunc(dev)) {
			nreq = 2;
		} else {
			nreq = min_t(int, dev->caps.num_eqs -
				     dev->caps.reserved_eqs, nreq);
		}

		entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
		if (!entries)
			goto no_msi;

		for (i = 0; i < nreq; ++i)
			entries[i].entry = i;

	retry:
		err = pci_enable_msix(dev->pdev, entries, nreq);
		if (err) {
			/* Try again if at least 2 vectors are available */
			if (err > 1) {
				mlx4_info(dev, "Requested %d vectors, "
					  "but only %d MSI-X vectors available, "
					  "trying again\n", nreq, err);
				nreq = err;
				goto retry;
			}
			kfree(entries);
			goto no_msi;
		}

		if (nreq <
		    MSIX_LEGACY_SZ + dev->caps.num_ports * MIN_MSIX_P_PORT) {
			/* Working in legacy mode, all EQs are shared */
			dev->caps.comp_pool = 0;
			dev->caps.num_comp_vectors = nreq - 1;
		} else {
			dev->caps.comp_pool = nreq - MSIX_LEGACY_SZ;
			dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1;
		}
		for (i = 0; i < nreq; ++i)
			priv->eq_table.eq[i].irq = entries[i].vector;

		dev->flags |= MLX4_FLAG_MSI_X;

		kfree(entries);
		return;
	}

no_msi:
	dev->caps.num_comp_vectors = 1;
	dev->caps.comp_pool = 0;

	for (i = 0; i < 2; ++i)
		priv->eq_table.eq[i].irq = dev->pdev->irq;
}

static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	int err = 0;

	info->dev = dev;
	info->port = port;
	if (!mlx4_is_slave(dev)) {
		INIT_RADIX_TREE(&info->mac_tree, GFP_KERNEL);
		mlx4_init_mac_table(dev, &info->mac_table);
		mlx4_init_vlan_table(dev, &info->vlan_table);
		info->base_qpn =
			dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
			(port - 1) * (1 << log_num_mac);
	}

	sprintf(info->dev_name, "mlx4_port%d", port);
	info->port_attr.attr.name = info->dev_name;
	if (mlx4_is_mfunc(dev))
		info->port_attr.attr.mode = S_IRUGO;
	else {
		info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
		info->port_attr.store = set_port_type;
	}
	info->port_attr.show = show_port_type;
	sysfs_attr_init(&info->port_attr.attr);

	err = device_create_file(&dev->pdev->dev, &info->port_attr);
	if (err) {
		mlx4_err(dev, "Failed to create file for port %d\n", port);
		info->port = -1;
	}

	sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port);
	info->port_mtu_attr.attr.name = info->dev_mtu_name;
	if (mlx4_is_mfunc(dev))
		info->port_mtu_attr.attr.mode = S_IRUGO;
	else {
		info->port_mtu_attr.attr.mode = S_IRUGO | S_IWUSR;
		info->port_mtu_attr.store = set_port_ib_mtu;
	}
	info->port_mtu_attr.show = show_port_ib_mtu;
	sysfs_attr_init(&info->port_mtu_attr.attr);

	err = device_create_file(&dev->pdev->dev, &info->port_mtu_attr);
	if (err) {
		mlx4_err(dev, "Failed to create mtu file for port %d\n", port);
		device_remove_file(&info->dev->pdev->dev, &info->port_attr);
		info->port = -1;
	}

	return err;
}

static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
{
	if (info->port < 0)
		return;

	device_remove_file(&info->dev->pdev->dev, &info->port_attr);
	device_remove_file(&info->dev->pdev->dev, &info->port_mtu_attr);
}

static int mlx4_init_steering(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int num_entries = dev->caps.num_ports;
	int i, j;

	priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL);
	if (!priv->steer)
		return -ENOMEM;

	for (i = 0; i < num_entries; i++)
		for (j = 0; j < MLX4_NUM_STEERS; j++) {
			INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]);
			INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]);
		}
	return 0;
}

static void mlx4_clear_steering(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_steer_index *entry, *tmp_entry;
	struct mlx4_promisc_qp *pqp, *tmp_pqp;
	int num_entries = dev->caps.num_ports;
	int i, j;

	for (i = 0; i < num_entries; i++) {
		for (j = 0; j < MLX4_NUM_STEERS; j++) {
			list_for_each_entry_safe(pqp, tmp_pqp,
						 &priv->steer[i].promisc_qps[j],
						 list) {
				list_del(&pqp->list);
				kfree(pqp);
			}
			list_for_each_entry_safe(entry, tmp_entry,
						 &priv->steer[i].steer_entries[j],
						 list) {
				list_del(&entry->list);
				list_for_each_entry_safe(pqp, tmp_pqp,
							 &entry->duplicates,
							 list) {
					list_del(&pqp->list);
					kfree(pqp);
				}
				kfree(entry);
			}
		}
	}
	kfree(priv->steer);
}

static int extended_func_num(struct pci_dev *pdev)
{
	return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn);
}

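/*
 * Example: a devfn encoding slot 2, function 3 yields 2 * 8 + 3 = 19,
 * a device-wide function index that __mlx4_init_one() compares against
 * probe_vf when deciding whether the PF should probe a VF.
 */
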
#define MLX4_OWNER_BASE	0x8069c
#define MLX4_OWNER_SIZE	4

static int mlx4_get_ownership(struct mlx4_dev *dev)
{
	void __iomem *owner;
	u32 ret;

	owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
			MLX4_OWNER_SIZE);
	if (!owner) {
		mlx4_err(dev, "Failed to obtain ownership bit\n");
		return -ENOMEM;
	}

	ret = readl(owner);
	iounmap(owner);
	return (int) !!ret;
}

static void mlx4_free_ownership(struct mlx4_dev *dev)
{
	void __iomem *owner;

	owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
			MLX4_OWNER_SIZE);
	if (!owner) {
		mlx4_err(dev, "Failed to obtain ownership bit\n");
		return;
	}

	writel(0, owner);
	iounmap(owner);
}

static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mlx4_priv *priv;
	struct mlx4_dev *dev;
	int err;
	int port;

	pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, "
			"aborting.\n");
		return err;
	}
	if (num_vfs > MLX4_MAX_NUM_VF) {
		printk(KERN_ERR "There are more VFs (%d) than allowed (%d)\n",
		       num_vfs, MLX4_MAX_NUM_VF);
		return -EINVAL;
	}
	/*
	 * Check for BARs.
	 */
	if (((id == NULL) || !(id->driver_data & MLX4_VF)) &&
	    !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing DCS, aborting. "
			"(id == 0x%p, id->driver_data: 0x%lx, "
			"pci_resource_flags(pdev, 0): 0x%lx)\n", id,
			id ? id->driver_data : 0, pci_resource_flags(pdev, 0));
		err = -ENODEV;
		goto err_disable_pdev;
	}
	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing UAR, aborting.\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
			goto err_release_regions;
		}
	}
	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
			 "consistent PCI DMA mask.\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
				"aborting.\n");
			goto err_release_regions;
		}
	}

	/* Allow large DMA segments, up to the firmware limit of 1 GB */
	dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);

	priv = kzalloc(sizeof *priv, GFP_KERNEL);
	if (!priv) {
		dev_err(&pdev->dev, "Device struct alloc failed, "
			"aborting.\n");
		err = -ENOMEM;
		goto err_release_regions;
	}

	dev = &priv->dev;
	dev->pdev = pdev;
	INIT_LIST_HEAD(&priv->ctx_list);
	spin_lock_init(&priv->ctx_lock);

	mutex_init(&priv->port_mutex);

	INIT_LIST_HEAD(&priv->pgdir_list);
	mutex_init(&priv->pgdir_mutex);

	INIT_LIST_HEAD(&priv->bf_list);
	mutex_init(&priv->bf_mutex);

	dev->rev_id = pdev->revision;
	/* Detect if this device is a virtual function */
	if (id && id->driver_data & MLX4_VF) {
		/* When acting as pf, we normally skip vfs unless explicitly
		 * requested to probe them. */
		if (num_vfs && extended_func_num(pdev) > probe_vf) {
			mlx4_warn(dev, "Skipping virtual function: %d\n",
				  extended_func_num(pdev));
			err = -ENODEV;
			goto err_free_dev;
		}
		mlx4_warn(dev, "Detected virtual function - running in slave mode\n");
		dev->flags |= MLX4_FLAG_SLAVE;
	} else {
		/* We reset the device and enable SRIOV only for physical
		 * devices. Try to claim ownership on the device;
		 * if already taken, skip -- do not allow multiple PFs */
		err = mlx4_get_ownership(dev);
		if (err) {
			if (err < 0)
				goto err_free_dev;
			else {
				mlx4_warn(dev, "Multiple PFs not yet supported."
					  " Skipping PF.\n");
				err = -EINVAL;
				goto err_free_dev;
			}
		}

		if (num_vfs) {
			mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", num_vfs);
			err = pci_enable_sriov(pdev, num_vfs);
			if (err) {
				mlx4_err(dev, "Failed to enable SR-IOV, "
					 "continuing without SR-IOV "
					 "(err = %d).\n", err);
				err = 0;
			} else {
				mlx4_warn(dev, "Running in master mode\n");
				dev->flags |= MLX4_FLAG_SRIOV |
					      MLX4_FLAG_MASTER;
				dev->num_vfs = num_vfs;
			}
		}

		/*
		 * Now reset the HCA before we touch the PCI capabilities or
		 * attempt a firmware command, since a boot ROM may have left
		 * the HCA in an undefined state.
		 */
		err = mlx4_reset(dev);
		if (err) {
			mlx4_err(dev, "Failed to reset HCA, aborting.\n");
			goto err_sriov;
		}
	}

slave_start:
	if (mlx4_cmd_init(dev)) {
		mlx4_err(dev, "Failed to init command interface, aborting.\n");
		goto err_sriov;
	}

	/* In slave functions, the communication channel must be initialized
	 * before posting commands. Also, init num_slaves before calling
	 * mlx4_init_hca */
	if (mlx4_is_mfunc(dev)) {
		if (mlx4_is_master(dev))
			dev->num_slaves = MLX4_MAX_NUM_SLAVES;
		else {
			dev->num_slaves = 0;
			if (mlx4_multi_func_init(dev)) {
				mlx4_err(dev, "Failed to init slave mfunc"
					 " interface, aborting.\n");
				goto err_cmd;
			}
		}
	}

	err = mlx4_init_hca(dev);
	if (err) {
		if (err == -EACCES) {
			/* Not primary Physical function
			 * Running in slave mode */
			mlx4_cmd_cleanup(dev);
			dev->flags |= MLX4_FLAG_SLAVE;
			dev->flags &= ~MLX4_FLAG_MASTER;
			goto slave_start;
		} else
			goto err_mfunc;
	}

	/* In master functions, the communication channel must be initialized
	 * after obtaining its address from fw */
	if (mlx4_is_master(dev)) {
		if (mlx4_multi_func_init(dev)) {
			mlx4_err(dev, "Failed to init master mfunc "
				 "interface, aborting.\n");
			goto err_close;
		}
	}

	err = mlx4_alloc_eq_table(dev);
	if (err)
		goto err_master_mfunc;

	priv->msix_ctl.pool_bm = 0;
	mutex_init(&priv->msix_ctl.pool_lock);

	mlx4_enable_msi_x(dev);
	if ((mlx4_is_mfunc(dev)) &&
	    !(dev->flags & MLX4_FLAG_MSI_X)) {
		mlx4_err(dev, "INTx is not supported in multi-function mode, "
			 "aborting.\n");
		goto err_free_eq;
	}

	if (!mlx4_is_slave(dev)) {
		err = mlx4_init_steering(dev);
		if (err)
			goto err_free_eq;
	}

	err = mlx4_setup_hca(dev);
	if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) &&
	    !mlx4_is_mfunc(dev)) {
		dev->flags &= ~MLX4_FLAG_MSI_X;
		dev->caps.num_comp_vectors = 1;
		dev->caps.comp_pool = 0;
		pci_disable_msix(pdev);
		err = mlx4_setup_hca(dev);
	}

	if (err)
		goto err_steer;

	for (port = 1; port <= dev->caps.num_ports; port++) {
		err = mlx4_init_port_info(dev, port);
		if (err)
			goto err_port;
	}

	err = mlx4_register_device(dev);
	if (err)
		goto err_port;

	mlx4_sense_init(dev);
	mlx4_start_sense(dev);

	pci_set_drvdata(pdev, dev);

	return 0;

err_port:
	for (--port; port >= 1; --port)
		mlx4_cleanup_port_info(&priv->port[port]);

	mlx4_cleanup_counters_table(dev);
	mlx4_cleanup_mcg_table(dev);
	mlx4_cleanup_qp_table(dev);
	mlx4_cleanup_srq_table(dev);
	mlx4_cleanup_cq_table(dev);
	mlx4_cmd_use_polling(dev);
	mlx4_cleanup_eq_table(dev);
	mlx4_cleanup_mr_table(dev);
	mlx4_cleanup_xrcd_table(dev);
	mlx4_cleanup_pd_table(dev);
	mlx4_cleanup_uar_table(dev);

err_steer:
	if (!mlx4_is_slave(dev))
		mlx4_clear_steering(dev);

err_free_eq:
	mlx4_free_eq_table(dev);

err_master_mfunc:
	if (mlx4_is_master(dev))
		mlx4_multi_func_cleanup(dev);

err_close:
	if (dev->flags & MLX4_FLAG_MSI_X)
		pci_disable_msix(pdev);

	mlx4_close_hca(dev);

err_mfunc:
	if (mlx4_is_slave(dev))
		mlx4_multi_func_cleanup(dev);

err_cmd:
	mlx4_cmd_cleanup(dev);

err_sriov:
	if (dev->flags & MLX4_FLAG_SRIOV)
		pci_disable_sriov(pdev);

	if (!mlx4_is_slave(dev))
		mlx4_free_ownership(dev);

err_free_dev:
	kfree(priv);

err_release_regions:
	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static int __devinit mlx4_init_one(struct pci_dev *pdev,
				   const struct pci_device_id *id)
{
	printk_once(KERN_INFO "%s", mlx4_version);

	return __mlx4_init_one(pdev, id);
}

static void mlx4_remove_one(struct pci_dev *pdev)
{
	struct mlx4_dev *dev = pci_get_drvdata(pdev);
	struct mlx4_priv *priv = mlx4_priv(dev);
	int p;

	if (dev) {
		/* In SRIOV it is not allowed to unload the PF's
		 * driver while there are live VFs */
		if (mlx4_is_master(dev)) {
			if (mlx4_how_many_lives_vf(dev))
				printk(KERN_ERR "Removing PF while there are assigned VFs!\n");
		}
		mlx4_stop_sense(dev);
		mlx4_unregister_device(dev);

		for (p = 1; p <= dev->caps.num_ports; p++) {
			mlx4_cleanup_port_info(&priv->port[p]);
			mlx4_CLOSE_PORT(dev, p);
		}

		if (mlx4_is_master(dev))
			mlx4_free_resource_tracker(dev,
						   RES_TR_FREE_SLAVES_ONLY);

		mlx4_cleanup_counters_table(dev);
		mlx4_cleanup_mcg_table(dev);
		mlx4_cleanup_qp_table(dev);
		mlx4_cleanup_srq_table(dev);
		mlx4_cleanup_cq_table(dev);
		mlx4_cmd_use_polling(dev);
		mlx4_cleanup_eq_table(dev);
		mlx4_cleanup_mr_table(dev);
		mlx4_cleanup_xrcd_table(dev);
		mlx4_cleanup_pd_table(dev);

		if (mlx4_is_master(dev))
			mlx4_free_resource_tracker(dev,
						   RES_TR_FREE_STRUCTS_ONLY);

		iounmap(priv->kar);
		mlx4_uar_free(dev, &priv->driver_uar);
		mlx4_cleanup_uar_table(dev);
		if (!mlx4_is_slave(dev))
			mlx4_clear_steering(dev);
		mlx4_free_eq_table(dev);
		if (mlx4_is_master(dev))
			mlx4_multi_func_cleanup(dev);
		mlx4_close_hca(dev);
		if (mlx4_is_slave(dev))
			mlx4_multi_func_cleanup(dev);
		mlx4_cmd_cleanup(dev);

		if (dev->flags & MLX4_FLAG_MSI_X)
			pci_disable_msix(pdev);
		if (dev->flags & MLX4_FLAG_SRIOV) {
			mlx4_warn(dev, "Disabling SR-IOV\n");
			pci_disable_sriov(pdev);
		}

		if (!mlx4_is_slave(dev))
			mlx4_free_ownership(dev);
		kfree(priv);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

int mlx4_restart_one(struct pci_dev *pdev)
{
	mlx4_remove_one(pdev);
	return __mlx4_init_one(pdev, NULL);
}

static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
	/* MT25408 "Hermon" SDR */
	{ PCI_VDEVICE(MELLANOX, 0x6340), 0 },
	/* MT25408 "Hermon" DDR */
	{ PCI_VDEVICE(MELLANOX, 0x634a), 0 },
	/* MT25408 "Hermon" QDR */
	{ PCI_VDEVICE(MELLANOX, 0x6354), 0 },
	/* MT25408 "Hermon" DDR PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6732), 0 },
	/* MT25408 "Hermon" QDR PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x673c), 0 },
	/* MT25408 "Hermon" EN 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x6368), 0 },
	/* MT25408 "Hermon" EN 10GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6750), 0 },
	/* MT25458 ConnectX EN 10GBASE-T 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x6372), 0 },
	/* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x675a), 0 },
	/* MT26468 ConnectX EN 10GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6764), 0 },
	/* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
	{ PCI_VDEVICE(MELLANOX, 0x6746), 0 },
	/* MT26478 ConnectX2 40GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x676e), 0 },
	/* MT25400 Family [ConnectX-2 Virtual Function] */
	{ PCI_VDEVICE(MELLANOX, 0x1002), MLX4_VF },
	/* MT27500 Family [ConnectX-3] */
	{ PCI_VDEVICE(MELLANOX, 0x1003), 0 },
	/* MT27500 Family [ConnectX-3 Virtual Function] */
	{ PCI_VDEVICE(MELLANOX, 0x1004), MLX4_VF },
	{ PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1008), 0 }, /* MT27521 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1009), 0 }, /* MT27530 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100a), 0 }, /* MT27531 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100b), 0 }, /* MT27540 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100c), 0 }, /* MT27541 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100d), 0 }, /* MT27550 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100e), 0 }, /* MT27551 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100f), 0 }, /* MT27560 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1010), 0 }, /* MT27561 Family */
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx4_pci_table);

static struct pci_driver mlx4_driver = {
	.name		= DRV_NAME,
	.id_table	= mlx4_pci_table,
	.probe		= mlx4_init_one,
	.remove		= __devexit_p(mlx4_remove_one)
};

static int __init mlx4_verify_params(void)
{
	if ((log_num_mac < 0) || (log_num_mac > 7)) {
		pr_warning("mlx4_core: bad log_num_mac: %d\n", log_num_mac);
		return -1;
	}

	if (log_num_vlan != 0)
		pr_warning("mlx4_core: log_num_vlan - obsolete module param, using %d\n",
			   MLX4_LOG_NUM_VLANS);

	if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
		pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg);
		return -1;
	}

	/* Check if module param for ports type has legal combination */
	if (port_type_array[0] == false && port_type_array[1] == true) {
		printk(KERN_WARNING "Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
		port_type_array[0] = true;
	}

	return 0;
}

static int __init mlx4_init(void)
{
	int ret;

	if (mlx4_verify_params())
		return -EINVAL;

	mlx4_wq = create_singlethread_workqueue("mlx4");
	if (!mlx4_wq)
		return -ENOMEM;

	ret = pci_register_driver(&mlx4_driver);
	return ret < 0 ? ret : 0;
}

static void __exit mlx4_cleanup(void)
{
	pci_unregister_driver(&mlx4_driver);
	destroy_workqueue(mlx4_wq);
}

module_init(mlx4_init);
module_exit(mlx4_cleanup);