static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
u16 tail, u16 head)
{
- int idx;
+ u16 idx;
do {
idx = tail & (qp->sq.wqe_cnt - 1);
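Side note on why u16 works for idx: tail is already u16 and wqe_cnt is a power of two, so masking with wqe_cnt - 1 yields a ring index that always fits the narrower type. A minimal sketch of the idiom (illustrative, not the driver code):

	#include <stdint.h>

	/* index into a power-of-two ring: the mask keeps it in [0, cnt - 1] */
	static uint16_t ring_idx(uint16_t tail, uint16_t wqe_cnt)
	{
		return tail & (uint16_t)(wqe_cnt - 1);
	}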
int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
- int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
+ u8 port, struct ib_wc *in_wc, struct ib_grh *in_grh,
void *in_mad, void *response_mad)
{
u8 op_modifier = 0;
int uuarn;
int err;
int i;
- int reqlen;
+ size_t reqlen;
if (!dev->ib_active)
return ERR_PTR(-EAGAIN);
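size_t is the natural type for reqlen: it tracks a request buffer length (presumably mirroring udata->inlen, which is itself a size_t), and keeping lengths unsigned rules out negative-length comparisons. Reduced to its essence:

	#include <stddef.h>

	/* buffer lengths are sizes; compare them as size_t, never int */
	static int req_big_enough(size_t reqlen, size_t min_len)
	{
		return reqlen >= min_len;
	}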
u64 off_mask;
u64 buf_off;
- page_size = 1 << page_shift;
+ page_size = (u64)1 << page_shift;
page_mask = page_size - 1;
buf_off = addr & page_mask;
off_size = page_size >> 6;
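The cast is the substantive fix in this hunk: 1 << page_shift is evaluated in int, so a shift of 31 or more overflows before the result ever widens to u64. A standalone sketch:

	#include <stdint.h>

	/* widen before shifting; a plain 1 << 32 is undefined behavior in C */
	static uint64_t page_size_of(unsigned int page_shift)
	{
		return (uint64_t)1 << page_shift;
	}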
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
- int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
+ u8 port, struct ib_wc *in_wc, struct ib_grh *in_grh,
void *in_mad, void *response_mad);
struct ib_ah *create_ib_ah(struct ib_ah_attr *ah_attr,
struct mlx5_ib_ah *ah);
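Narrowing port to u8 in these prototypes lines up with how the IB core of this era sizes ports (struct ib_device keeps phys_port_cnt in a u8), so no caller can be truncated. A hedged validity check, assuming the usual 1-based numbering:

	#include <stdint.h>

	/* IB ports are numbered 1..N with N itself a u8, so 8 bits suffice */
	static int port_valid(uint8_t port, uint8_t phys_port_cnt)
	{
		return port >= 1 && port <= phys_port_cnt;
	}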
case IB_WR_RDMA_WRITE_WITH_IMM:
set_raddr_seg(seg, wr->wr.rdma.remote_addr,
wr->wr.rdma.rkey);
- seg  += sizeof(struct mlx5_wqe_raddr_seg);
+ seg += sizeof(struct mlx5_wqe_raddr_seg);
size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
break;
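For readers wondering about the / 16: WQE sizes are accounted in 16-byte units (the data-segment count the hardware consumes), and the raddr segment is exactly one such unit. Roughly:

	#include <stddef.h>

	/* WQE segment sizes accumulate in 16-byte units */
	static size_t wqe_units(size_t seg_bytes)
	{
		return seg_bytes / 16;	/* 16-byte raddr segment -> 1 unit */
	}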
case IB_QPT_SMI:
case IB_QPT_GSI:
set_datagram_seg(seg, wr);
- seg  += sizeof(struct mlx5_wqe_datagram_seg);
+ seg += sizeof(struct mlx5_wqe_datagram_seg);
size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
if (unlikely((seg == qend)))
seg = mlx5_get_send_wqe(qp, 0);
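Context for the qend test: the send queue buffer is circular, so when the segment cursor reaches the end it must wrap to the first WQE instead of running off the buffer. The shape of the idiom, reduced:

	#include <stddef.h>

	/* advance a cursor over a circular buffer, wrapping at the end */
	static char *advance(char *cur, size_t step, char *start, char *end)
	{
		cur += step;
		if (cur == end)
			cur = start;	/* wrap to the first entry */
		return cur;
	}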
if (size <= max_direct) {
buf->nbufs = 1;
buf->npages = 1;
- buf->page_shift = get_order(size) + PAGE_SHIFT;
+ buf->page_shift = (u8)get_order(size) + PAGE_SHIFT;
buf->direct.buf = dma_zalloc_coherent(&dev->pdev->dev,
size, &t, GFP_KERNEL);
if (!buf->direct.buf)
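On the cast above: get_order(size) returns the allocation order, the smallest n such that PAGE_SIZE << n covers size, so order + PAGE_SHIFT is the log2 of the rounded-up allocation and fits a u8 with room to spare. A userspace approximation (zero-size edge case glossed over):

	#include <stddef.h>

	/* smallest n with ((size_t)1 << (page_shift + n)) >= size */
	static unsigned int order_of(size_t size, unsigned int page_shift)
	{
		unsigned int n = 0;

		while (((size_t)1 << (page_shift + n)) < size)
			n++;
		return n;
	}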
struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
struct mlx5_cmd_mailbox *next = msg->next;
int data_only;
- int offset = 0;
+ u32 offset = 0;
int dump_len;
data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));
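The !! here is the usual normalize-to-bool idiom: whatever bit the mask test leaves set collapses to exactly 0 or 1, which is what a flag like data_only wants. Minimal form:

	/* !! turns "any bit set" into exactly 0 or 1 */
	static int flag_set(unsigned int mask, unsigned int bit)
	{
		return !!(mask & (1u << bit));
	}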
case MLX5_PORT_CHANGE_SUBTYPE_GUID:
case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
- dev->event(dev, port_subtype_event(eqe->sub_type), &port);
+ if (dev->event)
+ dev->event(dev, port_subtype_event(eqe->sub_type), &port);
break;
default:
mlx5_core_warn(dev, "Port event with unrecognized subtype: port %d, sub_type %d\n",
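The new NULL check is the point of this hunk: the event callback is optional, and dispatching a port event before a consumer has registered one would call through a NULL function pointer. The pattern in isolation:

	/* invoke an optional callback only when one is registered */
	typedef void (*event_cb)(int event, void *data);

	static void notify(event_cb cb, int event, void *data)
	{
		if (cb)
			cb(event, data);
	}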
#include "mlx5_core.h"
int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb,
- u16 opmod, int port)
+ u16 opmod, u8 port)
{
struct mlx5_mad_ifc_mbox_in *in = NULL;
struct mlx5_mad_ifc_mbox_out *out = NULL;
copy_rw_fields(&set_ctx->hca_cap, &query_out->hca_cap);
- if (dev->profile->mask & MLX5_PROF_MASK_QP_SIZE)
+ if (dev->profile && (dev->profile->mask & MLX5_PROF_MASK_QP_SIZE))
set_ctx->hca_cap.log_max_qp = dev->profile->log_max_qp;
flags = be64_to_cpu(query_out->hca_cap.flags);
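Two things are going on in that condition: the dev->profile test guards a dereference that could previously fault, and since & binds tighter than &&, the mask test parses as intended with or without its parentheses; spelling them out just makes the grouping explicit. Reduced:

	struct profile { unsigned long mask; };

	/* short-circuit: the mask is read only when a profile exists */
	static int wants_qp_size(const struct profile *p, unsigned long bit)
	{
		return p && (p->mask & bit);
	}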
struct mlx5_pages_req {
struct mlx5_core_dev *dev;
- u32 func_id;
+ u16 func_id;
s32 npages;
struct work_struct work;
};
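func_id shrinks to the 16 bits the firmware interface actually carries, while npages stays a signed s32 on purpose: as I read the page-request path, a negative count means the firmware wants pages reclaimed rather than supplied. A sketch of that assumed convention:

	#include <stdint.h>

	/* assumed convention: the sign of npages encodes the direction */
	static const char *page_req_kind(int32_t npages)
	{
		return npages < 0 ? "reclaim from driver" : "give to firmware";
	}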
__be32 caps_31_0;
};
-int mlx5_set_port_caps(struct mlx5_core_dev *dev, int port_num, u32 caps)
+int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps)
{
struct mlx5_reg_pcap in;
struct mlx5_reg_pcap out;
u8 syndrome;
};
-struct mlx5_eqe_dropped_packet {
-};
-
struct mlx5_eqe_port_state {
u8 reserved0[8];
u8 port;
struct mlx5_eqe_comp comp;
struct mlx5_eqe_qp_srq qp_srq;
struct mlx5_eqe_cq_err cq_err;
- struct mlx5_eqe_dropped_packet dp;
struct mlx5_eqe_port_state port;
struct mlx5_eqe_gpio gpio;
struct mlx5_eqe_congestion cong;
struct mlx5_buf_list *page_list;
int nbufs;
int npages;
- int page_shift;
int size;
+ u8 page_shift;
};
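page_shift is a log2 and can never approach 255, so u8 is honest typing rather than a space win: on LP64 the struct stays 24 bytes either way, with the u8 landing in what was tail padding. For the curious (field names are illustrative):

	#include <stdio.h>
	#include <stdint.h>

	struct buf_meta {
		void	*page_list;
		int	nbufs;
		int	npages;
		int	size;
		uint8_t	page_shift;	/* log2 of the page size, always < 64 */
	};

	int main(void)
	{
		printf("%zu\n", sizeof(struct buf_meta));	/* 24 on LP64 */
		return 0;
	}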
struct mlx5_eq {
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb,
- u16 opmod, int port);
+ u16 opmod, u8 port);
void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
int size_in, void *data_out, int size_out,
u16 reg_num, int arg, int write);
-int mlx5_set_port_caps(struct mlx5_core_dev *dev, int port_num, u32 caps);
+int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
struct mlx5_profile {
u64 mask;
- u32 log_max_qp;
+ u8 log_max_qp;
struct {
int size;
int limit;