if (!dev->ops.process_mad)
return -ENOSYS;
- in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
- out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
+ in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
+ out_mad = kzalloc(sizeof(*out_mad), GFP_KERNEL);
if (!in_mad || !out_mad) {
ret = -ENOMEM;
goto out;
}
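
The hunk above switches out_mad from kmalloc() to kzalloc(), so both MAD buffers now come back zero-filled; that is what makes the memset() calls removed further down redundant. As a rough userspace sketch of the equivalence (kzalloc(n, flags) behaves like kmalloc(n, flags) followed by memset(p, 0, n); malloc/calloc stand in here, and none of this is the kernel's actual implementation):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Userspace stand-ins for the two kernel allocation patterns. */
static void *alloc_zeroed_two_step(size_t n)
{
	void *p = malloc(n);          /* like kmalloc(n, GFP_KERNEL) */
	if (p)
		memset(p, 0, n);      /* explicit zeroing */
	return p;
}

static void *alloc_zeroed_one_step(size_t n)
{
	return calloc(1, n);          /* like kzalloc(n, GFP_KERNEL) */
}

int main(void)
{
	size_t n = 256;               /* an IB MAD is 256 bytes */
	unsigned char *a = alloc_zeroed_two_step(n);
	unsigned char *b = alloc_zeroed_one_step(n);

	if (!a || !b)
		return 1;
	/* Both buffers are all-zero, so a later memset(..., 0, ...) is redundant. */
	printf("equal: %d\n", memcmp(a, b, n) == 0);
	free(a);
	free(b);
	return 0;
}
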
mutex_unlock(&dev->counters_table[port_num - 1].mutex);
if (stats_avail) {
- memset(out_mad->data, 0, sizeof out_mad->data);
switch (counter_stats.counter_mode & 0xf) {
case 0:
		edit_counter(&counter_stats,
			     (void *)(out_mad->data + 40));
		err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
		break;
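
edit_counter() writes the counters into the reply at out_mad->data + 40. A MAD is 256 bytes with a 24-byte common header, and PMA attribute data starts at byte 64 of the packet, i.e. byte 40 of the data[] region. A small self-checking sketch of that arithmetic (the structs here are simplified stand-ins for the real definitions in include/rdma/ib_mad.h):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified MAD layout: 24-byte common header + 232 bytes of payload. */
struct mad_hdr { unsigned char bytes[24]; };
struct mad     { struct mad_hdr mad_hdr; unsigned char data[232]; };

int main(void)
{
	/* PMA attribute data starts at byte 64 of the MAD ... */
	size_t pma_attr_offset_in_mad = 64;
	/* ... which is byte 40 of the data[] array after the header. */
	size_t offset_in_data = pma_attr_offset_in_mad - sizeof(struct mad_hdr);

	assert(sizeof(struct mad) == 256);
	assert(offset_in_data == 40);  /* matches out_mad->data + 40 above */
	printf("attribute data at data[%zu]\n", offset_in_data);
	return 0;
}
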
if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
		 *out_mad_size != sizeof(*out_mad)))
return IB_MAD_RESULT_FAILURE;
- memset(out_mad->data, 0, sizeof(out_mad->data));
-
if (MLX5_CAP_GEN(dev->mdev, vport_counters) &&
in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
in_mad->mad_hdr.method == IB_MGMT_METHOD_GET) {
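
This mlx5 hunk drops its memset() for the same reason: the caller hands over an out_mad that was allocated zeroed. Note the guard that precedes it: WARN_ON_ONCE() logs the first violation and evaluates to the condition, so mis-sized MADs are rejected before the buffers are touched. A hypothetical userspace analogue of that guard-then-proceed shape (warn_on_once() and process_mad_sketch() are invented names):

#include <stdbool.h>
#include <stdio.h>

#define MAD_SIZE 256
enum { MAD_RESULT_FAILURE = 0, MAD_RESULT_SUCCESS = 1 };

/* Invented stand-in for WARN_ON_ONCE(): report the first hit, return cond. */
static bool warn_on_once(bool cond, const char *what)
{
	static bool warned;
	if (cond && !warned) {
		warned = true;
		fprintf(stderr, "warning: %s\n", what);
	}
	return cond;
}

static int process_mad_sketch(size_t in_size, size_t out_size)
{
	/* Reject mis-sized requests up front, warning only once. */
	if (warn_on_once(in_size != MAD_SIZE || out_size != MAD_SIZE,
			 "bad MAD size"))
		return MAD_RESULT_FAILURE;
	return MAD_RESULT_SUCCESS;
}

int main(void)
{
	printf("%d %d\n", process_mad_sketch(256, 256),
	       process_mad_sketch(128, 256));
	return 0;
}
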
static void ocrdma_pma_counters(struct ocrdma_dev *dev, struct ib_mad *out_mad)
{
struct ib_pma_portcounters *pma_cnt;
- memset(out_mad->data, 0, sizeof out_mad->data);
pma_cnt = (void *)(out_mad->data + 40);
ocrdma_update_stats(dev);
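
The ocrdma path refreshes its cached hardware statistics via ocrdma_update_stats() before formatting them into the PMA region of the reply; with the local memset() gone, it too relies on the reply buffer arriving pre-zeroed. A compact sketch of that update-then-format ordering (dev_stats, pma_counters, and update_stats() are invented stand-ins, not ocrdma's types):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Invented stand-ins: a stats cache and a wire-format counters block. */
struct dev_stats    { uint32_t xmit_data, rcv_data; };
struct pma_counters { uint32_t port_xmit_data, port_rcv_data; };

static void update_stats(struct dev_stats *s)
{
	/* In the driver this queries firmware; here we fake fresh values. */
	s->xmit_data = 1234;
	s->rcv_data  = 5678;
}

int main(void)
{
	struct dev_stats stats = {0};
	/* Reply buffer comes back zeroed, as kzalloc() guarantees in-kernel. */
	unsigned char *reply = calloc(1, 256);
	if (!reply)
		return 1;

	update_stats(&stats);                            /* 1. refresh cache    */
	struct pma_counters *pma = (void *)(reply + 64); /* 2. aim at attr data */
	pma->port_xmit_data = stats.xmit_data;           /* 3. format the reply */
	pma->port_rcv_data  = stats.rcv_data;

	printf("xmit=%u rcv=%u\n", pma->port_xmit_data, pma->port_rcv_data);
	free(reply);
	return 0;
}
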
struct ib_cc_classportinfo_attr *p =
(struct ib_cc_classportinfo_attr *)ccp->mgmt_data;
- memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));
-
p->base_version = 1;
p->class_version = 1;
p->cap_mask = 0;
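
cc_get_classportinfo() fills in only a few fields, so every byte it does not touch, reserved fields included, must already be zero when the reply hits the wire. That is the invariant the removed memset() used to enforce locally and that the zeroed allocation now provides. A toy illustration of what goes wrong without it (classportinfo_sketch is a hypothetical layout, not the real attribute):

#include <stdio.h>
#include <string.h>

/* Hypothetical wire reply: two live fields plus reserved padding. */
struct classportinfo_sketch {
	unsigned char base_version;
	unsigned char class_version;
	unsigned char reserved[30];   /* must be zero on the wire */
};

int main(void)
{
	unsigned char backing[sizeof(struct classportinfo_sketch)];
	memset(backing, 0xAA, sizeof(backing));     /* stale garbage */

	struct classportinfo_sketch *p = (void *)backing;
	memset(p, 0, sizeof(*p));   /* without this, reserved[] leaks 0xAA */
	p->base_version = 1;
	p->class_version = 1;

	printf("reserved[0] = 0x%02x\n", p->reserved[0]);  /* prints 0x00 */
	return 0;
}
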
struct qib_ibport *ibp = to_iport(ibdev, port);
struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));
-
p->congestion_info = 0;
p->control_table_cap = ppd->cc_max_table_entries;
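
to_iport() and ppd_from_ibp() hop from the generic ib_device plus port number down to qib's per-port state, from which control_table_cap is reported. The second hop is the usual embedded-member pattern, typically implemented with container_of(); a minimal userspace rendition under that assumption (ibport and pportdata are invented stand-ins for qib's structs):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ibport { int port_num; };            /* generic per-port handle */
struct pportdata {                          /* driver per-port state   */
	int cc_max_table_entries;
	struct ibport ibport_data;          /* embedded generic handle */
};

/* From the embedded member back to its enclosing driver struct. */
static struct pportdata *ppd_from_ibp(struct ibport *ibp)
{
	return container_of(ibp, struct pportdata, ibport_data);
}

int main(void)
{
	struct pportdata ppd = { .cc_max_table_entries = 31,
				 .ibport_data = { .port_num = 1 } };
	struct ibport *ibp = &ppd.ibport_data;

	printf("cap = %d\n", ppd_from_ibp(ibp)->cc_max_table_entries);
	return 0;
}
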
struct qib_pportdata *ppd = ppd_from_ibp(ibp);
struct ib_cc_congestion_entry_shadow *entries;
- memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));
-
spin_lock(&ppd->cc_shadow_lock);
entries = ppd->congestion_entries_shadow->entries;
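
The congestion-setting reply is built from a shadow table that exists precisely so the MAD path can take a consistent snapshot under ppd->cc_shadow_lock while other contexts update the live configuration. A pthread sketch of that snapshot-under-lock pattern (all names invented; build with -lpthread):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define N_ENTRIES 8

/* Shadow table: writers update it, readers snapshot it, both under lock. */
static pthread_spinlock_t shadow_lock;
static int shadow_entries[N_ENTRIES];

static void snapshot(int out[N_ENTRIES])
{
	/* Copy out under the lock so the reply never sees a torn update. */
	pthread_spin_lock(&shadow_lock);
	memcpy(out, shadow_entries, sizeof(shadow_entries));
	pthread_spin_unlock(&shadow_lock);
}

int main(void)
{
	int reply[N_ENTRIES];

	pthread_spin_init(&shadow_lock, PTHREAD_PROCESS_PRIVATE);
	pthread_spin_lock(&shadow_lock);
	for (int i = 0; i < N_ENTRIES; i++)
		shadow_entries[i] = i * 10;
	pthread_spin_unlock(&shadow_lock);

	snapshot(reply);
	printf("entry[3] = %d\n", reply[3]);
	pthread_spin_destroy(&shadow_lock);
	return 0;
}
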
if (cct_block_index > IB_CC_TABLE_CAP_DEFAULT - 1)
goto bail;
- memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));
-
spin_lock(&ppd->cc_shadow_lock);
max_cct_block =
	(ppd->ccti_entries_shadow->ccti_last_entry + 1)/IB_CCT_ENTRIES;
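
cc_get_congestion_control_table() first rejects a cct_block_index beyond the table capacity, then derives the highest block that actually holds entries before copying anything out. A sketch of that reject-then-derive logic (the constants are stand-ins, not the real IB_CC_TABLE_CAP_DEFAULT/IB_CCT_ENTRIES values):

#include <stdio.h>

#define TABLE_CAP   31   /* stand-in for IB_CC_TABLE_CAP_DEFAULT */
#define CCT_ENTRIES 64   /* stand-in for IB_CCT_ENTRIES          */

/* Returns the index of the last block that holds data, or -1 to bail. */
static int check_block_index(int block_index, int last_entry)
{
	if (block_index > TABLE_CAP - 1)
		return -1;                         /* out of range: bail     */
	return (last_entry + 1) / CCT_ENTRIES;     /* highest populated block */
}

int main(void)
{
	printf("%d\n", check_block_index(2, 200));   /* in range */
	printf("%d\n", check_block_index(40, 200));  /* rejected */
	return 0;
}
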