int i;
for (i = 0; i < KVM_PAGE_TRACK_MAX; i++) {
- slot->arch.gfn_track[i] = kvzalloc(npages *
- sizeof(*slot->arch.gfn_track[i]), GFP_KERNEL);
+ slot->arch.gfn_track[i] =
+ kvcalloc(npages, sizeof(*slot->arch.gfn_track[i]),
+ GFP_KERNEL);
if (!slot->arch.gfn_track[i])
goto track_free;
}
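
/*
 * Why this conversion matters: kvzalloc(n * size, gfp) computes the
 * product before the allocator can sanity-check it, so an untrusted
 * count can wrap size_t and yield a tiny allocation that later code
 * overruns. kvcalloc(n, size, gfp) checks the multiply and returns
 * NULL on overflow, like calloc(3). A minimal standalone sketch of the
 * difference (userspace C; checked_calloc is a hypothetical stand-in
 * for kvcalloc, built on the GCC/Clang __builtin_mul_overflow builtin):
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void *checked_calloc(size_t n, size_t size)
{
	size_t bytes;

	/* Reject the request outright if n * size wraps size_t. */
	if (__builtin_mul_overflow(n, size, &bytes))
		return NULL;

	void *p = malloc(bytes);
	if (p)
		memset(p, 0, bytes);	/* kvzalloc/kvcalloc zero memory */
	return p;
}

int main(void)
{
	size_t huge = SIZE_MAX / 2 + 1;	/* huge * 4 wraps to 0 */

	/* Naive pattern: the product silently becomes 0 bytes. */
	printf("naive request: %zu bytes\n", huge * sizeof(uint32_t));

	/* Checked pattern: the overflow is caught and reported. */
	if (!checked_calloc(huge, sizeof(uint32_t)))
		puts("checked_calloc: overflow rejected");

	free(checked_calloc(16, sizeof(uint32_t)));
	return 0;
}
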
lpages = gfn_to_index(slot->base_gfn + npages - 1, slot->base_gfn, level) + 1;
slot->arch.rmap[i] =
- kvzalloc(lpages * sizeof(*slot->arch.rmap[i]), GFP_KERNEL);
+ kvcalloc(lpages, sizeof(*slot->arch.rmap[i]),
+ GFP_KERNEL);
if (!slot->arch.rmap[i])
goto out_free;
if (i == 0)
continue;
- linfo = kvzalloc(lpages * sizeof(*linfo), GFP_KERNEL);
+ linfo = kvcalloc(lpages, sizeof(*linfo), GFP_KERNEL);
if (!linfo)
goto out_free;
output_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
- rgb_user = kvzalloc(sizeof(*rgb_user) * (ramp->num_entries + _EXTRA_POINTS),
+ rgb_user = kvcalloc(ramp->num_entries + _EXTRA_POINTS,
+ sizeof(*rgb_user),
GFP_KERNEL);
if (!rgb_user)
goto rgb_user_alloc_fail;
- rgb_regamma = kvzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS + _EXTRA_POINTS),
+ rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
+ sizeof(*rgb_regamma),
GFP_KERNEL);
if (!rgb_regamma)
goto rgb_regamma_alloc_fail;
- axix_x = kvzalloc(sizeof(*axix_x) * (ramp->num_entries + 3),
+ axix_x = kvcalloc(ramp->num_entries + 3, sizeof(*axix_x),
GFP_KERNEL);
if (!axix_x)
goto axix_x_alloc_fail;
- coeff = kvzalloc(sizeof(*coeff) * (MAX_HW_POINTS + _EXTRA_POINTS), GFP_KERNEL);
+ coeff = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, sizeof(*coeff),
+ GFP_KERNEL);
if (!coeff)
goto coeff_alloc_fail;
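
/*
 * The color_gamma allocations above sit in a goto unwind ladder: each
 * failure label frees exactly what was allocated before the failing
 * step, and the success path falls through the same kvfree() chain
 * because the buffers are temporaries. A minimal standalone sketch of
 * the idiom (userspace C; build_tables and both labels are
 * hypothetical names):
 */
#include <stdlib.h>

static int build_tables(size_t n)
{
	double *rgb, *axis;
	int ret = -1;

	rgb = calloc(n, sizeof(*rgb));
	if (!rgb)
		goto rgb_fail;

	axis = calloc(n, sizeof(*axis));
	if (!axis)
		goto axis_fail;

	/* ... fill and consume the temporary tables ... */
	ret = 0;

	free(axis);	/* success path falls through the cleanup chain */
axis_fail:
	free(rgb);
rgb_fail:
	return ret;
}

int main(void)
{
	return build_tables(1024) ? 1 : 0;
}
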
input_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
- rgb_user = kvzalloc(sizeof(*rgb_user) * (ramp->num_entries + _EXTRA_POINTS),
+ rgb_user = kvcalloc(ramp->num_entries + _EXTRA_POINTS,
+ sizeof(*rgb_user),
GFP_KERNEL);
if (!rgb_user)
goto rgb_user_alloc_fail;
- curve = kvzalloc(sizeof(*curve) * (MAX_HW_POINTS + _EXTRA_POINTS),
+ curve = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, sizeof(*curve),
GFP_KERNEL);
if (!curve)
goto curve_alloc_fail;
- axix_x = kvzalloc(sizeof(*axix_x) * (ramp->num_entries + _EXTRA_POINTS),
+ axix_x = kvcalloc(ramp->num_entries + _EXTRA_POINTS, sizeof(*axix_x),
GFP_KERNEL);
if (!axix_x)
goto axix_x_alloc_fail;
- coeff = kvzalloc(sizeof(*coeff) * (MAX_HW_POINTS + _EXTRA_POINTS), GFP_KERNEL);
+ coeff = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, sizeof(*coeff),
+ GFP_KERNEL);
if (!coeff)
goto coeff_alloc_fail;
}
ret = true;
} else if (trans == TRANSFER_FUNCTION_PQ) {
- rgb_regamma = kvzalloc(sizeof(*rgb_regamma) *
- (MAX_HW_POINTS + _EXTRA_POINTS),
+ rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
+ sizeof(*rgb_regamma),
GFP_KERNEL);
if (!rgb_regamma)
goto rgb_regamma_alloc_fail;
kvfree(rgb_regamma);
} else if (trans == TRANSFER_FUNCTION_SRGB ||
trans == TRANSFER_FUNCTION_BT709) {
- rgb_regamma = kvzalloc(sizeof(*rgb_regamma) *
- (MAX_HW_POINTS + _EXTRA_POINTS),
+ rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
+ sizeof(*rgb_regamma),
GFP_KERNEL);
if (!rgb_regamma)
goto rgb_regamma_alloc_fail;
}
ret = true;
} else if (trans == TRANSFER_FUNCTION_PQ) {
- rgb_degamma = kvzalloc(sizeof(*rgb_degamma) *
- (MAX_HW_POINTS + _EXTRA_POINTS),
+ rgb_degamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
+ sizeof(*rgb_degamma),
GFP_KERNEL);
if (!rgb_degamma)
goto rgb_degamma_alloc_fail;
kvfree(rgb_degamma);
} else if (trans == TRANSFER_FUNCTION_SRGB ||
trans == TRANSFER_FUNCTION_BT709) {
- rgb_degamma = kvzalloc(sizeof(*rgb_degamma) *
- (MAX_HW_POINTS + _EXTRA_POINTS),
+ rgb_degamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
+ sizeof(*rgb_degamma),
GFP_KERNEL);
if (!rgb_degamma)
goto rgb_degamma_alloc_fail;
pgt->sparse = sparse;
if (desc->type == PGD) {
- pgt->pde = kvzalloc(sizeof(*pgt->pde) * pten, GFP_KERNEL);
+ pgt->pde = kvcalloc(pten, sizeof(*pgt->pde), GFP_KERNEL);
if (!pgt->pde) {
kfree(pgt);
return NULL;
goto err_umem;
}
- in->pas = kvzalloc(sizeof(*in->pas) * ncont, GFP_KERNEL);
+ in->pas = kvcalloc(ncont, sizeof(*in->pas), GFP_KERNEL);
if (!in->pas) {
err = -ENOMEM;
goto err_umem;
}
mlx5_ib_dbg(dev, "srq->buf.page_shift = %d\n", srq->buf.page_shift);
- in->pas = kvzalloc(sizeof(*in->pas) * srq->buf.npages, GFP_KERNEL);
+ in->pas = kvcalloc(srq->buf.npages, sizeof(*in->pas), GFP_KERNEL);
if (!in->pas) {
err = -ENOMEM;
goto err_buf;
return -E2BIG;
}
- v->validated_blocks = kvzalloc(BITS_TO_LONGS(v->data_blocks) *
- sizeof(unsigned long), GFP_KERNEL);
+ v->validated_blocks = kvcalloc(BITS_TO_LONGS(v->data_blocks),
+ sizeof(unsigned long),
+ GFP_KERNEL);
if (!v->validated_blocks) {
ti->error = "failed to allocate bitset for check_at_most_once";
return -ENOMEM;
for (i = 0; i < ctbl->clipt_size; ++i)
INIT_LIST_HEAD(&ctbl->hash_list[i]);
- cl_list = kvzalloc(clipt_size*sizeof(struct clip_entry), GFP_KERNEL);
+ cl_list = kvcalloc(clipt_size, sizeof(struct clip_entry), GFP_KERNEL);
if (!cl_list) {
kvfree(ctbl);
return NULL;
adapter->params.offload = 0;
}
- adapter->mps_encap = kvzalloc(sizeof(struct mps_encap_entry) *
- adapter->params.arch.mps_tcam_size,
+ adapter->mps_encap = kvcalloc(adapter->params.arch.mps_tcam_size,
+ sizeof(struct mps_encap_entry),
GFP_KERNEL);
if (!adapter->mps_encap)
dev_warn(&pdev->dev, "could not allocate MPS Encap entries, continuing\n");
unsigned int bmap_size;
bmap_size = BITS_TO_LONGS(max_tids);
- link->tid_map = kvzalloc(sizeof(unsigned long) * bmap_size, GFP_KERNEL);
+ link->tid_map = kvcalloc(bmap_size, sizeof(unsigned long),
+ GFP_KERNEL);
if (!link->tid_map)
goto out_no_mem;
bitmap_zero(link->tid_map, max_tids);
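
/*
 * The bitmap allocations above size the array with BITS_TO_LONGS(),
 * which rounds a bit count up to whole unsigned longs so the map
 * covers every bit even when the count is not word-aligned. The zeroed
 * allocation already leaves the bitmap clear, so the bitmap_zero() is
 * belt and braces. A standalone sketch of the sizing math (userspace
 * C; the macro mirrors include/linux/bitops.h):
 */
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG	(CHAR_BIT * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	size_t max_tids = 1000;	/* hypothetical tid count */
	unsigned long *map = calloc(BITS_TO_LONGS(max_tids),
				    sizeof(unsigned long));

	if (!map)
		return 1;
	/* On LP64 this prints "1000 bits -> 16 longs" (16 * 64 = 1024). */
	printf("%zu bits -> %zu longs\n", max_tids,
	       (size_t)BITS_TO_LONGS(max_tids));
	free(map);
	return 0;
}
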
return -EINVAL;
num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk;
- table->icm = kvzalloc(num_icm * sizeof(*table->icm), GFP_KERNEL);
+ table->icm = kvcalloc(num_icm, sizeof(*table->icm), GFP_KERNEL);
if (!table->icm)
return -ENOMEM;
table->virt = virt;
if (err)
goto out;
- conn->qp.rq.bufs = kvzalloc(sizeof(conn->qp.rq.bufs[0]) *
- conn->qp.rq.size, GFP_KERNEL);
+ conn->qp.rq.bufs = kvcalloc(conn->qp.rq.size,
+ sizeof(conn->qp.rq.bufs[0]),
+ GFP_KERNEL);
if (!conn->qp.rq.bufs) {
err = -ENOMEM;
goto err_wq;
}
- conn->qp.sq.bufs = kvzalloc(sizeof(conn->qp.sq.bufs[0]) *
- conn->qp.sq.size, GFP_KERNEL);
+ conn->qp.sq.bufs = kvcalloc(conn->qp.sq.size,
+ sizeof(conn->qp.sq.bufs[0]),
+ GFP_KERNEL);
if (!conn->qp.sq.bufs) {
err = -ENOMEM;
goto err_rq_bufs;
alink->id = id;
alink->parent = TC_H_ROOT;
alink->total_queues = alink->vnic->max_rx_rings;
- alink->qdiscs = kvzalloc(sizeof(*alink->qdiscs) * alink->total_queues,
+ alink->qdiscs = kvcalloc(alink->total_queues, sizeof(*alink->qdiscs),
GFP_KERNEL);
if (!alink->qdiscs) {
err = -ENOMEM;
/* If the user passed an items pointer, he wants a copy of the array. */
if (items)
num_arrays++;
- q->pool = kvzalloc(num_arrays * max * sizeof(void*), GFP_KERNEL);
+ q->pool = kvcalloc(num_arrays * max, sizeof(void *), GFP_KERNEL);
if (q->pool == NULL)
return -ENOMEM;
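
/*
 * One caveat, visible in the iscsi_pool_init() conversion just above:
 * kvcalloc() only checks the multiply between its first two arguments,
 * so a product folded into the count (num_arrays * max here) can still
 * wrap before kvcalloc() ever sees it. The kernel's <linux/overflow.h>
 * grew array3_size() for three-factor sizes; a standalone sketch of
 * the same idea (userspace C; size_mul3 is a hypothetical name):
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static size_t size_mul3(size_t a, size_t b, size_t c)
{
	size_t ab, abc;

	/* Check every step; saturate to SIZE_MAX so allocation fails. */
	if (__builtin_mul_overflow(a, b, &ab) ||
	    __builtin_mul_overflow(ab, c, &abc))
		return SIZE_MAX;
	return abc;
}

int main(void)
{
	size_t num_arrays = 2, max = 4096;	/* hypothetical pool sizes */
	void **pool = malloc(size_mul3(num_arrays, max, sizeof(void *)));

	if (!pool)
		return 1;
	printf("allocated %zu bytes\n",
	       size_mul3(num_arrays, max, sizeof(void *)));
	free(pool);
	return 0;
}
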
new_size = min_t(u32, BTF_MAX_TYPE,
btf->types_size + expand_by);
- new_types = kvzalloc(new_size * sizeof(*new_types),
+ new_types = kvcalloc(new_size, sizeof(*new_types),
GFP_KERNEL | __GFP_NOWARN);
if (!new_types)
return -ENOMEM;
u8 *visit_states = NULL;
/* +1 for btf_void */
- resolved_sizes = kvzalloc((nr_types + 1) * sizeof(*resolved_sizes),
+ resolved_sizes = kvcalloc(nr_types + 1, sizeof(*resolved_sizes),
GFP_KERNEL | __GFP_NOWARN);
if (!resolved_sizes)
goto nomem;
- resolved_ids = kvzalloc((nr_types + 1) * sizeof(*resolved_ids),
+ resolved_ids = kvcalloc(nr_types + 1, sizeof(*resolved_ids),
GFP_KERNEL | __GFP_NOWARN);
if (!resolved_ids)
goto nomem;
- visit_states = kvzalloc((nr_types + 1) * sizeof(*visit_states),
+ visit_states = kvcalloc(nr_types + 1, sizeof(*visit_states),
GFP_KERNEL | __GFP_NOWARN);
if (!visit_states)
goto nomem;
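
/*
 * The BTF allocations above pass GFP_KERNEL | __GFP_NOWARN because the
 * sizes are user-controlled and a failed attempt should not spam the
 * kernel log. How the helpers compose: kvcalloc(n, size, gfp) is
 * kvmalloc_array(n, size, gfp | __GFP_ZERO), and kvmalloc_array() is
 * where the overflow check lives. A standalone sketch paraphrasing
 * include/linux/mm.h from this era, with the gfp machinery stubbed out
 * so it compiles in userspace:
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef unsigned int gfp_t;
#define GFP_KERNEL	0x0u
#define __GFP_ZERO	0x1u	/* zero the allocation */
#define __GFP_NOWARN	0x2u	/* suppress failure warnings (no-op here) */

static void *kvmalloc(size_t bytes, gfp_t flags)
{
	void *p = malloc(bytes);	/* stand-in for kmalloc()/vmalloc() */

	if (p && (flags & __GFP_ZERO))
		memset(p, 0, bytes);
	return p;
}

static void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (__builtin_mul_overflow(n, size, &bytes))
		return NULL;		/* overflow: fail the allocation */
	return kvmalloc(bytes, flags);
}

static void *kvcalloc(size_t n, size_t size, gfp_t flags)
{
	return kvmalloc_array(n, size, flags | __GFP_ZERO);
}

int main(void)
{
	unsigned int *ids = kvcalloc(8, sizeof(*ids),
				     GFP_KERNEL | __GFP_NOWARN);

	if (!ids)
		return 1;
	printf("ids[7] = %u\n", ids[7]);	/* zeroed: prints 0 */
	free(ids);
	return 0;
}
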
struct page **pages;
nr_pages = gup->size / PAGE_SIZE;
- pages = kvzalloc(sizeof(void *) * nr_pages, GFP_KERNEL);
+ pages = kvcalloc(nr_pages, sizeof(void *), GFP_KERNEL);
if (!pages)
return -ENOMEM;
/*
 * Do allocation outside swap_slots_cache_mutex
 * as kvzalloc could trigger reclaim and get_swap_page,
 * which can lock swap_slots_cache_mutex.
 */
- slots = kvzalloc(sizeof(swp_entry_t) * SWAP_SLOTS_CACHE_SIZE,
+ slots = kvcalloc(SWAP_SLOTS_CACHE_SIZE, sizeof(swp_entry_t),
GFP_KERNEL);
if (!slots)
return -ENOMEM;
- slots_ret = kvzalloc(sizeof(swp_entry_t) * SWAP_SLOTS_CACHE_SIZE,
+ slots_ret = kvcalloc(SWAP_SLOTS_CACHE_SIZE, sizeof(swp_entry_t),
GFP_KERNEL);
if (!slots_ret) {
kvfree(slots);
unsigned int i, nr;
nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
- spaces = kvzalloc(sizeof(struct address_space) * nr, GFP_KERNEL);
+ spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
if (!spaces)
return -ENOMEM;
for (i = 0; i < nr; i++) {
p->cluster_next = 1 + (prandom_u32() % p->highest_bit);
nr_cluster = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
- cluster_info = kvzalloc(nr_cluster * sizeof(*cluster_info),
+ cluster_info = kvcalloc(nr_cluster, sizeof(*cluster_info),
GFP_KERNEL);
if (!cluster_info) {
error = -ENOMEM;
}
/* frontswap enabled? set up bit-per-page map for frontswap */
if (IS_ENABLED(CONFIG_FRONTSWAP))
- frontswap_map = kvzalloc(BITS_TO_LONGS(maxpages) * sizeof(long),
+ frontswap_map = kvcalloc(BITS_TO_LONGS(maxpages),
+ sizeof(long),
GFP_KERNEL);
if (p->bdev && (swap_flags & SWAP_FLAG_DISCARD) && swap_discardable(p)) {
return err;
if (!q->flows) {
- q->flows = kvzalloc(q->flows_cnt *
- sizeof(struct fq_codel_flow), GFP_KERNEL);
+ q->flows = kvcalloc(q->flows_cnt,
+ sizeof(struct fq_codel_flow),
+ GFP_KERNEL);
if (!q->flows)
return -ENOMEM;
- q->backlogs = kvzalloc(q->flows_cnt * sizeof(u32), GFP_KERNEL);
+ q->backlogs = kvcalloc(q->flows_cnt, sizeof(u32), GFP_KERNEL);
if (!q->backlogs)
return -ENOMEM;
for (i = 0; i < q->flows_cnt; i++) {
if (!q->hh_flows) {
/* Initialize heavy-hitter flow table. */
- q->hh_flows = kvzalloc(HH_FLOWS_CNT *
- sizeof(struct list_head), GFP_KERNEL);
+ q->hh_flows = kvcalloc(HH_FLOWS_CNT, sizeof(struct list_head),
+ GFP_KERNEL);
if (!q->hh_flows)
return -ENOMEM;
for (i = 0; i < HH_FLOWS_CNT; i++)
/* Initialize heavy-hitter filter arrays. */
for (i = 0; i < HHF_ARRAYS_CNT; i++) {
- q->hhf_arrays[i] = kvzalloc(HHF_ARRAYS_LEN *
- sizeof(u32), GFP_KERNEL);
+ q->hhf_arrays[i] = kvcalloc(HHF_ARRAYS_LEN,
+ sizeof(u32),
+ GFP_KERNEL);
if (!q->hhf_arrays[i]) {
/* Note: hhf_destroy() will be called
 * by our caller. */