return -EINVAL;
if (!access_ok(VERIFY_WRITE, events, sizeof(*events) * maxevents))
return -EFAULT;
- kbuf = kmalloc(sizeof(*kbuf) * maxevents, GFP_KERNEL);
+ kbuf = kmalloc_array(maxevents, sizeof(*kbuf), GFP_KERNEL);
if (!kbuf)
return -ENOMEM;
fs = get_fs();
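Note: every hunk in this series replaces an open-coded n * size multiplication with the overflow-checked kmalloc_array() helper. For reference, a simplified sketch of the helper as defined in include/linux/slab.h (the real version also has a __builtin_constant_p() fast path that calls kmalloc() directly for constant arguments):

static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	/* Refuse allocations whose byte count would wrap past SIZE_MAX. */
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;
	return __kmalloc(n * size, flags);
}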
return -EINVAL;
if (!access_ok(VERIFY_READ, tsops, sizeof(*tsops) * nsops))
return -EFAULT;
- sops = kmalloc(sizeof(*sops) * nsops, GFP_KERNEL);
+ sops = kmalloc_array(nsops, sizeof(*sops), GFP_KERNEL);
if (!sops)
return -ENOMEM;
err = 0;
#include "mm.h"
#ifdef CONFIG_ARM_LPAE
-#define __pgd_alloc() kmalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL)
+#define __pgd_alloc() kmalloc_array(PTRS_PER_PGD, sizeof(pgd_t), GFP_KERNEL)
#define __pgd_free(pgd) kfree(pgd)
#else
#define __pgd_alloc() (pgd_t *)__get_free_pages(GFP_KERNEL, 2)
static int coverage_start(const union decode_item *table)
{
- coverage.base = kmalloc(MAX_COVERAGE_ENTRIES *
- sizeof(struct coverage_entry), GFP_KERNEL);
+ coverage.base = kmalloc_array(MAX_COVERAGE_ENTRIES,
+ sizeof(struct coverage_entry),
+ GFP_KERNEL);
coverage.num_entries = 0;
coverage.nesting = 0;
return table_iter(table, coverage_start_fn, &coverage);
/* - 3 - */
slidx_pool.max_idx = (rec_max_size/sect_min_size) * 2 + 1;
slidx_pool.buffer =
- kmalloc(slidx_pool.max_idx * sizeof(slidx_list_t), GFP_KERNEL);
+ kmalloc_array(slidx_pool.max_idx, sizeof(slidx_list_t),
+ GFP_KERNEL);
return slidx_pool.buffer ? 0 : -ENOMEM;
}
int cpu = smp_processor_id();
if (!ia64_idtrs[cpu]) {
- ia64_idtrs[cpu] = kmalloc(2 * IA64_TR_ALLOC_MAX *
- sizeof (struct ia64_tr_entry), GFP_KERNEL);
+ ia64_idtrs[cpu] = kmalloc_array(2 * IA64_TR_ALLOC_MAX,
+ sizeof(struct ia64_tr_entry),
+ GFP_KERNEL);
if (!ia64_idtrs[cpu])
return -ENOMEM;
}
{
int i;
- sn_irq_lh = kmalloc(sizeof(struct list_head *) * NR_IRQS, GFP_KERNEL);
+ sn_irq_lh = kmalloc_array(NR_IRQS, sizeof(struct list_head *),
+ GFP_KERNEL);
if (!sn_irq_lh)
panic("SN PCI INIT: Failed to allocate memory for PCI init\n");
* and if we try that first we are likely to not waste larger
* slabs of memory.
*/
- desc_base = (u32)kmalloc(entries * sizeof(au1x_ddma_desc_t),
- GFP_KERNEL|GFP_DMA);
+ desc_base = (u32)kmalloc_array(entries, sizeof(au1x_ddma_desc_t),
+ GFP_KERNEL|GFP_DMA);
if (desc_base == 0)
return 0;
new_blocks = max_blocks - info->max_blocks;
- block = kmalloc(sizeof(rh_block_t) * max_blocks, GFP_ATOMIC);
+ block = kmalloc_array(max_blocks, sizeof(rh_block_t), GFP_ATOMIC);
if (block == NULL)
return -ENOMEM;
if (ret)
goto out;
- ppc4xx_hsta_msi.irq_map = kmalloc(sizeof(int) * irq_count, GFP_KERNEL);
+ ppc4xx_hsta_msi.irq_map = kmalloc_array(irq_count, sizeof(int),
+ GFP_KERNEL);
if (!ppc4xx_hsta_msi.irq_map) {
ret = -ENOMEM;
goto out1;
if (type == PCI_CAP_ID_MSIX)
pr_debug("ppc4xx msi: MSI-X untested, trying anyway.\n");
- msi_data->msi_virqs = kmalloc((msi_irqs) * sizeof(int), GFP_KERNEL);
+ msi_data->msi_virqs = kmalloc_array(msi_irqs, sizeof(int), GFP_KERNEL);
if (!msi_data->msi_virqs)
return -ENOMEM;
#ifdef CONFIG_PM
/* allocate memory to save mpic state */
- mpic->save_data = kmalloc(mpic->num_sources * sizeof(*mpic->save_data),
- GFP_KERNEL);
+ mpic->save_data = kmalloc_array(mpic->num_sources,
+ sizeof(*mpic->save_data),
+ GFP_KERNEL);
BUG_ON(mpic->save_data == NULL);
#endif
get_online_cpus();
cpu_count = num_online_cpus();
- cpu_vec = kmalloc(sizeof(*cpu_vec) * num_possible_cpus(), GFP_KERNEL);
+ cpu_vec = kmalloc_array(num_possible_cpus(), sizeof(*cpu_vec),
+ GFP_KERNEL);
if (!cpu_vec)
goto fail_put_online_cpus;
/* Note: Diag 0c needs 8 byte alignment and real storage */
debug_entry_t ***areas;
int i, j;
- areas = kmalloc(nr_areas * sizeof(debug_entry_t **), GFP_KERNEL);
+ areas = kmalloc_array(nr_areas, sizeof(debug_entry_t **), GFP_KERNEL);
if (!areas)
goto fail_malloc_areas;
for (i = 0; i < nr_areas; i++) {
- areas[i] = kmalloc(pages_per_area * sizeof(debug_entry_t *), GFP_KERNEL);
+ areas[i] = kmalloc_array(pages_per_area,
+ sizeof(debug_entry_t *),
+ GFP_KERNEL);
if (!areas[i])
goto fail_malloc_areas2;
for (j = 0; j < pages_per_area; j++) {
j++;
j++;
- new = kmalloc(sizeof(struct attribute *) * j, GFP_KERNEL);
+ new = kmalloc_array(j, sizeof(struct attribute *), GFP_KERNEL);
if (!new)
return NULL;
j = 0;
static int
dcss_set_subcodes(void)
{
- char *name = kmalloc(8 * sizeof(char), GFP_KERNEL | GFP_DMA);
+ char *name = kmalloc(8, GFP_KERNEL | GFP_DMA);
unsigned long rx, ry;
int rc;
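Note: when the element size is sizeof(char) or sizeof(__u8), i.e. 1, the product cannot overflow, so sites like the hunk above keep plain kmalloc() and simply drop the multiplication instead of converting to kmalloc_array(). The same simplification recurs later in this series wherever the element type is a single byte.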
if (!atomic_read(&nmi_active))
return 0;
- prev_nmi_count = kmalloc(nr_cpu_ids * sizeof(unsigned int), GFP_KERNEL);
+ prev_nmi_count = kmalloc_array(nr_cpu_ids, sizeof(unsigned int),
+ GFP_KERNEL);
if (!prev_nmi_count) {
err = -ENOMEM;
goto error;
unsigned long *p = current_thread_info()->utraps;
current_thread_info()->utraps =
- kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long),
- GFP_KERNEL);
+ kmalloc_array(UT_TRAP_INSTRUCTION_31 + 1,
+ sizeof(long),
+ GFP_KERNEL);
if (!current_thread_info()->utraps) {
current_thread_info()->utraps = p;
return -ENOMEM;
if (!bpf_jit_enable)
return;
- addrs = kmalloc(flen * sizeof(*addrs), GFP_KERNEL);
+ addrs = kmalloc_array(flen, sizeof(*addrs), GFP_KERNEL);
if (addrs == NULL)
return;
return -1;
}
- irq_req_buffer = kmalloc(
- sizeof(struct io_thread_req *) * UBD_REQ_BUFFER_SIZE,
- GFP_KERNEL
+ irq_req_buffer = kmalloc_array(UBD_REQ_BUFFER_SIZE,
+ sizeof(struct io_thread_req *),
+ GFP_KERNEL
);
irq_remainder = 0;
printk(KERN_ERR "Failed to initialize ubd buffering\n");
return -1;
}
- io_req_buffer = kmalloc(
- sizeof(struct io_thread_req *) * UBD_REQ_BUFFER_SIZE,
- GFP_KERNEL
+ io_req_buffer = kmalloc_array(UBD_REQ_BUFFER_SIZE,
+ sizeof(struct io_thread_req *),
+ GFP_KERNEL
);
io_remainder = 0;
result->max_iov_frags = num_extra_frags;
for (i = 0; i < max_size; i++) {
if (vp->header_size > 0)
- iov = kmalloc(
- sizeof(struct iovec) * (3 + num_extra_frags),
- GFP_KERNEL
+ iov = kmalloc_array(3 + num_extra_frags,
+ sizeof(struct iovec),
+ GFP_KERNEL
);
else
- iov = kmalloc(
- sizeof(struct iovec) * (2 + num_extra_frags),
- GFP_KERNEL
+ iov = kmalloc_array(2 + num_extra_frags,
+ sizeof(struct iovec),
+ GFP_KERNEL
);
if (iov == NULL)
goto out_fail;
return -EINVAL;
}
- sleep_save = kmalloc(puv3_cpu_pm_fns->save_count
- * sizeof(unsigned long), GFP_KERNEL);
+ sleep_save = kmalloc_array(puv3_cpu_pm_fns->save_count,
+ sizeof(unsigned long),
+ GFP_KERNEL);
if (!sleep_save) {
printk(KERN_ERR "failed to alloc memory for pm save\n");
return -ENOMEM;
j++;
j++;
- new = kmalloc(sizeof(struct attribute *) * j, GFP_KERNEL);
+ new = kmalloc_array(j, sizeof(struct attribute *), GFP_KERNEL);
if (!new)
return NULL;
#endif
cfg = hpet_readl(HPET_CFG);
- hpet_boot_cfg = kmalloc((last + 2) * sizeof(*hpet_boot_cfg),
- GFP_KERNEL);
+ hpet_boot_cfg = kmalloc_array(last + 2, sizeof(*hpet_boot_cfg),
+ GFP_KERNEL);
if (hpet_boot_cfg)
*hpet_boot_cfg = cfg;
else
if (ret)
goto out_setup_data_kobj;
- kobjp = kmalloc(sizeof(*kobjp) * nr, GFP_KERNEL);
+ kobjp = kmalloc_array(nr, sizeof(*kobjp), GFP_KERNEL);
if (!kobjp) {
ret = -ENOMEM;
goto out_setup_data_kobj;
if (svm_sev_enabled()) {
r = -ENOMEM;
- sd->sev_vmcbs = kmalloc((max_sev_asid + 1) * sizeof(void *), GFP_KERNEL);
+ sd->sev_vmcbs = kmalloc_array(max_sev_asid + 1,
+ sizeof(void *),
+ GFP_KERNEL);
if (!sd->sev_vmcbs)
goto err_1;
}
extra_pass = true;
goto skip_init_addrs;
}
- addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL);
+ addrs = kmalloc_array(prog->len, sizeof(*addrs), GFP_KERNEL);
if (!addrs) {
prog = orig_prog;
goto out_addrs;
prog = tmp;
}
- addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL);
+ addrs = kmalloc_array(prog->len, sizeof(*addrs), GFP_KERNEL);
if (!addrs) {
prog = orig_prog;
goto out;
if (is_uv3_hub() || is_uv2_hub() || is_uv1_hub())
timeout_us = calculate_destination_timeout();
- vp = kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
+ vp = kmalloc_array(nuvhubs, sizeof(struct uvhub_desc), GFP_KERNEL);
uvhub_descs = (struct uvhub_desc *)vp;
memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL);
BUG_ON(!state || !ldb);
ph = &ldb->ph;
tb[0] = &ldb->toc;
- tb[1] = kmalloc(sizeof(*tb[1]) * 3, GFP_KERNEL);
+ tb[1] = kmalloc_array(3, sizeof(*tb[1]), GFP_KERNEL);
if (!tb[1]) {
ldm_crit("Out of memory.");
goto err;
goto out_nooutbuf;
/* avoid "the frame size is larger than 1024 bytes" compiler warning */
- sg = kmalloc(sizeof(*sg) * 8 * (diff_dst ? 4 : 2), GFP_KERNEL);
+ sg = kmalloc(array3_size(sizeof(*sg), 8, (diff_dst ? 4 : 2)),
+ GFP_KERNEL);
if (!sg)
goto out_nosg;
sgout = &sg[16];
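Note: products of three factors, as in the hunk above, cannot be expressed with the two-argument kmalloc_array(), so they go through array3_size() from include/linux/overflow.h, which saturates to SIZE_MAX on overflow so the subsequent kmalloc() fails cleanly. A rough sketch of its semantics (the real helper is built on check_mul_overflow(); the name below is an illustrative stand-in, not the kernel API):

static inline size_t array3_size_sketch(size_t a, size_t b, size_t c)
{
	/* Saturate to SIZE_MAX if a * b would overflow. */
	if (b != 0 && a > SIZE_MAX / b)
		return SIZE_MAX;
	a *= b;
	/* Saturate to SIZE_MAX if (a * b) * c would overflow. */
	if (c != 0 && a > SIZE_MAX / c)
		return SIZE_MAX;
	return a * c;
}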
* in order to account for buggy BIOS which don't export the first two
* special levels (see below)
*/
- br->levels = kmalloc((obj->package.count + ACPI_VIDEO_FIRST_LEVEL) *
- sizeof(*br->levels), GFP_KERNEL);
+ br->levels = kmalloc_array(obj->package.count + ACPI_VIDEO_FIRST_LEVEL,
+ sizeof(*br->levels),
+ GFP_KERNEL);
if (!br->levels) {
result = -ENOMEM;
goto out_free;
struct ghes_arr ghes_arr;
ghes_arr.count = 0;
- ghes_arr.ghes_devs = kmalloc(sizeof(void *) * ghes_count, GFP_KERNEL);
+ ghes_arr.ghes_devs = kmalloc_array(ghes_count, sizeof(void *),
+ GFP_KERNEL);
if (!ghes_arr.ghes_devs)
return -ENOMEM;
pr->performance->state_count = pss->package.count;
pr->performance->states =
- kmalloc(sizeof(struct acpi_processor_px) * pss->package.count,
- GFP_KERNEL);
+ kmalloc_array(pss->package.count,
+ sizeof(struct acpi_processor_px),
+ GFP_KERNEL);
if (!pr->performance->states) {
result = -ENOMEM;
goto end;
pr->throttling.state_count = tss->package.count;
pr->throttling.states_tss =
- kmalloc(sizeof(struct acpi_processor_tx_tss) * tss->package.count,
- GFP_KERNEL);
+ kmalloc_array(tss->package.count,
+ sizeof(struct acpi_processor_tx_tss),
+ GFP_KERNEL);
if (!pr->throttling.states_tss) {
result = -ENOMEM;
goto end;
card->using_dma = 1;
if (1) { /* All known FPGA versions so far */
card->dma_alignment = 3;
- card->dma_bounce = kmalloc(card->nr_ports * BUF_SIZE, GFP_KERNEL);
+ card->dma_bounce = kmalloc_array(card->nr_ports,
+ BUF_SIZE, GFP_KERNEL);
if (!card->dma_bounce) {
dev_warn(&card->dev->dev, "Failed to allocate DMA bounce buffers\n");
err = -ENOMEM;
goto none;
}
- cfag12864b_cache = kmalloc(sizeof(unsigned char) *
- CFAG12864B_SIZE, GFP_KERNEL);
+ cfag12864b_cache = kmalloc(CFAG12864B_SIZE,
+ GFP_KERNEL);
if (cfag12864b_cache == NULL) {
printk(KERN_ERR CFAG12864B_NAME ": ERROR: "
"can't alloc cache buffer (%i bytes)\n",
Controller->CombinedStatusBufferLength = NewStatusBufferLength;
return true;
}
- NewStatusBuffer = kmalloc(2 * Controller->CombinedStatusBufferLength,
- GFP_ATOMIC);
+ NewStatusBuffer = kmalloc_array(2, Controller->CombinedStatusBufferLength,
+ GFP_ATOMIC);
if (NewStatusBuffer == NULL)
{
DAC960_Warning("Unable to expand Combined Status Buffer - Truncating\n",
__rq_for_each_bio(bio, rq)
segments += bio_segments(bio);
- bvec = kmalloc(sizeof(struct bio_vec) * segments, GFP_NOIO);
+ bvec = kmalloc_array(segments, sizeof(struct bio_vec),
+ GFP_NOIO);
if (!bvec)
return -EIO;
cmd->bvec = bvec;
vaddr = (unsigned long)z_remap_nocache_nonser(paddr, size);
#endif
z2ram_map =
- kmalloc((size/Z2RAM_CHUNKSIZE)*sizeof(z2ram_map[0]),
- GFP_KERNEL);
+ kmalloc_array(size / Z2RAM_CHUNKSIZE,
+ sizeof(z2ram_map[0]),
+ GFP_KERNEL);
if ( z2ram_map == NULL )
{
printk( KERN_ERR DEVICE_NAME
*/
nr = nframes;
do {
- cgc.buffer = kmalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
+ cgc.buffer = kmalloc_array(nr, CD_FRAMESIZE_RAW, GFP_KERNEL);
if (cgc.buffer)
break;
if (ureserve.seg_count >= 16384)
return -EINVAL;
- usegment = kmalloc(sizeof(*usegment) * ureserve.seg_count, GFP_KERNEL);
+ usegment = kmalloc_array(ureserve.seg_count,
+ sizeof(*usegment),
+ GFP_KERNEL);
if (!usegment)
return -ENOMEM;
- ksegment = kmalloc(sizeof(*ksegment) * kreserve.seg_count, GFP_KERNEL);
+ ksegment = kmalloc_array(kreserve.seg_count,
+ sizeof(*ksegment),
+ GFP_KERNEL);
if (!ksegment) {
kfree(usegment);
return -ENOMEM;
* We'll work with an array of isoch_data's (one for each
* device in dev_list) throughout this function.
*/
- if ((master = kmalloc(ndevs * sizeof(*master), GFP_KERNEL)) == NULL) {
+ master = kmalloc_array(ndevs, sizeof(*master), GFP_KERNEL);
+ if (master == NULL) {
ret = -ENOMEM;
goto get_out;
}
else
return 0;
- sgi_tioca_agp_bridges = kmalloc(tioca_gart_found *
- sizeof(struct agp_bridge_data *),
- GFP_KERNEL);
+ sgi_tioca_agp_bridges = kmalloc_array(tioca_gart_found,
+ sizeof(struct agp_bridge_data *),
+ GFP_KERNEL);
if (!sgi_tioca_agp_bridges)
return -ENOMEM;
if (table == NULL)
return -ENOMEM;
- uninorth_priv.pages_arr = kmalloc((1 << page_order) * sizeof(struct page*), GFP_KERNEL);
+ uninorth_priv.pages_arr = kmalloc_array(1 << page_order,
+ sizeof(struct page *),
+ GFP_KERNEL);
if (uninorth_priv.pages_arr == NULL)
goto enomem;
nr_ports = portdev->max_nr_ports;
nr_queues = use_multiport(portdev) ? (nr_ports + 1) * 2 : 2;
- vqs = kmalloc(nr_queues * sizeof(struct virtqueue *), GFP_KERNEL);
- io_callbacks = kmalloc(nr_queues * sizeof(vq_callback_t *), GFP_KERNEL);
- io_names = kmalloc(nr_queues * sizeof(char *), GFP_KERNEL);
- portdev->in_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *),
- GFP_KERNEL);
- portdev->out_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *),
- GFP_KERNEL);
+ vqs = kmalloc_array(nr_queues, sizeof(struct virtqueue *), GFP_KERNEL);
+ io_callbacks = kmalloc_array(nr_queues, sizeof(vq_callback_t *),
+ GFP_KERNEL);
+ io_names = kmalloc_array(nr_queues, sizeof(char *), GFP_KERNEL);
+ portdev->in_vqs = kmalloc_array(nr_ports, sizeof(struct virtqueue *),
+ GFP_KERNEL);
+ portdev->out_vqs = kmalloc_array(nr_ports, sizeof(struct virtqueue *),
+ GFP_KERNEL);
if (!vqs || !io_callbacks || !io_names || !portdev->in_vqs ||
!portdev->out_vqs) {
err = -ENOMEM;
cpu_freq = htp_freq_to_cpu_freq(priv->clk_mult);
- table = kmalloc((priv->max_freqs + 1) * sizeof(*table), GFP_KERNEL);
+ table = kmalloc_array(priv->max_freqs + 1, sizeof(*table), GFP_KERNEL);
if (!table)
return ERR_PTR(-ENOMEM);
}
/* generate the IVs */
- ivs = kmalloc(number_of_ivs * CIPHER_BLOCK_SIZE, GFP_ATOMIC);
+ ivs = kmalloc_array(CIPHER_BLOCK_SIZE, number_of_ivs, GFP_ATOMIC);
if (!ivs)
return -ENOMEM;
get_random_bytes(ivs, number_of_ivs * CIPHER_BLOCK_SIZE);
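Note: kmalloc_array(n, size, ...) checks and computes the same product regardless of argument order, so hunks like the one above, where the mechanical conversion left the element size (CIPHER_BLOCK_SIZE) in the count slot, are functionally identical to the count-first form; only the count-first reading convention suffers:

	/* Equivalent allocations; only readability differs. */
	ivs = kmalloc_array(number_of_ivs, CIPHER_BLOCK_SIZE, GFP_ATOMIC);
	ivs = kmalloc_array(CIPHER_BLOCK_SIZE, number_of_ivs, GFP_ATOMIC);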
while (!(stm32_hash_read(hdev, HASH_SR) & HASH_SR_DATA_INPUT_READY))
cpu_relax();
- rctx->hw_context = kmalloc(sizeof(u32) * (3 + HASH_CSR_REGISTER_NUMBER),
- GFP_KERNEL);
+ rctx->hw_context = kmalloc_array(3 + HASH_CSR_REGISTER_NUMBER,
+ sizeof(u32),
+ GFP_KERNEL);
preg = rctx->hw_context;
/* Init the BDs, if needed */
if (bd_count) {
- tsk->cookie = kmalloc(sizeof(void*) * bd_count, GFP_KERNEL);
+ tsk->cookie = kmalloc_array(bd_count, sizeof(void *),
+ GFP_KERNEL);
if (!tsk->cookie)
goto error;
struct dmaengine_unmap_data *unmap;
int err = 0;
- src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
+ src = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!src)
return -ENOMEM;
buffer->page_count = 0;
buffer->page_count_mapped = 0;
- buffer->pages = kmalloc(page_count * sizeof(buffer->pages[0]),
- GFP_KERNEL);
+ buffer->pages = kmalloc_array(page_count, sizeof(buffer->pages[0]),
+ GFP_KERNEL);
if (buffer->pages == NULL)
return -ENOMEM;
max_receive = 1U << (dev->card->max_receive + 1);
num_packets = (FWNET_ISO_PAGE_COUNT * PAGE_SIZE) / max_receive;
- ptrptr = kmalloc(sizeof(void *) * num_packets, GFP_KERNEL);
+ ptrptr = kmalloc_array(num_packets, sizeof(void *), GFP_KERNEL);
if (!ptrptr) {
retval = -ENOMEM;
goto failed;
(*dump)[i++][1] = RREG32(addr); \
} while (0)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
#undef HQD_N_REGS
#define HQD_N_REGS (19+4)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
(*dump)[i++][1] = RREG32(addr); \
} while (0)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
#undef HQD_N_REGS
#define HQD_N_REGS (19+4+2+3+7)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
(*dump)[i++][1] = RREG32(addr); \
} while (0)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
#undef HQD_N_REGS
#define HQD_N_REGS (19+6+7+10)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
edid[EDID_LENGTH-1] += edid[0x7e] - valid_extensions;
edid[0x7e] = valid_extensions;
- new = kmalloc((valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
+ new = kmalloc_array(valid_extensions + 1, EDID_LENGTH,
+ GFP_KERNEL);
if (!new)
goto out;
if (read_vbt_r10(addr, &vbt))
return -1;
- gct = kmalloc(sizeof(*gct) * vbt.panel_count, GFP_KERNEL);
+ gct = kmalloc_array(vbt.panel_count, sizeof(*gct), GFP_KERNEL);
if (!gct)
return -ENOMEM;
goto done;
mmu->mem = mems[ret].oclass;
- mmu->heap = kmalloc(sizeof(*mmu->heap) * mmu->heap_nr, GFP_KERNEL);
- mmu->type = kmalloc(sizeof(*mmu->type) * mmu->type_nr, GFP_KERNEL);
+ mmu->heap = kmalloc_array(mmu->heap_nr, sizeof(*mmu->heap),
+ GFP_KERNEL);
+ mmu->type = kmalloc_array(mmu->type_nr, sizeof(*mmu->type),
+ GFP_KERNEL);
if (ret = -ENOMEM, !mmu->heap || !mmu->type)
goto done;
- mmu->kind = kmalloc(sizeof(*mmu->kind) * mmu->kind_nr, GFP_KERNEL);
+ mmu->kind = kmalloc_array(mmu->kind_nr, sizeof(*mmu->kind),
+ GFP_KERNEL);
if (!mmu->kind && mmu->kind_nr)
goto done;
vmm->limit = args->size;
vmm->page_nr = args->page_nr;
- vmm->page = kmalloc(sizeof(*vmm->page) * vmm->page_nr, GFP_KERNEL);
+ vmm->page = kmalloc_array(vmm->page_nr, sizeof(*vmm->page),
+ GFP_KERNEL);
if (!vmm->page) {
ret = -ENOMEM;
goto done;
}
iccsense->nr_entry = cnt;
- iccsense->rail = kmalloc(sizeof(struct pwr_rail_t) * cnt, GFP_KERNEL);
+ iccsense->rail = kmalloc_array(cnt, sizeof(struct pwr_rail_t),
+ GFP_KERNEL);
if (!iccsense->rail)
return -ENOMEM;
return -ENOSYS;
/* XXX: Multiple partitions? */
- result = kmalloc(64 * sizeof(u32), GFP_KERNEL);
+ result = kmalloc_array(64, sizeof(u32), GFP_KERNEL);
if (!result)
return -ENOMEM;
h_adj = omap_dmm->container_height / ydiv;
w_adj = omap_dmm->container_width / xdiv;
- map = kmalloc(h_adj * sizeof(*map), GFP_KERNEL);
- global_map = kmalloc((w_adj + 1) * h_adj, GFP_KERNEL);
+ map = kmalloc_array(h_adj, sizeof(*map), GFP_KERNEL);
+ global_map = kmalloc_array(w_adj + 1, h_adj, GFP_KERNEL);
if (!map || !global_map)
goto error;
* DSS, GPU, etc. are not cache coherent:
*/
if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
- addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
+ addrs = kmalloc_array(npages, sizeof(*addrs), GFP_KERNEL);
if (!addrs) {
ret = -ENOMEM;
goto free_pages;
(~(uint64_t)0) >> (qdev->slot_id_bits + qdev->slot_gen_bits);
qdev->mem_slots =
- kmalloc(qdev->n_mem_slots * sizeof(struct qxl_memslot),
- GFP_KERNEL);
+ kmalloc_array(qdev->n_mem_slots, sizeof(struct qxl_memslot),
+ GFP_KERNEL);
idr_init(&qdev->release_idr);
spin_lock_init(&qdev->release_idr_lock);
dev_priv->nr_dma_pages = dev_priv->cmd_dma->size /
(SAVAGE_DMA_PAGE_SIZE * 4);
- dev_priv->dma_pages = kmalloc(sizeof(drm_savage_dma_page_t) *
- dev_priv->nr_dma_pages, GFP_KERNEL);
+ dev_priv->dma_pages = kmalloc_array(dev_priv->nr_dma_pages,
+ sizeof(drm_savage_dma_page_t),
+ GFP_KERNEL);
if (dev_priv->dma_pages == NULL)
return -ENOMEM;
DRM_DEBUG("Flushing [FB:%d] st=%ums\n", fb->base.id,
epd->factored_stage_time);
- buf = kmalloc(fb->width * fb->height, GFP_KERNEL);
+ buf = kmalloc_array(fb->width, fb->height, GFP_KERNEL);
if (!buf)
return -ENOMEM;
if (use_static)
pages_to_free = static_buf;
else
- pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
- GFP_KERNEL);
+ pages_to_free = kmalloc_array(npages_to_free,
+ sizeof(struct page *),
+ GFP_KERNEL);
if (!pages_to_free) {
pr_debug("Failed to allocate memory for pool free operation\n");
return 0;
unsigned max_cpages = min(count << order, (unsigned)NUM_PAGES_TO_ALLOC);
/* allocate array for page caching change */
- caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
+ caching_array = kmalloc_array(max_cpages, sizeof(struct page *),
+ GFP_KERNEL);
if (!caching_array) {
pr_debug("Unable to allocate table for new pages\n");
if (use_static)
pages_to_free = static_buf;
else
- pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
- GFP_KERNEL);
+ pages_to_free = kmalloc_array(npages_to_free,
+ sizeof(struct page *),
+ GFP_KERNEL);
if (!pages_to_free) {
pr_debug("%s: Failed to allocate memory for pool free operation\n",
(unsigned)(PAGE_SIZE/sizeof(struct page *)));
/* allocate array for page caching change */
- caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
+ caching_array = kmalloc_array(max_cpages, sizeof(struct page *),
+ GFP_KERNEL);
if (!caching_array) {
pr_debug("%s: Unable to allocate table for new pages\n",
{
if (vc4_state->dlist_count == vc4_state->dlist_size) {
u32 new_size = max(4u, vc4_state->dlist_count * 2);
- u32 *new_dlist = kmalloc(new_size * 4, GFP_KERNEL);
+ u32 *new_dlist = kmalloc_array(new_size, 4, GFP_KERNEL);
if (!new_dlist)
return;
}
if (parser->device->maxcollection == parser->device->collection_size) {
- collection = kmalloc(sizeof(struct hid_collection) *
- parser->device->collection_size * 2, GFP_KERNEL);
+ collection = kmalloc(
+ array3_size(sizeof(struct hid_collection),
+ parser->device->collection_size,
+ 2),
+ GFP_KERNEL);
if (collection == NULL) {
hid_err(parser->device, "failed to reallocate collection array\n");
return -ENOMEM;
__s32 max = field->logical_maximum;
__s32 *value;
- value = kmalloc(sizeof(__s32) * count, GFP_ATOMIC);
+ value = kmalloc_array(count, sizeof(__s32), GFP_ATOMIC);
if (!value)
return;
char *buf;
unsigned int i;
- buf = kmalloc(sizeof(char) * HID_DEBUG_BUFSIZE, GFP_ATOMIC);
+ buf = kmalloc(HID_DEBUG_BUFSIZE, GFP_ATOMIC);
if (!buf)
return;
return -EINVAL;
o_fb = fbdata->bitmap;
- tmp_fb = kmalloc(PICOLCDFB_SIZE*info->var.bits_per_pixel, GFP_KERNEL);
+ tmp_fb = kmalloc_array(PICOLCDFB_SIZE, info->var.bits_per_pixel,
+ GFP_KERNEL);
if (!tmp_fb)
return -ENOMEM;
goto out;
}
- buf = kmalloc(count * sizeof(__u8), GFP_KERNEL);
+ buf = kmalloc(count, GFP_KERNEL);
if (!buf) {
ret = -ENOMEM;
goto out;
u8 __user **data_ptrs;
int i, res;
- data_ptrs = kmalloc(nmsgs * sizeof(u8 __user *), GFP_KERNEL);
+ data_ptrs = kmalloc_array(nmsgs, sizeof(u8 __user *), GFP_KERNEL);
if (data_ptrs == NULL) {
kfree(msgs);
return -ENOMEM;
if (!hwif->sg_max_nents)
hwif->sg_max_nents = PRD_ENTRIES;
- hwif->sg_table = kmalloc(sizeof(struct scatterlist)*hwif->sg_max_nents,
- GFP_KERNEL);
+ hwif->sg_table = kmalloc_array(hwif->sg_max_nents,
+ sizeof(struct scatterlist),
+ GFP_KERNEL);
if (!hwif->sg_table) {
printk(KERN_ERR "%s: unable to allocate SG table.\n", hwif->name);
goto out;
rt = &id->route;
rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
- rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths,
- GFP_KERNEL);
+ rt->path_rec = kmalloc_array(rt->num_paths, sizeof(*rt->path_rec),
+ GFP_KERNEL);
if (!rt->path_rec)
goto err;
if (params->cache) {
pool->cache_bucket =
- kmalloc(IB_FMR_HASH_SIZE * sizeof *pool->cache_bucket,
- GFP_KERNEL);
+ kmalloc_array(IB_FMR_HASH_SIZE,
+ sizeof(*pool->cache_bucket),
+ GFP_KERNEL);
if (!pool->cache_bucket) {
ret = -ENOMEM;
goto out_free_pool;
alloc->last = 0;
alloc->max = num;
spin_lock_init(&alloc->lock);
- alloc->table = kmalloc(BITS_TO_LONGS(num) * sizeof(long),
- GFP_KERNEL);
+ alloc->table = kmalloc_array(BITS_TO_LONGS(num), sizeof(long),
+ GFP_KERNEL);
if (!alloc->table)
return -ENOMEM;
ctx->refcount++;
}
if (!ret && hw_update) {
- gids = kmalloc(sizeof(*gids) * MLX4_MAX_PORT_GIDS, GFP_ATOMIC);
+ gids = kmalloc_array(MLX4_MAX_PORT_GIDS, sizeof(*gids),
+ GFP_ATOMIC);
if (!gids) {
ret = -ENOMEM;
} else {
if (!ret && hw_update) {
int i;
- gids = kmalloc(sizeof(*gids) * MLX4_MAX_PORT_GIDS, GFP_ATOMIC);
+ gids = kmalloc_array(MLX4_MAX_PORT_GIDS, sizeof(*gids),
+ GFP_ATOMIC);
if (!gids) {
ret = -ENOMEM;
} else {
goto err_counter;
ibdev->ib_uc_qpns_bitmap =
- kmalloc(BITS_TO_LONGS(ibdev->steer_qpn_count) *
- sizeof(long),
- GFP_KERNEL);
+ kmalloc_array(BITS_TO_LONGS(ibdev->steer_qpn_count),
+ sizeof(long),
+ GFP_KERNEL);
if (!ibdev->ib_uc_qpns_bitmap)
goto err_steer_qp_release;
int i;
qp->sqp_proxy_rcv =
- kmalloc(sizeof (struct mlx4_ib_buf) * qp->rq.wqe_cnt,
- GFP_KERNEL);
+ kmalloc_array(qp->rq.wqe_cnt, sizeof(struct mlx4_ib_buf),
+ GFP_KERNEL);
if (!qp->sqp_proxy_rcv)
return -ENOMEM;
for (i = 0; i < qp->rq.wqe_cnt; i++) {
alloc->max = num;
alloc->mask = mask;
spin_lock_init(&alloc->lock);
- alloc->table = kmalloc(BITS_TO_LONGS(num) * sizeof (long),
- GFP_KERNEL);
+ alloc->table = kmalloc_array(BITS_TO_LONGS(num), sizeof(long),
+ GFP_KERNEL);
if (!alloc->table)
return -ENOMEM;
int npage = (nent * sizeof (void *) + PAGE_SIZE - 1) / PAGE_SIZE;
int i;
- array->page_list = kmalloc(npage * sizeof *array->page_list, GFP_KERNEL);
+ array->page_list = kmalloc_array(npage, sizeof(*array->page_list),
+ GFP_KERNEL);
if (!array->page_list)
return -ENOMEM;
npages *= 2;
}
- dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
+ dma_list = kmalloc_array(npages, sizeof(*dma_list),
+ GFP_KERNEL);
if (!dma_list)
goto err_free;
npages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
shift = PAGE_SHIFT;
- dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
+ dma_list = kmalloc_array(npages, sizeof(*dma_list),
+ GFP_KERNEL);
if (!dma_list)
return -ENOMEM;
- buf->page_list = kmalloc(npages * sizeof *buf->page_list,
- GFP_KERNEL);
+ buf->page_list = kmalloc_array(npages,
+ sizeof(*buf->page_list),
+ GFP_KERNEL);
if (!buf->page_list)
goto err_out;
{
int i;
- dev->cmd.context = kmalloc(dev->cmd.max_cmds *
- sizeof (struct mthca_cmd_context),
- GFP_KERNEL);
+ dev->cmd.context = kmalloc_array(dev->cmd.max_cmds,
+ sizeof(struct mthca_cmd_context),
+ GFP_KERNEL);
if (!dev->cmd.context)
return -ENOMEM;
eq->nent = roundup_pow_of_two(max(nent, 2));
npages = ALIGN(eq->nent * MTHCA_EQ_ENTRY_SIZE, PAGE_SIZE) / PAGE_SIZE;
- eq->page_list = kmalloc(npages * sizeof *eq->page_list,
- GFP_KERNEL);
+ eq->page_list = kmalloc_array(npages, sizeof(*eq->page_list),
+ GFP_KERNEL);
if (!eq->page_list)
goto err_out;
for (i = 0; i < npages; ++i)
eq->page_list[i].buf = NULL;
- dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
+ dma_list = kmalloc_array(npages, sizeof(*dma_list), GFP_KERNEL);
if (!dma_list)
goto err_out_free;
dev->db_tab->max_group1 = 0;
dev->db_tab->min_group2 = dev->db_tab->npages - 1;
- dev->db_tab->page = kmalloc(dev->db_tab->npages *
- sizeof *dev->db_tab->page,
- GFP_KERNEL);
+ dev->db_tab->page = kmalloc_array(dev->db_tab->npages,
+ sizeof(*dev->db_tab->page),
+ GFP_KERNEL);
if (!dev->db_tab->page) {
kfree(dev->db_tab);
return -ENOMEM;
for (i = 0; i <= buddy->max_order; ++i) {
s = BITS_TO_LONGS(1 << (buddy->max_order - i));
- buddy->bits[i] = kmalloc(s * sizeof (long), GFP_KERNEL);
+ buddy->bits[i] = kmalloc_array(s, sizeof(long), GFP_KERNEL);
if (!buddy->bits[i])
goto err_out_free;
bitmap_zero(buddy->bits[i],
size = PAGE_ALIGN(qp->send_wqe_offset +
(qp->sq.max << qp->sq.wqe_shift));
- qp->wrid = kmalloc((qp->rq.max + qp->sq.max) * sizeof (u64),
- GFP_KERNEL);
+ qp->wrid = kmalloc_array(qp->rq.max + qp->sq.max, sizeof(u64),
+ GFP_KERNEL);
if (!qp->wrid)
goto err_out;
if (pd->ibpd.uobject)
return 0;
- srq->wrid = kmalloc(srq->max * sizeof (u64), GFP_KERNEL);
+ srq->wrid = kmalloc_array(srq->max, sizeof(u64), GFP_KERNEL);
if (!srq->wrid)
return -ENOMEM;
int i;
struct netdev_hw_addr *ha;
- addrs = kmalloc(ETH_ALEN * mc_count, GFP_ATOMIC);
+ addrs = kmalloc_array(mc_count, ETH_ALEN, GFP_ATOMIC);
if (!addrs) {
set_allmulti(nesdev, nic_active_bit);
goto unlock;
srq->bit_fields_len = (srq->rq.max_cnt / 32) +
(srq->rq.max_cnt % 32 ? 1 : 0);
srq->idx_bit_fields =
- kmalloc(srq->bit_fields_len * sizeof(u32), GFP_KERNEL);
+ kmalloc_array(srq->bit_fields_len, sizeof(u32),
+ GFP_KERNEL);
if (srq->idx_bit_fields == NULL)
goto arm_err;
memset(srq->idx_bit_fields, 0xff,
dd->cspec->cntrnamelen = sizeof(cntr6120names) - 1;
else
dd->cspec->cntrnamelen = 1 + s - cntr6120names;
- dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
- * sizeof(u64), GFP_KERNEL);
+ dd->cspec->cntrs = kmalloc_array(dd->cspec->ncntrs, sizeof(u64),
+ GFP_KERNEL);
for (i = 0, s = (char *)portcntr6120names; s; i++)
s = strchr(s + 1, '\n');
dd->cspec->nportcntrs = i - 1;
dd->cspec->portcntrnamelen = sizeof(portcntr6120names) - 1;
- dd->cspec->portcntrs = kmalloc(dd->cspec->nportcntrs
- * sizeof(u64), GFP_KERNEL);
+ dd->cspec->portcntrs = kmalloc_array(dd->cspec->nportcntrs,
+ sizeof(u64),
+ GFP_KERNEL);
}
static u32 qib_read_6120cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
dd->cspec->cntrnamelen = sizeof(cntr7220names) - 1;
else
dd->cspec->cntrnamelen = 1 + s - cntr7220names;
- dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
- * sizeof(u64), GFP_KERNEL);
+ dd->cspec->cntrs = kmalloc_array(dd->cspec->ncntrs, sizeof(u64),
+ GFP_KERNEL);
for (i = 0, s = (char *)portcntr7220names; s; i++)
s = strchr(s + 1, '\n');
dd->cspec->nportcntrs = i - 1;
dd->cspec->portcntrnamelen = sizeof(portcntr7220names) - 1;
- dd->cspec->portcntrs = kmalloc(dd->cspec->nportcntrs
- * sizeof(u64), GFP_KERNEL);
+ dd->cspec->portcntrs = kmalloc_array(dd->cspec->nportcntrs,
+ sizeof(u64),
+ GFP_KERNEL);
}
static u32 qib_read_7220cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
if (msix_entries) {
/* can be up to 512 bytes, too big for stack */
- msix_vecsave = kmalloc(2 * dd->cspec->num_msix_entries *
- sizeof(u64), GFP_KERNEL);
+ msix_vecsave = kmalloc_array(2 * dd->cspec->num_msix_entries,
+ sizeof(u64),
+ GFP_KERNEL);
}
/*
dd->cspec->cntrnamelen = sizeof(cntr7322names) - 1;
else
dd->cspec->cntrnamelen = 1 + s - cntr7322names;
- dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
- * sizeof(u64), GFP_KERNEL);
+ dd->cspec->cntrs = kmalloc_array(dd->cspec->ncntrs, sizeof(u64),
+ GFP_KERNEL);
for (i = 0, s = (char *)portcntr7322names; s; i++)
s = strchr(s + 1, '\n');
dd->cspec->nportcntrs = i - 1;
dd->cspec->portcntrnamelen = sizeof(portcntr7322names) - 1;
for (i = 0; i < dd->num_pports; ++i) {
- dd->pport[i].cpspec->portcntrs = kmalloc(dd->cspec->nportcntrs
- * sizeof(u64), GFP_KERNEL);
+ dd->pport[i].cpspec->portcntrs =
+ kmalloc_array(dd->cspec->nportcntrs, sizeof(u64),
+ GFP_KERNEL);
}
}
sbufcnt = dd->piobcnt2k + dd->piobcnt4k +
NUM_VL15_BUFS + BITS_PER_LONG - 1;
sbufcnt /= BITS_PER_LONG;
- dd->cspec->sendchkenable = kmalloc(sbufcnt *
- sizeof(*dd->cspec->sendchkenable), GFP_KERNEL);
- dd->cspec->sendgrhchk = kmalloc(sbufcnt *
- sizeof(*dd->cspec->sendgrhchk), GFP_KERNEL);
- dd->cspec->sendibchk = kmalloc(sbufcnt *
- sizeof(*dd->cspec->sendibchk), GFP_KERNEL);
+ dd->cspec->sendchkenable =
+ kmalloc_array(sbufcnt, sizeof(*dd->cspec->sendchkenable),
+ GFP_KERNEL);
+ dd->cspec->sendgrhchk =
+ kmalloc_array(sbufcnt, sizeof(*dd->cspec->sendgrhchk),
+ GFP_KERNEL);
+ dd->cspec->sendibchk =
+ kmalloc_array(sbufcnt, sizeof(*dd->cspec->sendibchk),
+ GFP_KERNEL);
if (!dd->cspec->sendchkenable || !dd->cspec->sendgrhchk ||
!dd->cspec->sendibchk) {
ret = -ENOMEM;
goto alloc_login_buf_fail;
iser_conn->num_rx_descs = session->cmds_max;
- iser_conn->rx_descs = kmalloc(iser_conn->num_rx_descs *
- sizeof(struct iser_rx_desc), GFP_KERNEL);
+ iser_conn->rx_descs = kmalloc_array(iser_conn->num_rx_descs,
+ sizeof(struct iser_rx_desc),
+ GFP_KERNEL);
if (!iser_conn->rx_descs)
goto rx_desc_alloc_fail;
for (i = 0; i < target->req_ring_size; ++i) {
req = &ch->req_ring[i];
- mr_list = kmalloc(target->mr_per_cmd * sizeof(void *),
- GFP_KERNEL);
+ mr_list = kmalloc_array(target->mr_per_cmd, sizeof(void *),
+ GFP_KERNEL);
if (!mr_list)
goto out;
if (srp_dev->use_fast_reg) {
req->fr_list = mr_list;
} else {
req->fmr_list = mr_list;
- req->map_page = kmalloc(srp_dev->max_pages_per_mr *
- sizeof(void *), GFP_KERNEL);
+ req->map_page = kmalloc_array(srp_dev->max_pages_per_mr,
+ sizeof(void *),
+ GFP_KERNEL);
if (!req->map_page)
goto out;
}
WARN_ON(ioctx_size != sizeof(struct srpt_recv_ioctx)
&& ioctx_size != sizeof(struct srpt_send_ioctx));
- ring = kmalloc(ring_size * sizeof(ring[0]), GFP_KERNEL);
+ ring = kmalloc_array(ring_size, sizeof(ring[0]), GFP_KERNEL);
if (!ring)
goto out;
for (i = 0; i < ring_size; ++i) {
timeout = gameport_time(gameport, 10000); /* 10 ms */
- buf = kmalloc(BUF_SIZE * sizeof(struct joydump), GFP_KERNEL);
+ buf = kmalloc_array(BUF_SIZE, sizeof(struct joydump), GFP_KERNEL);
if (!buf) {
printk(KERN_INFO "joydump: no memory for testing\n");
goto jd_end;
if (count <= 0)
return;
- its_srat_maps = kmalloc(count * sizeof(struct its_srat_map),
- GFP_KERNEL);
+ its_srat_maps = kmalloc_array(count, sizeof(struct its_srat_map),
+ GFP_KERNEL);
if (!its_srat_maps) {
pr_warn("SRAT: Failed to allocate memory for its_srat_maps!\n");
return;
strcpy(card->name, id);
card->contrnr = contr;
card->nbchan = profp->nbchannel;
- card->bchans = kmalloc(sizeof(capidrv_bchan) * card->nbchan, GFP_ATOMIC);
+ card->bchans = kmalloc_array(card->nbchan, sizeof(capidrv_bchan),
+ GFP_ATOMIC);
if (!card->bchans) {
printk(KERN_WARNING
"capidrv: (%s) Could not allocate bchan-structs.\n", id);
return;
if (l > 64)
l = 64; /* arbitrary limit */
- dbgline = kmalloc(3 * l, GFP_ATOMIC);
+ dbgline = kmalloc_array(3, l, GFP_ATOMIC);
if (!dbgline)
return;
for (i = 0; i < l; i++) {
return;
if (l > 64)
l = 64; /* arbitrary limit */
- dbgline = kmalloc(3 * l, GFP_ATOMIC);
+ dbgline = kmalloc_array(3, l, GFP_ATOMIC);
if (!dbgline)
return;
data += CAPIMSG_LEN(data);
cs->mode = M_UNKNOWN;
cs->mstate = MS_UNINITIALIZED;
- cs->bcs = kmalloc(channels * sizeof(struct bc_state), GFP_KERNEL);
+ cs->bcs = kmalloc_array(channels, sizeof(struct bc_state), GFP_KERNEL);
cs->inbuf = kmalloc(sizeof(struct inbuf_t), GFP_KERNEL);
if (!cs->bcs || !cs->inbuf) {
pr_err("out of memory\n");
drv->owner = owner;
INIT_LIST_HEAD(&drv->list);
- drv->cs = kmalloc(minors * sizeof *drv->cs, GFP_KERNEL);
+ drv->cs = kmalloc_array(minors, sizeof(*drv->cs), GFP_KERNEL);
if (!drv->cs)
goto error;
int i;
unsigned *send;
- if (!(send = kmalloc(cnt * sizeof(unsigned int), GFP_ATOMIC))) {
+ if (!(send = kmalloc_array(cnt, sizeof(unsigned int), GFP_ATOMIC))) {
printk(KERN_WARNING
"HiSax: No memory for hfcd.send\n");
return (NULL);
{
int i;
- if (!(bcs->hw.hfc.send = kmalloc(32 * sizeof(unsigned int), GFP_ATOMIC))) {
+ bcs->hw.hfc.send = kmalloc_array(32, sizeof(unsigned int), GFP_ATOMIC);
+ if (!bcs->hw.hfc.send) {
printk(KERN_WARNING
"HiSax: No memory for hfc.send\n");
return;
void
inittiger(struct IsdnCardState *cs)
{
- if (!(cs->bcs[0].hw.tiger.send = kmalloc(NETJET_DMA_TXSIZE * sizeof(unsigned int),
- GFP_KERNEL | GFP_DMA))) {
+ cs->bcs[0].hw.tiger.send = kmalloc_array(NETJET_DMA_TXSIZE,
+ sizeof(unsigned int),
+ GFP_KERNEL | GFP_DMA);
+ if (!cs->bcs[0].hw.tiger.send) {
printk(KERN_WARNING
"HiSax: No memory for tiger.send\n");
return;
cs->hw.njet.base + NETJET_DMA_READ_IRQ);
outl(virt_to_bus(cs->bcs[0].hw.tiger.s_end),
cs->hw.njet.base + NETJET_DMA_READ_END);
- if (!(cs->bcs[0].hw.tiger.rec = kmalloc(NETJET_DMA_RXSIZE * sizeof(unsigned int),
- GFP_KERNEL | GFP_DMA))) {
+ cs->bcs[0].hw.tiger.rec = kmalloc_array(NETJET_DMA_RXSIZE,
+ sizeof(unsigned int),
+ GFP_KERNEL | GFP_DMA);
+ if (!cs->bcs[0].hw.tiger.rec) {
printk(KERN_WARNING
"HiSax: No memory for tiger.rec\n");
return;
skb_queue_purge(&d->rpqueue[j]);
kfree(d->rpqueue);
}
- if (!(d->rpqueue = kmalloc(sizeof(struct sk_buff_head) * m, GFP_ATOMIC))) {
+ d->rpqueue = kmalloc_array(m, sizeof(struct sk_buff_head), GFP_ATOMIC);
+ if (!d->rpqueue) {
printk(KERN_WARNING "register_isdn: Could not alloc rpqueue\n");
if (!adding) {
kfree(d->rcvcount);
if ((adding) && (d->rcv_waitq))
kfree(d->rcv_waitq);
- d->rcv_waitq = kmalloc(sizeof(wait_queue_head_t) * 2 * m, GFP_ATOMIC);
+ d->rcv_waitq = kmalloc(array3_size(sizeof(wait_queue_head_t), 2, m),
+ GFP_ATOMIC);
if (!d->rcv_waitq) {
printk(KERN_WARNING "register_isdn: Could not alloc rcv_waitq\n");
if (!adding) {
goto free_blk_bitmap;
- line->chks = kmalloc(lm->blk_per_line * sizeof(struct nvm_chk_meta),
- GFP_KERNEL);
+ line->chks = kmalloc_array(lm->blk_per_line,
+ sizeof(struct nvm_chk_meta), GFP_KERNEL);
if (!line->chks)
goto free_erase_bitmap;
r = -ENOMEM;
goto bad;
}
- section_req->iv = kmalloc(ivsize * 2, GFP_KERNEL);
+ section_req->iv = kmalloc_array(ivsize, 2,
+ GFP_KERNEL);
if (!section_req->iv) {
skcipher_request_free(section_req);
*error = "Unable to allocate iv";
{
int i;
- _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
- GFP_KERNEL);
+ _origins = kmalloc_array(ORIGIN_HASH_SIZE, sizeof(struct list_head),
+ GFP_KERNEL);
if (!_origins) {
DMERR("unable to allocate memory for _origins");
return -ENOMEM;
for (i = 0; i < ORIGIN_HASH_SIZE; i++)
INIT_LIST_HEAD(_origins + i);
- _dm_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
- GFP_KERNEL);
+ _dm_origins = kmalloc_array(ORIGIN_HASH_SIZE,
+ sizeof(struct list_head),
+ GFP_KERNEL);
if (!_dm_origins) {
DMERR("unable to allocate memory for _dm_origins");
kfree(_origins);
if (*q == ',')
(*n_histogram_entries)++;
- *histogram_boundaries = kmalloc(*n_histogram_entries * sizeof(unsigned long long), GFP_KERNEL);
+ *histogram_boundaries = kmalloc_array(*n_histogram_entries,
+ sizeof(unsigned long long),
+ GFP_KERNEL);
if (!*histogram_boundaries)
return -ENOMEM;
new_size = 8;
gfp = GFP_NOIO;
}
- argv = kmalloc(new_size * sizeof(*argv), gfp);
+ argv = kmalloc_array(new_size, sizeof(*argv), gfp);
if (argv) {
memcpy(argv, old_argv, *size * sizeof(*argv));
*size = new_size;
num_pages = DIV_ROUND_UP(bytes, PAGE_SIZE);
offset = slot_number * num_pages;
- store->filemap = kmalloc(sizeof(struct page *)
- * num_pages, GFP_KERNEL);
+ store->filemap = kmalloc_array(num_pages, sizeof(struct page *),
+ GFP_KERNEL);
if (!store->filemap)
return -ENOMEM;
if (!r1_bio)
return NULL;
- rps = kmalloc(sizeof(struct resync_pages) * pi->raid_disks,
- gfp_flags);
+ rps = kmalloc_array(pi->raid_disks, sizeof(struct resync_pages),
+ gfp_flags);
if (!rps)
goto out_free_r1bio;
nalloc_rp = nalloc;
else
nalloc_rp = nalloc * 2;
- rps = kmalloc(sizeof(struct resync_pages) * nalloc_rp, gfp_flags);
+ rps = kmalloc_array(nalloc_rp, sizeof(struct resync_pages), gfp_flags);
if (!rps)
goto out_free_r10bio;
u32 addr;
/* skip list for window clipping */
- if (NULL == (skips = kmalloc(sizeof(*skips) * ov->nclips,GFP_KERNEL)))
+ skips = kmalloc_array(ov->nclips, sizeof(*skips), GFP_KERNEL);
+ if (NULL == skips)
return -ENOMEM;
/* estimate risc mem: worst case is (1.5*clip+1) * lines instructions
/* Allocate the pseudo palette */
oi->ivtvfb_info.pseudo_palette =
- kmalloc(sizeof(u32) * 16, GFP_KERNEL|__GFP_NOWARN);
+ kmalloc_array(16, sizeof(u32), GFP_KERNEL|__GFP_NOWARN);
if (!oi->ivtvfb_info.pseudo_palette) {
IVTVFB_ERR("abort, unable to alloc pseudo palette\n");
/* create a string array containing the names of all the preset timings */
while (v4l2_dv_timings_presets[dev->query_dv_timings_size].bt.width)
dev->query_dv_timings_size++;
- dev->query_dv_timings_qmenu = kmalloc(dev->query_dv_timings_size *
- (sizeof(void *) + 32), GFP_KERNEL);
+ dev->query_dv_timings_qmenu = kmalloc_array(dev->query_dv_timings_size,
+ (sizeof(void *) + 32),
+ GFP_KERNEL);
if (dev->query_dv_timings_qmenu == NULL)
goto free_dev;
for (i = 0; i < dev->query_dv_timings_size; i++) {
if (cam->sbuf[i].data)
continue;
cam->sbuf[i].data =
- kmalloc(FRAMES_PER_DESC * FRAME_SIZE_PER_DESC, GFP_KERNEL);
+ kmalloc_array(FRAME_SIZE_PER_DESC, FRAMES_PER_DESC,
+ GFP_KERNEL);
if (!cam->sbuf[i].data) {
while (--i >= 0) {
kfree(cam->sbuf[i].data);
dev_info(dev->dev,
"audio EndPoint Addr 0x%x, Alternate settings: %i\n",
adev->end_point_addr, adev->num_alt);
- adev->alt_max_pkt_size = kmalloc(32 * adev->num_alt, GFP_KERNEL);
+ adev->alt_max_pkt_size = kmalloc_array(32, adev->num_alt, GFP_KERNEL);
if (!adev->alt_max_pkt_size) {
err = -ENOMEM;
goto err_free_card;
usb->intr_urb = usb_alloc_urb(0, GFP_KERNEL);
if (usb->intr_urb == NULL)
goto allocfail;
- usb->intr_urb->transfer_buffer = kmalloc(2*sizeof(u16), GFP_KERNEL);
+ usb->intr_urb->transfer_buffer = kmalloc_array(2, sizeof(u16),
+ GFP_KERNEL);
if (usb->intr_urb->transfer_buffer == NULL)
goto allocfail;
if (len * 2 <= USB_BUF_SZ) {
p = tmpbuf = gspca_dev->usb_buf;
} else {
- p = tmpbuf = kmalloc(len * 2, GFP_KERNEL);
+ p = tmpbuf = kmalloc_array(len, 2, GFP_KERNEL);
if (!tmpbuf) {
pr_err("Out of memory\n");
return;
return -ENODEV;
/* Alloc an array for all possible max_pkt_size */
- alt_max_pkt_size = kmalloc(sizeof(alt_max_pkt_size[0]) *
- interface->num_altsetting, GFP_KERNEL);
+ alt_max_pkt_size = kmalloc_array(interface->num_altsetting,
+ sizeof(alt_max_pkt_size[0]),
+ GFP_KERNEL);
if (alt_max_pkt_size == NULL)
return -ENOMEM;
if (dev->urb_buffer)
return 0;
- dev->urb_buffer = kmalloc(sizeof(void *)*num_bufs, GFP_KERNEL);
+ dev->urb_buffer = kmalloc_array(num_bufs, sizeof(void *), GFP_KERNEL);
if (!dev->urb_buffer)
return -ENOMEM;
- dev->urb_dma = kmalloc(sizeof(dma_addr_t *)*num_bufs, GFP_KERNEL);
+ dev->urb_dma = kmalloc_array(num_bufs, sizeof(dma_addr_t *),
+ GFP_KERNEL);
if (!dev->urb_dma)
return -ENOMEM;
dev->isoc_ctl.num_bufs = num_bufs;
- dev->isoc_ctl.urb = kmalloc(sizeof(void *)*num_bufs, GFP_KERNEL);
+ dev->isoc_ctl.urb = kmalloc_array(num_bufs, sizeof(void *),
+ GFP_KERNEL);
if (!dev->isoc_ctl.urb)
return -ENOMEM;
- dev->isoc_ctl.transfer_buffer = kmalloc(sizeof(void *)*num_bufs,
- GFP_KERNEL);
+ dev->isoc_ctl.transfer_buffer = kmalloc_array(num_bufs,
+ sizeof(void *),
+ GFP_KERNEL);
if (!dev->isoc_ctl.transfer_buffer) {
kfree(dev->isoc_ctl.urb);
return -ENOMEM;
usbvision->num_alt = uif->num_altsetting;
PDEBUG(DBG_PROBE, "Alternate settings: %i", usbvision->num_alt);
- usbvision->alt_max_pkt_size = kmalloc(32 * usbvision->num_alt, GFP_KERNEL);
+ usbvision->alt_max_pkt_size = kmalloc_array(32, usbvision->num_alt,
+ GFP_KERNEL);
if (!usbvision->alt_max_pkt_size) {
ret = -ENOMEM;
goto err_pkt;
spin_lock_init(&clock->lock);
clock->size = 32;
- clock->samples = kmalloc(clock->size * sizeof(*clock->samples),
- GFP_KERNEL);
+ clock->samples = kmalloc_array(clock->size, sizeof(*clock->samples),
+ GFP_KERNEL);
if (clock->samples == NULL)
return -ENOMEM;
dma->offset = data & ~PAGE_MASK;
dma->size = size;
dma->nr_pages = last-first+1;
- dma->pages = kmalloc(dma->nr_pages * sizeof(struct page *), GFP_KERNEL);
+ dma->pages = kmalloc_array(dma->nr_pages, sizeof(struct page *),
+ GFP_KERNEL);
if (NULL == dma->pages)
return -ENOMEM;
dbg_verbose("Start of a scan for the boot blocks");
if (!msb->boot_page) {
- page = kmalloc(sizeof(struct ms_boot_page)*2, GFP_KERNEL);
+ page = kmalloc_array(2, sizeof(struct ms_boot_page),
+ GFP_KERNEL);
if (!page)
return -ENOMEM;
msb->used_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL);
msb->erased_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL);
msb->lba_to_pba_table =
- kmalloc(msb->logical_block_count * sizeof(u16), GFP_KERNEL);
+ kmalloc_array(msb->logical_block_count, sizeof(u16),
+ GFP_KERNEL);
if (!msb->used_blocks_bitmap || !msb->lba_to_pba_table ||
!msb->erased_blocks_bitmap) {
"a moment.\n");
}
- priv->mpt_txfidx = kmalloc(priv->tx_max_out * sizeof(int), GFP_KERNEL);
+ priv->mpt_txfidx = kmalloc_array(priv->tx_max_out, sizeof(int),
+ GFP_KERNEL);
if (priv->mpt_txfidx == NULL)
goto out;
priv->mpt_txfidx_tail = -1;
dlprintk((KERN_INFO MYNAM "@lo: Finished initializing SendCtl\n"));
- priv->mpt_rxfidx = kmalloc(priv->max_buckets_out * sizeof(int),
- GFP_KERNEL);
+ priv->mpt_rxfidx = kmalloc_array(priv->max_buckets_out, sizeof(int),
+ GFP_KERNEL);
if (priv->mpt_rxfidx == NULL)
goto out_SendCtl;
priv->mpt_rxfidx_tail = -1;
if (colon_ch != NULL) {
csraddr_len = colon_ch - buf;
csraddr_str =
- kmalloc(sizeof(char)*(csraddr_len + 1), GFP_KERNEL);
+ kmalloc(csraddr_len + 1, GFP_KERNEL);
if (csraddr_str == NULL) {
ret = -ENOMEM;
goto free_buf;
return VMCI_ERROR_ALREADY_EXISTS;
produce_ppns =
- kmalloc(num_produce_pages * sizeof(*produce_ppns), GFP_KERNEL);
+ kmalloc_array(num_produce_pages, sizeof(*produce_ppns),
+ GFP_KERNEL);
if (!produce_ppns)
return VMCI_ERROR_NO_MEM;
consume_ppns =
- kmalloc(num_consume_pages * sizeof(*consume_ppns), GFP_KERNEL);
+ kmalloc_array(num_consume_pages, sizeof(*consume_ppns),
+ GFP_KERNEL);
if (!consume_ppns) {
kfree(produce_ppns);
return VMCI_ERROR_NO_MEM;
newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
if (!newcfi)
return -ENOMEM;
- shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
+ shared = kmalloc_array(cfi->numchips,
+ sizeof(struct flchip_shared),
+ GFP_KERNEL);
if (!shared) {
kfree(newcfi);
return -ENOMEM;
mtd->size = devsize * cfi->numchips;
mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
- mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
- * mtd->numeraseregions, GFP_KERNEL);
+ mtd->eraseregions = kmalloc_array(mtd->numeraseregions,
+ sizeof(struct mtd_erase_region_info),
+ GFP_KERNEL);
if (!mtd->eraseregions)
goto setup_err;
mtd->size = devsize * cfi->numchips;
mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
- mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
- * mtd->numeraseregions, GFP_KERNEL);
+ mtd->eraseregions = kmalloc_array(mtd->numeraseregions,
+ sizeof(struct mtd_erase_region_info),
+ GFP_KERNEL);
if (!mtd->eraseregions) {
kfree(cfi->cmdset_priv);
kfree(mtd);
/* Set up erase unit maps */
part->DataUnits = le16_to_cpu(part->header.NumEraseUnits) -
part->header.NumTransferUnits;
- part->EUNInfo = kmalloc(part->DataUnits * sizeof(struct eun_info_t),
- GFP_KERNEL);
+ part->EUNInfo = kmalloc_array(part->DataUnits, sizeof(struct eun_info_t),
+ GFP_KERNEL);
if (!part->EUNInfo)
goto out;
for (i = 0; i < part->DataUnits; i++)
part->EUNInfo[i].Offset = 0xffffffff;
part->XferInfo =
- kmalloc(part->header.NumTransferUnits * sizeof(struct xfer_info_t),
- GFP_KERNEL);
+ kmalloc_array(part->header.NumTransferUnits,
+ sizeof(struct xfer_info_t),
+ GFP_KERNEL);
if (!part->XferInfo)
goto out_EUNInfo;
memset(part->VirtualBlockMap, 0xff, blocks * sizeof(uint32_t));
part->BlocksPerUnit = (1 << header.EraseUnitSize) >> header.BlockSize;
- part->bam_cache = kmalloc(part->BlocksPerUnit * sizeof(uint32_t),
- GFP_KERNEL);
+ part->bam_cache = kmalloc_array(part->BlocksPerUnit, sizeof(uint32_t),
+ GFP_KERNEL);
if (!part->bam_cache)
goto out_VirtualBlockMap;
inftl->nb_blocks = ip->lastUnit + 1;
/* Memory alloc */
- inftl->PUtable = kmalloc(inftl->nb_blocks * sizeof(u16), GFP_KERNEL);
+ inftl->PUtable = kmalloc_array(inftl->nb_blocks, sizeof(u16),
+ GFP_KERNEL);
if (!inftl->PUtable) {
printk(KERN_WARNING "INFTL: allocation of PUtable "
"failed (%zd bytes)\n",
return -ENOMEM;
}
- inftl->VUtable = kmalloc(inftl->nb_blocks * sizeof(u16), GFP_KERNEL);
+ inftl->VUtable = kmalloc_array(inftl->nb_blocks, sizeof(u16),
+ GFP_KERNEL);
if (!inftl->VUtable) {
kfree(inftl->PUtable);
printk(KERN_WARNING "INFTL: allocation of VUtable "
mtd->erasesize = 1 << lpddr->qinfo->UniformBlockSizeShift;
mtd->writesize = 1 << lpddr->qinfo->BufSizeShift;
- shared = kmalloc(sizeof(struct flchip_shared) * lpddr->numchips,
+ shared = kmalloc_array(lpddr->numchips, sizeof(struct flchip_shared),
GFP_KERNEL);
if (!shared) {
kfree(lpddr);
* Not sure there are actually any multi-partition devices in the
* real world, but the hardware supports them, so, so will we
*/
- card->parts = kmalloc(sizeof(struct vmupart) * card->partitions,
- GFP_KERNEL);
+ card->parts = kmalloc_array(card->partitions, sizeof(struct vmupart),
+ GFP_KERNEL);
if (!card->parts) {
error = -ENOMEM;
goto fail_partitions;
}
- card->mtd = kmalloc(sizeof(struct mtd_info) * card->partitions,
- GFP_KERNEL);
+ card->mtd = kmalloc_array(card->partitions, sizeof(struct mtd_info),
+ GFP_KERNEL);
if (!card->mtd) {
error = -ENOMEM;
goto fail_mtd_info;
concat->mtd.erasesize = max_erasesize;
concat->mtd.numeraseregions = num_erase_region;
concat->mtd.eraseregions = erase_region_p =
- kmalloc(num_erase_region *
- sizeof (struct mtd_erase_region_info), GFP_KERNEL);
+ kmalloc_array(num_erase_region,
+ sizeof(struct mtd_erase_region_info),
+ GFP_KERNEL);
if (!erase_region_p) {
kfree(concat);
printk
if (!d->page_buf)
goto page_buf_fail;
- d->oob_buf = kmalloc(2 * mtd->oobavail, GFP_KERNEL);
+ d->oob_buf = kmalloc_array(2, mtd->oobavail, GFP_KERNEL);
if (!d->oob_buf)
goto oob_buf_fail;
}
nbc->eccmask = kmalloc(eccbytes, GFP_KERNEL);
- nbc->errloc = kmalloc(t*sizeof(*nbc->errloc), GFP_KERNEL);
+ nbc->errloc = kmalloc_array(t, sizeof(*nbc->errloc), GFP_KERNEL);
if (!nbc->eccmask || !nbc->errloc)
goto fail;
/*
nftl->lastEUN = nftl->nb_blocks - 1;
/* memory alloc */
- nftl->EUNtable = kmalloc(nftl->nb_blocks * sizeof(u16), GFP_KERNEL);
+ nftl->EUNtable = kmalloc_array(nftl->nb_blocks, sizeof(u16),
+ GFP_KERNEL);
if (!nftl->EUNtable) {
printk(KERN_NOTICE "NFTL: allocation of EUNtable failed\n");
return -ENOMEM;
}
- nftl->ReplUnitTable = kmalloc(nftl->nb_blocks * sizeof(u16), GFP_KERNEL);
+ nftl->ReplUnitTable = kmalloc_array(nftl->nb_blocks,
+ sizeof(u16),
+ GFP_KERNEL);
if (!nftl->ReplUnitTable) {
kfree(nftl->EUNtable);
printk(KERN_NOTICE "NFTL: allocation of ReplUnitTable failed\n");
dbg("initializing zone %d", zone_num);
/* Allocate memory for FTL table */
- zone->lba_to_phys_table = kmalloc(ftl->max_lba * 2, GFP_KERNEL);
+ zone->lba_to_phys_table = kmalloc_array(ftl->max_lba, 2, GFP_KERNEL);
if (!zone->lba_to_phys_table)
return -ENOMEM;
(long)ssfdc->sectors;
/* Allocate logical block map */
- ssfdc->logic_block_map = kmalloc(sizeof(ssfdc->logic_block_map[0]) *
- ssfdc->map_len, GFP_KERNEL);
+ ssfdc->logic_block_map =
+ kmalloc_array(ssfdc->map_len,
+ sizeof(ssfdc->logic_block_map[0]), GFP_KERNEL);
if (!ssfdc->logic_block_map)
goto out_err;
memset(ssfdc->logic_block_map, 0xff, sizeof(ssfdc->logic_block_map[0]) *
err = -ENOMEM;
readbuf = vmalloc(bufsize);
writebuf = vmalloc(bufsize);
- offsets = kmalloc(ebcnt * sizeof(int), GFP_KERNEL);
+ offsets = kmalloc_array(ebcnt, sizeof(int), GFP_KERNEL);
if (!readbuf || !writebuf || !offsets)
goto out;
for (i = 0; i < ebcnt; i++)
num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
- scan_eba = kmalloc(sizeof(*scan_eba) * num_volumes, GFP_KERNEL);
+ scan_eba = kmalloc_array(num_volumes, sizeof(*scan_eba), GFP_KERNEL);
if (!scan_eba)
return -ENOMEM;
- fm_eba = kmalloc(sizeof(*fm_eba) * num_volumes, GFP_KERNEL);
+ fm_eba = kmalloc_array(num_volumes, sizeof(*fm_eba), GFP_KERNEL);
if (!fm_eba) {
kfree(scan_eba);
return -ENOMEM;
if (!vol)
continue;
- scan_eba[i] = kmalloc(vol->reserved_pebs * sizeof(**scan_eba),
- GFP_KERNEL);
+ scan_eba[i] = kmalloc_array(vol->reserved_pebs,
+ sizeof(**scan_eba),
+ GFP_KERNEL);
if (!scan_eba[i]) {
ret = -ENOMEM;
goto out_free;
}
- fm_eba[i] = kmalloc(vol->reserved_pebs * sizeof(**fm_eba),
- GFP_KERNEL);
+ fm_eba[i] = kmalloc_array(vol->reserved_pebs,
+ sizeof(**fm_eba),
+ GFP_KERNEL);
if (!fm_eba[i]) {
ret = -ENOMEM;
goto out_free;
if (lance_debug > 6) printk(" (#0x%05lx)", (unsigned long)lp);
dev->ml_priv = lp;
lp->name = chipname;
- lp->rx_buffs = (unsigned long)kmalloc(PKT_BUF_SZ*RX_RING_SIZE,
- GFP_DMA | GFP_KERNEL);
+ lp->rx_buffs = (unsigned long)kmalloc_array(RX_RING_SIZE, PKT_BUF_SZ,
+ GFP_DMA | GFP_KERNEL);
if (!lp->rx_buffs)
goto out_lp;
if (lance_need_isa_bounce_buffers) {
- lp->tx_bounce_buffs = kmalloc(PKT_BUF_SZ*TX_RING_SIZE,
- GFP_DMA | GFP_KERNEL);
+ lp->tx_bounce_buffs = kmalloc_array(TX_RING_SIZE, PKT_BUF_SZ,
+ GFP_DMA | GFP_KERNEL);
if (!lp->tx_bounce_buffs)
goto out_rx;
} else
first_dword = eeprom->offset >> 2;
last_dword = (eeprom->offset + eeprom->len - 1) >> 2;
- eeprom_buff = kmalloc(sizeof(u32) *
- (last_dword - first_dword + 1), GFP_KERNEL);
+ eeprom_buff = kmalloc_array(last_dword - first_dword + 1, sizeof(u32),
+ GFP_KERNEL);
if (eeprom_buff == NULL)
return -ENOMEM;
first_dword = eeprom->offset >> 2;
last_dword = (eeprom->offset + eeprom->len - 1) >> 2;
- eeprom_buff = kmalloc(sizeof(u32) *
- (last_dword - first_dword + 1), GFP_KERNEL);
+ eeprom_buff = kmalloc_array(last_dword - first_dword + 1, sizeof(u32),
+ GFP_KERNEL);
if (eeprom_buff == NULL)
return -ENOMEM;
first_dword = eeprom->offset >> 2;
last_dword = (eeprom->offset + eeprom->len - 1) >> 2;
- eeprom_buff = kmalloc(sizeof(u32) * (last_dword - first_dword + 1),
- GFP_KERNEL);
+ eeprom_buff = kmalloc_array(last_dword - first_dword + 1, sizeof(u32),
+ GFP_KERNEL);
if (!eeprom_buff)
return -ENOMEM;
u32 good_mbuf_cnt;
u32 val;
- good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
+ good_mbuf = kmalloc_array(512, sizeof(u16), GFP_KERNEL);
if (!good_mbuf)
return -ENOMEM;
return -ENOMEM;
/* storage for cfa_code to vf-idx mapping */
- cfa_code_map = kmalloc(sizeof(*bp->cfa_code_map) * MAX_CFA_CODE,
- GFP_KERNEL);
+ cfa_code_map = kmalloc_array(MAX_CFA_CODE, sizeof(*bp->cfa_code_map),
+ GFP_KERNEL);
if (!cfa_code_map) {
rc = -ENOMEM;
goto err;
u16 (*incr)[NCCTRL_WIN];
struct adapter *adap = seq->private;
- incr = kmalloc(sizeof(*incr) * NMTUS, GFP_KERNEL);
+ incr = kmalloc_array(NMTUS, sizeof(*incr), GFP_KERNEL);
if (!incr)
return -ENOMEM;
const struct sge_eth_rxq *rxq;
rxq = &adapter->sge.ethrxq[pi->first_qset];
- rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
+ rss = kmalloc_array(pi->rss_size, sizeof(u16), GFP_KERNEL);
if (!rss)
return -ENOMEM;
max_ingq += (MAX_OFLD_QSETS * adap->num_uld);
if (is_offload(adap))
max_ingq += (MAX_OFLD_QSETS * adap->num_ofld_uld);
- entries = kmalloc(sizeof(*entries) * (max_ingq + 1),
- GFP_KERNEL);
+ entries = kmalloc_array(max_ingq + 1, sizeof(*entries),
+ GFP_KERNEL);
if (!entries)
return -ENOMEM;
/* Init Tx bds */
for (j = 0; j < ug_info->numQueuesTx; j++) {
/* Setup the skbuff rings */
- ugeth->tx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) *
- ugeth->ug_info->bdRingLenTx[j],
- GFP_KERNEL);
+ ugeth->tx_skbuff[j] =
+ kmalloc_array(ugeth->ug_info->bdRingLenTx[j],
+ sizeof(struct sk_buff *), GFP_KERNEL);
if (ugeth->tx_skbuff[j] == NULL) {
if (netif_msg_ifup(ugeth))
/* Init Rx bds */
for (j = 0; j < ug_info->numQueuesRx; j++) {
/* Setup the skbuff rings */
- ugeth->rx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) *
- ugeth->ug_info->bdRingLenRx[j],
- GFP_KERNEL);
+ ugeth->rx_skbuff[j] =
+ kmalloc_array(ugeth->ug_info->bdRingLenRx[j],
+ sizeof(struct sk_buff *), GFP_KERNEL);
if (ugeth->rx_skbuff[j] == NULL) {
if (netif_msg_ifup(ugeth))
{
int i;
- pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);
+ pool->free_map = kmalloc_array(pool->size, sizeof(u16), GFP_KERNEL);
if (!pool->free_map)
return -1;
first_word = eeprom->offset >> 1;
last_word = (eeprom->offset + eeprom->len - 1) >> 1;
- eeprom_buff = kmalloc(sizeof(u16) *
- (last_word - first_word + 1), GFP_KERNEL);
+ eeprom_buff = kmalloc_array(last_word - first_word + 1, sizeof(u16),
+ GFP_KERNEL);
if (!eeprom_buff)
return -ENOMEM;
first_word = eeprom->offset >> 1;
last_word = (eeprom->offset + eeprom->len - 1) >> 1;
- eeprom_buff = kmalloc(sizeof(u16) * (last_word - first_word + 1),
- GFP_KERNEL);
+ eeprom_buff = kmalloc_array(last_word - first_word + 1, sizeof(u16),
+ GFP_KERNEL);
if (!eeprom_buff)
return -ENOMEM;
first_word = eeprom->offset >> 1;
last_word = (eeprom->offset + eeprom->len - 1) >> 1;
- eeprom_buff = kmalloc(sizeof(u16) *
- (last_word - first_word + 1), GFP_KERNEL);
+ eeprom_buff = kmalloc_array(last_word - first_word + 1, sizeof(u16),
+ GFP_KERNEL);
if (!eeprom_buff)
return -ENOMEM;
first_word = ee->offset >> 1;
last_word = (ee->offset + ee->len - 1) >> 1;
- dataword = kmalloc(sizeof(u16) * (last_word - first_word + 1),
- GFP_KERNEL);
+ dataword = kmalloc_array(last_word - first_word + 1, sizeof(u16),
+ GFP_KERNEL);
if (!dataword)
return -ENOMEM;
first_word = eeprom->offset >> 1;
last_word = (eeprom->offset + eeprom->len - 1) >> 1;
- eeprom_buff = kmalloc(sizeof(__le16) *
- (last_word - first_word + 1), GFP_KERNEL);
+ eeprom_buff = kmalloc_array(last_word - first_word + 1,
+ sizeof(__le16),
+ GFP_KERNEL);
if (!eeprom_buff)
return -ENOMEM;
rctl |= IXGB_RCTL_MPE;
IXGB_WRITE_REG(hw, RCTL, rctl);
} else {
- u8 *mta = kmalloc(IXGB_MAX_NUM_MULTICAST_ADDRESSES *
- ETH_ALEN, GFP_ATOMIC);
+ u8 *mta = kmalloc_array(ETH_ALEN,
+ IXGB_MAX_NUM_MULTICAST_ADDRESSES,
+ GFP_ATOMIC);
u8 *addr;
if (!mta)
goto alloc_failed;
last_word = (eeprom->offset + eeprom->len - 1) >> 1;
eeprom_len = last_word - first_word + 1;
- eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL);
+ eeprom_buff = kmalloc_array(eeprom_len, sizeof(u16), GFP_KERNEL);
if (!eeprom_buff)
return -ENOMEM;
int i;
int err = 0;
- priv->cmd.context = kmalloc(priv->cmd.max_cmds *
- sizeof(struct mlx4_cmd_context),
- GFP_KERNEL);
+ priv->cmd.context = kmalloc_array(priv->cmd.max_cmds,
+ sizeof(struct mlx4_cmd_context),
+ GFP_KERNEL);
if (!priv->cmd.context)
return -ENOMEM;
}
priv->eq_table.irq_names =
- kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1),
- GFP_KERNEL);
+ kmalloc_array(MLX4_IRQNAME_SIZE,
+ dev->caps.num_comp_vectors + 1,
+ GFP_KERNEL);
if (!priv->eq_table.irq_names) {
err = -ENOMEM;
goto err_out_clr_int;
for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
struct resource_allocator *res_alloc =
&priv->mfunc.master.res_tracker.res_alloc[i];
- res_alloc->quota = kmalloc((dev->persist->num_vfs + 1) *
- sizeof(int), GFP_KERNEL);
- res_alloc->guaranteed = kmalloc((dev->persist->num_vfs + 1) *
- sizeof(int), GFP_KERNEL);
+ res_alloc->quota = kmalloc_array(dev->persist->num_vfs + 1,
+ sizeof(int),
+ GFP_KERNEL);
+ res_alloc->guaranteed = kmalloc_array(dev->persist->num_vfs + 1,
+ sizeof(int),
+ GFP_KERNEL);
if (i == RES_MAC || i == RES_VLAN)
res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
(dev->persist->num_vfs
goto init_fail;
}
- priv->tx_buf_base = kmalloc(priv->tx_buf_size * TX_DESC_NUM,
- GFP_ATOMIC);
+ priv->tx_buf_base = kmalloc_array(priv->tx_buf_size, TX_DESC_NUM,
+ GFP_ATOMIC);
if (!priv->tx_buf_base) {
ret = -ENOMEM;
goto init_fail;
}
- priv->rx_buf_base = kmalloc(priv->rx_buf_size * RX_DESC_NUM,
- GFP_ATOMIC);
+ priv->rx_buf_base = kmalloc_array(priv->rx_buf_size, RX_DESC_NUM,
+ GFP_ATOMIC);
if (!priv->rx_buf_base) {
ret = -ENOMEM;
goto init_fail;
ring->tx_pending),
&ring_addr, GFP_ATOMIC);
}
- rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL);
- tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL);
+ rx_skbuff = kmalloc_array(ring->rx_pending, sizeof(struct nv_skb_map),
+ GFP_KERNEL);
+ tx_skbuff = kmalloc_array(ring->tx_pending, sizeof(struct nv_skb_map),
+ GFP_KERNEL);
if (!rxtx_ring || !rx_skbuff || !tx_skbuff) {
/* fall back to old rings */
if (!nv_optimized(np)) {
if (mc_count >= PCH_GBE_MAR_ENTRIES)
return;
- mta_list = kmalloc(mc_count * ETH_ALEN, GFP_ATOMIC);
+ mta_list = kmalloc_array(ETH_ALEN, mc_count, GFP_ATOMIC);
if (!mta_list)
return;
goto err0;
}
- nvm_info->image_att = kmalloc(nvm_info->num_images *
- sizeof(struct bist_nvm_image_att),
- GFP_KERNEL);
+ nvm_info->image_att = kmalloc_array(nvm_info->num_images,
+ sizeof(struct bist_nvm_image_att),
+ GFP_KERNEL);
if (!nvm_info->image_att) {
rc = -ENOMEM;
goto err0;
goto pci_alloc_err;
tx_ring->q =
- kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
+ kmalloc_array(tx_ring->wq_len, sizeof(struct tx_ring_desc),
+ GFP_KERNEL);
if (tx_ring->q == NULL)
goto err;
{
int i;
- gtp->addr_hash = kmalloc(sizeof(struct hlist_head) * hsize, GFP_KERNEL);
+ gtp->addr_hash = kmalloc_array(hsize, sizeof(struct hlist_head),
+ GFP_KERNEL);
if (gtp->addr_hash == NULL)
return -ENOMEM;
- gtp->tid_hash = kmalloc(sizeof(struct hlist_head) * hsize, GFP_KERNEL);
+ gtp->tid_hash = kmalloc_array(hsize, sizeof(struct hlist_head),
+ GFP_KERNEL);
if (gtp->tid_hash == NULL)
goto err1;
return -EPERM;
}
- image = kmalloc(EEPROM_WORDS * sizeof(u32), GFP_KERNEL);
+ image = kmalloc_array(EEPROM_WORDS, sizeof(u32), GFP_KERNEL);
if (!image)
return -ENOMEM;
if (!queue_cnt)
return 0;
- listarr = kmalloc(sizeof(struct list_head) * queue_cnt, GFP_KERNEL);
+ listarr = kmalloc_array(queue_cnt, sizeof(struct list_head),
+ GFP_KERNEL);
if (!listarr)
return -ENOMEM;
team->qom_lists = listarr;
first_word = eeprom->offset >> 1;
last_word = (eeprom->offset + eeprom->len - 1) >> 1;
- eeprom_buff = kmalloc(sizeof(u16) * (last_word - first_word + 1),
- GFP_KERNEL);
+ eeprom_buff = kmalloc_array(last_word - first_word + 1, sizeof(u16),
+ GFP_KERNEL);
if (!eeprom_buff)
return -ENOMEM;
first_word = eeprom->offset >> 1;
last_word = (eeprom->offset + eeprom->len - 1) >> 1;
- eeprom_buff = kmalloc(sizeof(u16) * (last_word - first_word + 1),
- GFP_KERNEL);
+ eeprom_buff = kmalloc_array(last_word - first_word + 1, sizeof(u16),
+ GFP_KERNEL);
if (!eeprom_buff)
return -ENOMEM;
first_word = eeprom->offset >> 1;
last_word = (eeprom->offset + eeprom->len - 1) >> 1;
- eeprom_buff = kmalloc(sizeof(u16) * (last_word - first_word + 1),
- GFP_KERNEL);
+ eeprom_buff = kmalloc_array(last_word - first_word + 1, sizeof(u16),
+ GFP_KERNEL);
if (!eeprom_buff)
return -ENOMEM;
return 0;
/* reserve one for zero packet */
- urb->sg = kmalloc((num_sgs + 1) * sizeof(struct scatterlist),
- GFP_ATOMIC);
+ urb->sg = kmalloc_array(num_sgs + 1, sizeof(struct scatterlist),
+ GFP_ATOMIC);
if (!urb->sg)
return -ENOMEM;
vqs = kzalloc(total_vqs * sizeof(*vqs), GFP_KERNEL);
if (!vqs)
goto err_vq;
- callbacks = kmalloc(total_vqs * sizeof(*callbacks), GFP_KERNEL);
+ callbacks = kmalloc_array(total_vqs, sizeof(*callbacks), GFP_KERNEL);
if (!callbacks)
goto err_callback;
- names = kmalloc(total_vqs * sizeof(*names), GFP_KERNEL);
+ names = kmalloc_array(total_vqs, sizeof(*names), GFP_KERNEL);
if (!names)
goto err_names;
if (!vi->big_packets || vi->mergeable_rx_bufs) {
* ah->ah_rf_banks based on ah->ah_rf_banks_size
* we set above */
if (ah->ah_rf_banks == NULL) {
- ah->ah_rf_banks = kmalloc(sizeof(u32) * ah->ah_rf_banks_size,
+ ah->ah_rf_banks = kmalloc_array(ah->ah_rf_banks_size,
+ sizeof(u32),
GFP_KERNEL);
if (ah->ah_rf_banks == NULL) {
ATH5K_ERR(ah, "out of memory\n");
memset(caldata->pa_table[chain], 0, sizeof(caldata->pa_table[chain]));
- buf = kmalloc(2 * 48 * sizeof(u32), GFP_KERNEL);
+ buf = kmalloc_array(2 * 48, sizeof(u32), GFP_KERNEL);
if (!buf)
return -ENOMEM;
u32 *tmp_reg_list, *tmp_data;
int i;
- tmp_reg_list = kmalloc(size * sizeof(u32), GFP_KERNEL);
+ tmp_reg_list = kmalloc_array(size, sizeof(u32), GFP_KERNEL);
if (!tmp_reg_list) {
dev_err(ah->dev, "%s: tmp_reg_list: alloc filed\n", __func__);
return;
}
- tmp_data = kmalloc(size * sizeof(u32), GFP_KERNEL);
+ tmp_data = kmalloc_array(size, sizeof(u32), GFP_KERNEL);
if (!tmp_data) {
dev_err(ah->dev, "%s tmp_data: alloc filed\n", __func__);
goto error_tmp_data;
s16 *ptr;
struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
- ptr = kmalloc(sizeof(s16) * 131, GFP_ATOMIC);
+ ptr = kmalloc_array(131, sizeof(s16), GFP_ATOMIC);
if (NULL == ptr)
return false;
if (module == 2) {
u16 *values_to_save;
struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
- values_to_save = kmalloc(sizeof(u16) * 20, GFP_ATOMIC);
+ values_to_save = kmalloc_array(20, sizeof(u16), GFP_ATOMIC);
if (NULL == values_to_save)
return;
u16 *phy_c32;
phy_c21 = 0;
phy_c10 = phy_c13 = phy_c14 = phy_c8 = 0;
- ptr = kmalloc(sizeof(s16) * 131, GFP_ATOMIC);
+ ptr = kmalloc_array(131, sizeof(s16), GFP_ATOMIC);
if (NULL == ptr)
return;
- phy_c32 = kmalloc(sizeof(u16) * 20, GFP_ATOMIC);
+ phy_c32 = kmalloc_array(20, sizeof(u16), GFP_ATOMIC);
if (NULL == phy_c32) {
kfree(ptr);
return;
u16 t;
u32 *data_buf = NULL;
- data_buf = kmalloc(sizeof(u32) * num_samps, GFP_ATOMIC);
+ data_buf = kmalloc_array(num_samps, sizeof(u32), GFP_ATOMIC);
if (data_buf == NULL)
return;
tbl_len = (phy_bw << 1);
}
- tone_buf = kmalloc(sizeof(struct cordic_iq) * tbl_len, GFP_ATOMIC);
+ tone_buf = kmalloc_array(tbl_len, sizeof(struct cordic_iq),
+ GFP_ATOMIC);
if (tone_buf == NULL)
return 0;
int i;
int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
- qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
+ qual = kmalloc_array(IW_MAX_AP, sizeof(*qual), GFP_KERNEL);
if (!qual)
return -ENOMEM;
dma_addr_t p;
priv->msg_buffers =
- kmalloc(IPW_COMMAND_POOL_SIZE * sizeof(struct ipw2100_tx_packet),
- GFP_KERNEL);
+ kmalloc_array(IPW_COMMAND_POOL_SIZE,
+ sizeof(struct ipw2100_tx_packet),
+ GFP_KERNEL);
if (!priv->msg_buffers)
return -ENOMEM;
/*
* allocate packets
*/
- priv->rx_buffers = kmalloc(RX_QUEUE_LENGTH *
- sizeof(struct ipw2100_rx_packet),
- GFP_KERNEL);
+ priv->rx_buffers = kmalloc_array(RX_QUEUE_LENGTH,
+ sizeof(struct ipw2100_rx_packet),
+ GFP_KERNEL);
if (!priv->rx_buffers) {
IPW_DEBUG_INFO("can't allocate rx packet buffer table\n");
IPW_DEBUG_TRACE("<< :\n");
- virts = kmalloc(sizeof(void *) * CB_NUMBER_OF_ELEMENTS_SMALL,
- GFP_KERNEL);
+ virts = kmalloc_array(CB_NUMBER_OF_ELEMENTS_SMALL, sizeof(void *),
+ GFP_KERNEL);
if (!virts)
return -ENOMEM;
- phys = kmalloc(sizeof(dma_addr_t) * CB_NUMBER_OF_ELEMENTS_SMALL,
- GFP_KERNEL);
+ phys = kmalloc_array(CB_NUMBER_OF_ELEMENTS_SMALL, sizeof(dma_addr_t),
+ GFP_KERNEL);
if (!phys) {
kfree(virts);
return -ENOMEM;
{
struct pci_dev *dev = priv->pci_dev;
- q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL);
+ q->txb = kmalloc_array(count, sizeof(q->txb[0]), GFP_KERNEL);
if (!q->txb) {
IPW_ERROR("vmalloc for auxiliary BD structures failed\n");
return -ENOMEM;
left -= 4;
new_count = left / sizeof(struct hfa384x_scan_result);
- results = kmalloc(new_count * sizeof(struct hfa384x_hostscan_result),
- GFP_ATOMIC);
+ results = kmalloc_array(new_count,
+ sizeof(struct hfa384x_hostscan_result),
+ GFP_ATOMIC);
if (results == NULL)
return;
return -EOPNOTSUPP;
}
- addr = kmalloc(sizeof(struct sockaddr) * IW_MAX_AP, GFP_KERNEL);
- qual = kmalloc(sizeof(struct iw_quality) * IW_MAX_AP, GFP_KERNEL);
+ addr = kmalloc_array(IW_MAX_AP, sizeof(struct sockaddr), GFP_KERNEL);
+ qual = kmalloc_array(IW_MAX_AP, sizeof(struct iw_quality), GFP_KERNEL);
if (addr == NULL || qual == NULL) {
kfree(addr);
kfree(qual);
/* Alloc memory for full beacon write at once. */
num_cmds = 1 + zd_chip_is_zd1211b(&mac->chip) + full_len;
- ioreqs = kmalloc(num_cmds * sizeof(struct zd_ioreq32), GFP_KERNEL);
+ ioreqs = kmalloc_array(num_cmds, sizeof(struct zd_ioreq32),
+ GFP_KERNEL);
if (!ioreqs) {
r = -ENOMEM;
goto out_nofree;
u_char *tuplebuffer;
u_char *tempbuffer;
- tuplebuffer = kmalloc(sizeof(u_char) * 256, GFP_KERNEL);
+ tuplebuffer = kmalloc_array(256, sizeof(u_char), GFP_KERNEL);
if (!tuplebuffer)
return -ENOMEM;
- tempbuffer = kmalloc(sizeof(u_char) * 258, GFP_KERNEL);
+ tempbuffer = kmalloc_array(258, sizeof(u_char), GFP_KERNEL);
if (!tempbuffer) {
ret = -ENOMEM;
goto free_tuple;
map_num++;
}
- new_map = kmalloc(sizeof(struct pinctrl_map) * map_num, GFP_KERNEL);
+ new_map = kmalloc_array(map_num, sizeof(struct pinctrl_map),
+ GFP_KERNEL);
if (!new_map)
return -ENOMEM;
for (i = 0; i < grp->npins; i++)
map_num++;
- new_map = kmalloc(sizeof(struct pinctrl_map) * map_num, GFP_KERNEL);
+ new_map = kmalloc_array(map_num, sizeof(struct pinctrl_map),
+ GFP_KERNEL);
if (!new_map)
return -ENOMEM;
* any configuration.
*/
nmaps = npins * 2;
- *map = kmalloc(nmaps * sizeof(struct pinctrl_map), GFP_KERNEL);
+ *map = kmalloc_array(nmaps, sizeof(struct pinctrl_map), GFP_KERNEL);
if (!*map)
return -ENOMEM;
return -EINVAL;
}
eerb->buffersize = eerb->buffer_page_count * PAGE_SIZE;
- eerb->buffer = kmalloc(eerb->buffer_page_count * sizeof(char *),
- GFP_KERNEL);
+ eerb->buffer = kmalloc_array(eerb->buffer_page_count, sizeof(char *),
+ GFP_KERNEL);
if (!eerb->buffer) {
kfree(eerb);
return -ENOMEM;
if (!tp)
goto out_err;
tp->freemem_pages =
- kmalloc(sizeof(void *) * TTY3270_STRING_PAGES, GFP_KERNEL);
+ kmalloc_array(TTY3270_STRING_PAGES, sizeof(void *),
+ GFP_KERNEL);
if (!tp->freemem_pages)
goto out_tp;
INIT_LIST_HEAD(&tp->freemem);
return -EINVAL;
/* fetch status of all crypto cards */
- device_status = kmalloc(MAX_ZDEV_ENTRIES_EXT
- * sizeof(struct zcrypt_device_status_ext),
- GFP_KERNEL);
+ device_status = kmalloc_array(MAX_ZDEV_ENTRIES_EXT,
+ sizeof(struct zcrypt_device_status_ext),
+ GFP_KERNEL);
if (!device_status)
return -ENOMEM;
zcrypt_device_status_mask_ext(device_status);
if (aac_convert_sgl == 0)
return 0;
- sge = kmalloc(nseg_new * sizeof(struct sge_ieee1212), GFP_ATOMIC);
+ sge = kmalloc_array(nseg_new, sizeof(struct sge_ieee1212), GFP_ATOMIC);
if (sge == NULL)
return -ENOMEM;
#endif
if (bufflen) { /* allocate memory before taking host_lock */
sg_count = scsi_sg_count(cmd);
- cptr = kmalloc(sizeof(*cptr) * sg_count, GFP_KERNEL | GFP_DMA);
+ cptr = kmalloc_array(sg_count, sizeof(*cptr),
+ GFP_KERNEL | GFP_DMA);
if (!cptr)
return SCSI_MLQUEUE_HOST_BUSY;
} else {
AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
ahd->stack_size = ahd_probe_stack_size(ahd);
- ahd->saved_stack = kmalloc(ahd->stack_size * sizeof(uint16_t), GFP_ATOMIC);
+ ahd->saved_stack = kmalloc_array(ahd->stack_size, sizeof(uint16_t),
+ GFP_ATOMIC);
if (ahd->saved_stack == NULL)
return (ENOMEM);
struct asd_seq_data *seq = &asd_ha->seq;
int i;
- seq->edb_arr = kmalloc(seq->num_edbs*sizeof(*seq->edb_arr), gfp_flags);
+ seq->edb_arr = kmalloc_array(seq->num_edbs, sizeof(*seq->edb_arr),
+ gfp_flags);
if (!seq->edb_arr)
return -ENOMEM;
struct asd_ascb *escb;
int i, escbs;
- seq->escb_arr = kmalloc(seq->num_escbs*sizeof(*seq->escb_arr),
- gfp_flags);
+ seq->escb_arr = kmalloc_array(seq->num_escbs, sizeof(*seq->escb_arr),
+ gfp_flags);
if (!seq->escb_arr)
return -ENOMEM;
* need to keep free lists or allocate this
* memory.
*/
- queue->alloc = q = kmalloc(sizeof(QE_t) * nqueues, GFP_KERNEL);
+ queue->alloc = q = kmalloc_array(nqueues, sizeof(QE_t), GFP_KERNEL);
if (q) {
for (; nqueues; q++, nqueues--) {
SET_MAGIC(q, QUEUE_MAGIC_FREE);
return -ENOMEM;
}
- mem_arr_orig = kmalloc(sizeof(*mem_arr_orig) * BEISCSI_MAX_FRAGS_INIT,
- GFP_KERNEL);
+ mem_arr_orig = kmalloc_array(BEISCSI_MAX_FRAGS_INIT,
+ sizeof(*mem_arr_orig),
+ GFP_KERNEL);
if (!mem_arr_orig) {
kfree(phba->init_mem);
kfree(phwi_ctrlr->wrb_context);
} while (alloc_size);
mem_descr->num_elements = j;
mem_descr->size_in_bytes = phba->mem_req[i];
- mem_descr->mem_array = kmalloc(sizeof(*mem_arr) * j,
- GFP_KERNEL);
+ mem_descr->mem_array = kmalloc_array(j, sizeof(*mem_arr),
+ GFP_KERNEL);
if (!mem_descr->mem_array)
goto free_mem;
idx = 0;
mem_descr = phba->init_mem;
mem_descr += HWI_MEM_WRB;
- pwrb_arr = kmalloc(sizeof(*pwrb_arr) * phba->params.cxns_per_ctrl,
- GFP_KERNEL);
+ pwrb_arr = kmalloc_array(phba->params.cxns_per_ctrl,
+ sizeof(*pwrb_arr),
+ GFP_KERNEL);
if (!pwrb_arr) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BM_%d : Memory alloc failed in create wrb ring.\n");
*/
num_vlink_desc = rlen / sizeof(*vp);
if (num_vlink_desc)
- vlink_desc_arr = kmalloc(sizeof(vp) * num_vlink_desc,
- GFP_ATOMIC);
+ vlink_desc_arr = kmalloc_array(num_vlink_desc, sizeof(vp),
+ GFP_ATOMIC);
if (!vlink_desc_arr)
return;
num_vlink_desc = 0;
return -ENOMEM;
for (i = 0; i < h->nr_cmds; i++) {
h->ioaccel2_cmd_sg_list[i] =
- kmalloc(sizeof(*h->ioaccel2_cmd_sg_list[i]) *
- h->maxsgentries, GFP_KERNEL);
+ kmalloc_array(h->maxsgentries,
+ sizeof(*h->ioaccel2_cmd_sg_list[i]),
+ GFP_KERNEL);
if (!h->ioaccel2_cmd_sg_list[i])
goto clean;
}
return -ENOMEM;
for (i = 0; i < h->nr_cmds; i++) {
- h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
- h->chainsize, GFP_KERNEL);
+ h->cmd_sg_list[i] = kmalloc_array(h->chainsize,
+ sizeof(*h->cmd_sg_list[i]),
+ GFP_KERNEL);
if (!h->cmd_sg_list[i])
goto clean;
status = -ENOMEM;
goto cleanup1;
}
- buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL);
+ buff_size = kmalloc_array(SG_ENTRIES_IN_CMD, sizeof(int), GFP_KERNEL);
if (!buff_size) {
status = -ENOMEM;
goto cleanup1;
char *driver_ver, *old_driver_ver;
int rc, size = sizeof(cfgtable->driver_version);
- old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
+ old_driver_ver = kmalloc_array(2, size, GFP_KERNEL);
if (!old_driver_ver)
return -ENOMEM;
driver_ver = old_driver_ver + size;
if (!phba->lpfc_mbuf_pool)
goto fail_free_dma_buf_pool;
- pool->elements = kmalloc(sizeof(struct lpfc_dmabuf) *
- LPFC_MBUF_POOL_SIZE, GFP_KERNEL);
+ pool->elements = kmalloc_array(LPFC_MBUF_POOL_SIZE,
+ sizeof(struct lpfc_dmabuf),
+ GFP_KERNEL);
if (!pool->elements)
goto fail_free_lpfc_mbuf_pool;
* +1 to allow for aligning.
* XXX FIXME: Use DMA consistent routines
*/
- dma_cmd_space = kmalloc((host->sg_tablesize + 2) *
- sizeof(struct dbdma_cmd), GFP_KERNEL);
+ dma_cmd_space = kmalloc_array(host->sg_tablesize + 2,
+ sizeof(struct dbdma_cmd),
+ GFP_KERNEL);
if (dma_cmd_space == 0) {
printk(KERN_ERR "mac53c94: couldn't allocate dma "
"command space for %pOF\n", node);
goto out_host_put;
}
- adapter->scb_list = kmalloc(sizeof(scb_t) * MAX_COMMANDS, GFP_KERNEL);
+ adapter->scb_list = kmalloc_array(MAX_COMMANDS, sizeof(scb_t),
+ GFP_KERNEL);
if (!adapter->scb_list) {
dev_warn(&pdev->dev, "out of RAM\n");
goto out_free_cmd_buffer;
* Allocate single blocks of memory for all required kiocs,
* mailboxes and passthru structures.
*/
- adapter->kioc_list = kmalloc(sizeof(uioc_t) * lld_adp->max_kioc,
- GFP_KERNEL);
- adapter->mbox_list = kmalloc(sizeof(mbox64_t) * lld_adp->max_kioc,
- GFP_KERNEL);
+ adapter->kioc_list = kmalloc_array(lld_adp->max_kioc,
+ sizeof(uioc_t),
+ GFP_KERNEL);
+ adapter->mbox_list = kmalloc_array(lld_adp->max_kioc,
+ sizeof(mbox64_t),
+ GFP_KERNEL);
adapter->pthru_dma_pool = dma_pool_create("megaraid mm pthru pool",
&adapter->pdev->dev,
sizeof(mraid_passthru_t),
/* if this is the first attach, build the infrastructure */
write_lock(&os_scsi_tapes_lock);
if (os_scsi_tapes == NULL) {
- os_scsi_tapes = kmalloc(osst_max_dev * sizeof(struct osst_tape *), GFP_ATOMIC);
+ os_scsi_tapes = kmalloc_array(osst_max_dev,
+ sizeof(struct osst_tape *),
+ GFP_ATOMIC);
if (os_scsi_tapes == NULL) {
write_unlock(&os_scsi_tapes_lock);
printk(KERN_ERR "osst :E: Unable to allocate array for OnStream SCSI tapes.\n");
ql_log(ql_log_info, vha, 0x0072,
"%d CRB init values found in ROM.\n", n);
- buf = kmalloc(n * sizeof(struct crb_addr_pair), GFP_KERNEL);
+ buf = kmalloc_array(n, sizeof(struct crb_addr_pair), GFP_KERNEL);
if (buf == NULL) {
ql_log(ql_log_fatal, vha, 0x010c,
"Unable to allocate memory.\n");
ql4_printk(KERN_INFO, ha,
"%s: %d CRB init values found in ROM.\n", DRIVER_NAME, n);
- buf = kmalloc(n * sizeof(struct crb_addr_pair), GFP_KERNEL);
+ buf = kmalloc_array(n, sizeof(struct crb_addr_pair), GFP_KERNEL);
if (buf == NULL) {
ql4_printk(KERN_WARNING, ha,
"%s: [ERROR] Unable to malloc memory.\n", DRIVER_NAME);
num_new_devices = num_physicals + num_logicals;
- new_device_list = kmalloc(sizeof(*new_device_list) *
- num_new_devices, GFP_KERNEL);
+ new_device_list = kmalloc_array(num_new_devices,
+ sizeof(*new_device_list),
+ GFP_KERNEL);
if (!new_device_list) {
dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
rc = -ENOMEM;
if (count == 0)
return 0;
- if ((pages = kmalloc(max_pages * sizeof(*pages), GFP_KERNEL)) == NULL)
+ pages = kmalloc_array(max_pages, sizeof(*pages), GFP_KERNEL);
+ if (pages == NULL)
return -ENOMEM;
/* Try to fault in all of the necessary pages */
struct irq_affinity desc = { .pre_vectors = 2 };
num_vqs = vscsi->num_queues + VIRTIO_SCSI_VQ_BASE;
- vqs = kmalloc(num_vqs * sizeof(struct virtqueue *), GFP_KERNEL);
- callbacks = kmalloc(num_vqs * sizeof(vq_callback_t *), GFP_KERNEL);
- names = kmalloc(num_vqs * sizeof(char *), GFP_KERNEL);
+ vqs = kmalloc_array(num_vqs, sizeof(struct virtqueue *), GFP_KERNEL);
+ callbacks = kmalloc_array(num_vqs, sizeof(vq_callback_t *),
+ GFP_KERNEL);
+ names = kmalloc_array(num_vqs, sizeof(char *), GFP_KERNEL);
if (!callbacks || !vqs || !names) {
err = -ENOMEM;
qm_dqrr_set_ithresh(p, QMAN_PIRQ_DQRR_ITHRESH);
qm_mr_set_ithresh(p, QMAN_PIRQ_MR_ITHRESH);
qm_out(p, QM_REG_ITPR, QMAN_PIRQ_IPERIOD);
- portal->cgrs = kmalloc(2 * sizeof(*cgrs), GFP_KERNEL);
+ portal->cgrs = kmalloc_array(2, sizeof(*cgrs), GFP_KERNEL);
if (!portal->cgrs)
goto fail_cgrs;
/* initial snapshot is no-depletion */
/* used to be BUZ_MAX_WIDTH/HEIGHT, but that gives overflows
* on norm-change! */
fh->overlay_mask =
- kmalloc(((768 + 31) / 32) * 576 * 4, GFP_KERNEL);
+ kmalloc(array3_size((768 + 31) / 32, 576, 4), GFP_KERNEL);
if (!fh->overlay_mask) {
dprintk(1,
KERN_ERR
bool bMatchWinStart = false, bPktInBuf = false;
IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): Seq is %d,pTS->RxIndicateSeq is %d, WinSize is %d\n",__func__,SeqNum,pTS->RxIndicateSeq,WinSize);
- prxbIndicateArray = kmalloc(sizeof(struct ieee80211_rxb *) *
- REORDER_WIN_SIZE, GFP_KERNEL);
+ prxbIndicateArray = kmalloc_array(REORDER_WIN_SIZE,
+ sizeof(struct ieee80211_rxb *),
+ GFP_KERNEL);
if (!prxbIndicateArray)
return;
{
struct r8192_priv *priv = ieee80211_priv(dev);
- priv->rx_urb = kmalloc(sizeof(struct urb *) * (MAX_RX_URB + 1),
- GFP_KERNEL);
+ priv->rx_urb = kmalloc_array(MAX_RX_URB + 1, sizeof(struct urb *),
+ GFP_KERNEL);
if (!priv->rx_urb)
return -ENOMEM;
{
int i;
- hvcs_index_list = kmalloc(n * sizeof(hvcs_index_count),GFP_KERNEL);
+ hvcs_index_list = kmalloc_array(n, sizeof(hvcs_index_count),
+ GFP_KERNEL);
if (!hvcs_index_list)
return -ENOMEM;
hvcs_index_count = n;
goto errrelfw;
}
- data = kmalloc(word_count * 2, GFP_KERNEL);
+ data = kmalloc_array(word_count, 2, GFP_KERNEL);
if (data == NULL) {
dev_err(&pdev->dev, "Card%d, firmware upload "
"failed, not enough memory\n", index + 1);
if (!atmel_use_pdc_rx(&atmel_port->uart)) {
ret = -ENOMEM;
- data = kmalloc(sizeof(struct atmel_uart_char)
- * ATMEL_SERIAL_RINGSIZE, GFP_KERNEL);
+ data = kmalloc_array(ATMEL_SERIAL_RINGSIZE,
+ sizeof(struct atmel_uart_char),
+ GFP_KERNEL);
if (!data)
goto err_alloc_ring;
atmel_port->rx_ring.buf = data;
q = p->inverse_trans_unicode;
if (!q) {
q = p->inverse_trans_unicode =
- kmalloc(MAX_GLYPH * sizeof(u16), GFP_KERNEL);
+ kmalloc_array(MAX_GLYPH, sizeof(u16), GFP_KERNEL);
if (!q)
return;
}
p1 = p->uni_pgdir[n = unicode >> 11];
if (!p1) {
- p1 = p->uni_pgdir[n] = kmalloc(32*sizeof(u16 *), GFP_KERNEL);
+ p1 = p->uni_pgdir[n] = kmalloc_array(32, sizeof(u16 *),
+ GFP_KERNEL);
if (!p1) return -ENOMEM;
for (i = 0; i < 32; i++)
p1[i] = NULL;
p2 = p1[n = (unicode >> 6) & 0x1f];
if (!p2) {
- p2 = p1[n] = kmalloc(64*sizeof(u16), GFP_KERNEL);
+ p2 = p1[n] = kmalloc_array(64, sizeof(u16), GFP_KERNEL);
if (!p2) return -ENOMEM;
memset(p2, 0xff, 64*sizeof(u16)); /* No glyphs for the characters (yet) */
}
struct kbdiacr *dia;
int i;
- dia = kmalloc(MAX_DIACR * sizeof(struct kbdiacr),
+ dia = kmalloc_array(MAX_DIACR, sizeof(struct kbdiacr),
GFP_KERNEL);
if (!dia)
return -ENOMEM;
struct kbdiacrsuc __user *a = udp;
void *buf;
- buf = kmalloc(MAX_DIACR * sizeof(struct kbdiacruc),
+ buf = kmalloc_array(MAX_DIACR, sizeof(struct kbdiacruc),
GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
/* Allocate a new buffer before freeing the old one ... */
multiplier = use_unicode ? 3 : 1; /* chars can take up to 3 bytes */
- bp = kmalloc(((sel_end-sel_start)/2+1)*multiplier, GFP_KERNEL);
+ bp = kmalloc_array((sel_end - sel_start) / 2 + 1, multiplier,
+ GFP_KERNEL);
if (!bp) {
printk(KERN_WARNING "selection: kmalloc() failed\n");
clear_selection();
if (num_streams_ret && (num_streams < 2 || num_streams > 65536))
return -EINVAL;
- eps = kmalloc(num_eps * sizeof(*eps), GFP_KERNEL);
+ eps = kmalloc_array(num_eps, sizeof(*eps), GFP_KERNEL);
if (!eps)
return -ENOMEM;
as->mem_usage = u;
if (num_sgs) {
- as->urb->sg = kmalloc(num_sgs * sizeof(struct scatterlist),
- GFP_KERNEL);
+ as->urb->sg = kmalloc_array(num_sgs,
+ sizeof(struct scatterlist),
+ GFP_KERNEL);
if (!as->urb->sg) {
ret = -ENOMEM;
goto error;
}
/* initialize all the urbs we'll use */
- io->urbs = kmalloc(io->entries * sizeof(*io->urbs), mem_flags);
+ io->urbs = kmalloc_array(io->entries, sizeof(*io->urbs), mem_flags);
if (!io->urbs)
goto nomem;
n = nintf = 0;
if (cp) {
nintf = cp->desc.bNumInterfaces;
- new_interfaces = kmalloc(nintf * sizeof(*new_interfaces),
- GFP_NOIO);
+ new_interfaces = kmalloc_array(nintf, sizeof(*new_interfaces),
+ GFP_NOIO);
if (!new_interfaces)
return -ENOMEM;
goto err;
}
- buff = kmalloc(1028 * sizeof(*buff), GFP_KERNEL);
+ buff = kmalloc_array(1028, sizeof(*buff), GFP_KERNEL);
if (!buff) {
kfree(pkt);
err_for = "buffer";
char *next;
unsigned i;
- seen = kmalloc(DBG_SCHED_LIMIT * sizeof *seen, GFP_ATOMIC);
+ seen = kmalloc_array(DBG_SCHED_LIMIT, sizeof(*seen), GFP_ATOMIC);
if (!seen)
return 0;
seen_count = 0;
dev_warn(&intf->dev, "Interrupt out endpoint not found (using control endpoint instead)\n");
dev->interrupt_in_endpoint_size = usb_endpoint_maxp(dev->interrupt_in_endpoint);
- dev->ring_buffer = kmalloc(ring_buffer_size*(sizeof(size_t)+dev->interrupt_in_endpoint_size), GFP_KERNEL);
+ dev->ring_buffer =
+ kmalloc_array(ring_buffer_size,
+ sizeof(size_t) + dev->interrupt_in_endpoint_size,
+ GFP_KERNEL);
if (!dev->ring_buffer)
goto error;
dev->interrupt_in_buffer = kmalloc(dev->interrupt_in_endpoint_size, GFP_KERNEL);
goto error;
dev->interrupt_out_endpoint_size = dev->interrupt_out_endpoint ? usb_endpoint_maxp(dev->interrupt_out_endpoint) :
udev->descriptor.bMaxPacketSize0;
- dev->interrupt_out_buffer = kmalloc(write_buffer_size*dev->interrupt_out_endpoint_size, GFP_KERNEL);
+ dev->interrupt_out_buffer =
+ kmalloc_array(write_buffer_size,
+ dev->interrupt_out_endpoint_size, GFP_KERNEL);
if (!dev->interrupt_out_buffer)
goto error;
dev->interrupt_out_urb = usb_alloc_urb(0, GFP_KERNEL);
int status;
u8 *buf;
- buf = kmalloc(sizeof(u8) * 4, GFP_KERNEL);
+ buf = kmalloc(4, GFP_KERNEL);
if (!buf)
return -ENOMEM;
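This hunk and the next drop the multiplication instead of converting it: with sizeof(u8) == 1 and a small constant count there is no runtime multiply to check, so plain kmalloc(4, ...) and kmalloc(5, ...) are the simpler equivalents. Sketch (buf is a placeholder name, not from the patch):

	/* was kmalloc(sizeof(u8) * 4, GFP_KERNEL): no overflow is possible */
	u8 *buf = kmalloc(4, GFP_KERNEL);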
unsigned int T1FrekvensHZ = 0;
dev_dbg(&port->dev, "%s - enter baud_base=%d\n", __func__, baud_base);
- dataout = kmalloc(sizeof(u8) * 5, GFP_KERNEL);
+ dataout = kmalloc(5, GFP_KERNEL);
if (!dataout)
return -ENOMEM;
* We also need a temporary block buffer, where we read in the old data,
* overwrite parts with the new data, and manipulate the redundancy data
*/
- blockbuffer = kmalloc((pagesize + 64) * blocksize, GFP_NOIO);
+ blockbuffer = kmalloc_array(pagesize + 64, blocksize, GFP_NOIO);
if (!blockbuffer) {
kfree(buffer);
return USB_STOR_TRANSPORT_ERROR;
u32 i;
struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
- info->MS_Lib.Phy2LogMap = kmalloc(info->MS_Lib.NumberOfPhyBlock * sizeof(u16), GFP_KERNEL);
- info->MS_Lib.Log2PhyMap = kmalloc(info->MS_Lib.NumberOfLogBlock * sizeof(u16), GFP_KERNEL);
+ info->MS_Lib.Phy2LogMap = kmalloc_array(info->MS_Lib.NumberOfPhyBlock,
+ sizeof(u16),
+ GFP_KERNEL);
+ info->MS_Lib.Log2PhyMap = kmalloc_array(info->MS_Lib.NumberOfLogBlock,
+ sizeof(u16),
+ GFP_KERNEL);
if ((info->MS_Lib.Phy2LogMap == NULL) || (info->MS_Lib.Log2PhyMap == NULL)) {
ms_lib_free_logicalmap(us);
info->MS_Lib.wrtblk = (u16)-1;
- info->MS_Lib.blkpag = kmalloc(info->MS_Lib.PagesPerBlock * info->MS_Lib.BytesPerSector, GFP_KERNEL);
- info->MS_Lib.blkext = kmalloc(info->MS_Lib.PagesPerBlock * sizeof(struct ms_lib_type_extdat), GFP_KERNEL);
+ info->MS_Lib.blkpag = kmalloc_array(info->MS_Lib.PagesPerBlock,
+ info->MS_Lib.BytesPerSector,
+ GFP_KERNEL);
+ info->MS_Lib.blkext = kmalloc_array(info->MS_Lib.PagesPerBlock,
+ sizeof(struct ms_lib_type_extdat),
+ GFP_KERNEL);
if ((info->MS_Lib.blkpag == NULL) || (info->MS_Lib.blkext == NULL)) {
ms_lib_free_writebuf(us);
kfree(info->lba_to_pba);
kfree(info->pba_to_lba);
- info->lba_to_pba = kmalloc(numblocks*sizeof(int), GFP_NOIO);
- info->pba_to_lba = kmalloc(numblocks*sizeof(int), GFP_NOIO);
+ info->lba_to_pba = kmalloc_array(numblocks, sizeof(int), GFP_NOIO);
+ info->pba_to_lba = kmalloc_array(numblocks, sizeof(int), GFP_NOIO);
if (info->lba_to_pba == NULL || info->pba_to_lba == NULL) {
printk(KERN_WARNING "sddr09_read_map: out of memory\n");
numblocks = info->capacity >> (info->blockshift + info->pageshift);
- buffer = kmalloc( numblocks * 2, GFP_NOIO );
+ buffer = kmalloc_array(numblocks, 2, GFP_NOIO);
if (!buffer)
return -1;
kfree(info->lba_to_pba);
kfree(info->pba_to_lba);
- info->lba_to_pba = kmalloc(numblocks*sizeof(int), GFP_NOIO);
- info->pba_to_lba = kmalloc(numblocks*sizeof(int), GFP_NOIO);
+ info->lba_to_pba = kmalloc_array(numblocks, sizeof(int), GFP_NOIO);
+ info->pba_to_lba = kmalloc_array(numblocks, sizeof(int), GFP_NOIO);
if (info->lba_to_pba == NULL || info->pba_to_lba == NULL) {
kfree(info->lba_to_pba);
int uwb_est_grow(void)
{
size_t actual_size = uwb_est_size * sizeof(uwb_est[0]);
- void *new = kmalloc(2 * actual_size, GFP_ATOMIC);
+ void *new = kmalloc_array(2, actual_size, GFP_ATOMIC);
if (new == NULL)
return -ENOMEM;
memcpy(new, uwb_est, actual_size);
i1480 = &i1480_usb->i1480;
i1480->buf_size = 512;
- i1480->cmd_buf = kmalloc(2 * i1480->buf_size, GFP_KERNEL);
+ i1480->cmd_buf = kmalloc_array(2, i1480->buf_size, GFP_KERNEL);
if (i1480->cmd_buf == NULL) {
dev_err(dev, "Cannot allocate transfer buffers\n");
result = -ENOMEM;
zcopy = vhost_net_zcopy_mask & (0x1 << i);
if (!zcopy)
continue;
- n->vqs[i].ubuf_info = kmalloc(sizeof(*n->vqs[i].ubuf_info) *
- UIO_MAXIOV, GFP_KERNEL);
+ n->vqs[i].ubuf_info =
+ kmalloc_array(UIO_MAXIOV,
+ sizeof(*n->vqs[i].ubuf_info),
+ GFP_KERNEL);
if (!n->vqs[i].ubuf_info)
goto err;
}
n = kvmalloc(sizeof *n, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
if (!n)
return -ENOMEM;
- vqs = kmalloc(VHOST_NET_VQ_MAX * sizeof(*vqs), GFP_KERNEL);
+ vqs = kmalloc_array(VHOST_NET_VQ_MAX, sizeof(*vqs), GFP_KERNEL);
if (!vqs) {
kvfree(n);
return -ENOMEM;
goto err_vs;
}
- vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof(*vqs), GFP_KERNEL);
+ vqs = kmalloc_array(VHOST_SCSI_MAX_VQ, sizeof(*vqs), GFP_KERNEL);
if (!vqs)
goto err_vqs;
if (!n)
return -ENOMEM;
- vqs = kmalloc(VHOST_TEST_VQ_MAX * sizeof(*vqs), GFP_KERNEL);
+ vqs = kmalloc_array(VHOST_TEST_VQ_MAX, sizeof(*vqs), GFP_KERNEL);
if (!vqs) {
kfree(n);
return -ENOMEM;
for (i = 0; i < dev->nvqs; ++i) {
vq = dev->vqs[i];
- vq->indirect = kmalloc(sizeof *vq->indirect * UIO_MAXIOV,
- GFP_KERNEL);
- vq->log = kmalloc(sizeof *vq->log * UIO_MAXIOV, GFP_KERNEL);
- vq->heads = kmalloc(sizeof *vq->heads * UIO_MAXIOV, GFP_KERNEL);
+ vq->indirect = kmalloc_array(UIO_MAXIOV,
+ sizeof(*vq->indirect),
+ GFP_KERNEL);
+ vq->log = kmalloc_array(UIO_MAXIOV, sizeof(*vq->log),
+ GFP_KERNEL);
+ vq->heads = kmalloc_array(UIO_MAXIOV, sizeof(*vq->heads),
+ GFP_KERNEL);
if (!vq->indirect || !vq->log || !vq->heads)
goto err_nomem;
}
if (flag)
new = krealloc(iov->iov, new_num * sizeof(struct iovec), gfp);
else {
- new = kmalloc(new_num * sizeof(struct iovec), gfp);
+ new = kmalloc_array(new_num, sizeof(struct iovec), gfp);
if (new) {
memcpy(new, iov->iov,
iov->max_num * sizeof(struct iovec));
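The krealloc() branch just above stays open-coded: this series predates krealloc_array(), so only the fresh-allocation path gains the checked multiply. If the realloc path needed the same protection, the guard would have to be manual, roughly like this (a hypothetical sketch, not part of the patch; new_num, iov and gfp are from the hunk above):

	if (new_num > SIZE_MAX / sizeof(struct iovec))
		return NULL;	/* the multiplication would overflow */
	new = krealloc(iov->iov, new_num * sizeof(struct iovec), gfp);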
if (attribute) {
u8 *dst;
- dst = kmalloc(w * vc->vc_font.height, GFP_ATOMIC);
+ dst = kmalloc_array(w, vc->vc_font.height, GFP_ATOMIC);
if (!dst)
return;
kfree(ops->cursor_data);
vc->vc_cursor_type != ops->p->cursor_shape ||
ops->cursor_state.mask == NULL ||
ops->cursor_reset) {
- char *mask = kmalloc(w*vc->vc_font.height, GFP_ATOMIC);
+ char *mask = kmalloc_array(w, vc->vc_font.height, GFP_ATOMIC);
int cur_height, size, i = 0;
u8 msk = 0xff;
if (scr_readw(r) != vc->vc_video_erase_char)
break;
if (r != q && new_rows >= rows + logo_lines) {
- save = kmalloc(logo_lines * new_cols * 2, GFP_KERNEL);
+ save = kmalloc(array3_size(logo_lines, new_cols, 2),
+ GFP_KERNEL);
if (save) {
int i = cols < new_cols ? cols : new_cols;
scr_memsetw(save, erase, logo_lines * new_cols * 2);
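Three-factor products, as in the array3_size() hunks in this section, have no three-argument kmalloc helper; the checked multiply moves into array3_size(a, b, c) from <linux/overflow.h> instead, which saturates to SIZE_MAX on overflow so that the following kmalloc() fails cleanly. Sketch with hypothetical dimensions:

	#include <linux/overflow.h>

	/* saturates to SIZE_MAX on overflow; kmalloc(SIZE_MAX, ...) returns NULL */
	save = kmalloc(array3_size(rows, cols, 2), GFP_KERNEL);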
if (attribute) {
u8 *dst;
- dst = kmalloc(w * vc->vc_font.width, GFP_ATOMIC);
+ dst = kmalloc_array(w, vc->vc_font.width, GFP_ATOMIC);
if (!dst)
return;
kfree(ops->cursor_data);
vc->vc_cursor_type != ops->p->cursor_shape ||
ops->cursor_state.mask == NULL ||
ops->cursor_reset) {
- char *tmp, *mask = kmalloc(w*vc->vc_font.width, GFP_ATOMIC);
+ char *tmp, *mask = kmalloc_array(w, vc->vc_font.width,
+ GFP_ATOMIC);
int cur_height, size, i = 0;
int width = (vc->vc_font.width + 7)/8;
if (!mask)
return;
- tmp = kmalloc(width * vc->vc_font.height, GFP_ATOMIC);
+ tmp = kmalloc_array(width, vc->vc_font.height, GFP_ATOMIC);
if (!tmp) {
kfree(mask);
if (attribute) {
u8 *dst;
- dst = kmalloc(w * vc->vc_font.width, GFP_ATOMIC);
+ dst = kmalloc_array(w, vc->vc_font.width, GFP_ATOMIC);
if (!dst)
return;
kfree(ops->cursor_data);
vc->vc_cursor_type != ops->p->cursor_shape ||
ops->cursor_state.mask == NULL ||
ops->cursor_reset) {
- char *tmp, *mask = kmalloc(w*vc->vc_font.width, GFP_ATOMIC);
+ char *tmp, *mask = kmalloc_array(w, vc->vc_font.width,
+ GFP_ATOMIC);
int cur_height, size, i = 0;
int width = (vc->vc_font.width + 7)/8;
if (!mask)
return;
- tmp = kmalloc(width * vc->vc_font.height, GFP_ATOMIC);
+ tmp = kmalloc_array(width, vc->vc_font.height, GFP_ATOMIC);
if (!tmp) {
kfree(mask);
info->fbops->fb_sync(info);
if (ops->fd_size < d_cellsize * len) {
- dst = kmalloc(d_cellsize * len, GFP_KERNEL);
+ dst = kmalloc_array(len, d_cellsize, GFP_KERNEL);
if (dst == NULL) {
err = -ENOMEM;
if (attribute) {
u8 *dst;
- dst = kmalloc(w * vc->vc_font.height, GFP_ATOMIC);
+ dst = kmalloc_array(w, vc->vc_font.height, GFP_ATOMIC);
if (!dst)
return;
kfree(ops->cursor_data);
vc->vc_cursor_type != ops->p->cursor_shape ||
ops->cursor_state.mask == NULL ||
ops->cursor_reset) {
- char *mask = kmalloc(w*vc->vc_font.height, GFP_ATOMIC);
+ char *mask = kmalloc_array(w, vc->vc_font.height, GFP_ATOMIC);
int cur_height, size, i = 0;
u8 msk = 0xff;
}
if (fb_logo.depth <= 4) {
- logo_new = kmalloc(logo->width * logo->height, GFP_KERNEL);
+ logo_new = kmalloc_array(logo->width, logo->height,
+ GFP_KERNEL);
if (logo_new == NULL) {
kfree(palette);
if (saved_pseudo_palette)
image.height = logo->height;
if (rotate) {
- logo_rotate = kmalloc(logo->width *
- logo->height, GFP_KERNEL);
+ logo_rotate = kmalloc_array(logo->width, logo->height,
+ GFP_KERNEL);
if (logo_rotate)
fb_rotate_logo(info, logo_rotate, &image, rotate);
}
}
*dbsize = num;
- m = kmalloc(num * sizeof(struct fb_videomode), GFP_KERNEL);
+ m = kmalloc_array(num, sizeof(struct fb_videomode), GFP_KERNEL);
if (!m)
return mode;
memmove(m, mode, num * sizeof(struct fb_videomode));
pr_debug("%s\n",__func__);
- info->pseudo_palette = kmalloc(sizeof(u32) * 16, GFP_KERNEL);
+ info->pseudo_palette = kmalloc_array(16, sizeof(u32), GFP_KERNEL);
if (!info->pseudo_palette)
return -ENOMEM;
return;
}
- cmd = kmalloc(cmdlen * 4, GFP_DMA);
+ cmd = kmalloc_array(cmdlen, 4, GFP_DMA);
if (!cmd)
return cfb_imageblit(info, image);
cmdfn(cmd, step, dx, dy, width, height, fgcolor, bgcolor, image, info);
u8 *msk = (u8 *) cursor->mask;
u8 *src;
- src = kmalloc(s_pitch * cursor->image.height, GFP_ATOMIC);
+ src = kmalloc_array(s_pitch, cursor->image.height, GFP_ATOMIC);
if (src) {
switch (cursor->rop) {
nr_pages = (count + PAGE_SIZE - 1) >> PAGE_SHIFT;
- pages = kmalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
+ pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
if (!pages)
return -ENOMEM;
u8 *msk = (u8 *) cursor->mask;
u8 *src;
- src = kmalloc(s_pitch * cursor->image.height, GFP_ATOMIC);
+ src = kmalloc_array(s_pitch, cursor->image.height, GFP_ATOMIC);
if (src) {
switch (cursor->rop) {
break;
case VIAFB_GET_GAMMA_LUT:
- viafb_gamma_table = kmalloc(256 * sizeof(u32), GFP_KERNEL);
+ viafb_gamma_table = kmalloc_array(256, sizeof(u32),
+ GFP_KERNEL);
if (!viafb_gamma_table)
return -ENOMEM;
viafb_get_gamma_table(viafb_gamma_table);
goto out;
}
- info->pseudo_palette = kmalloc(sizeof (u32) * MAX_PALETTES, GFP_KERNEL);
+ info->pseudo_palette = kmalloc_array(MAX_PALETTES, sizeof(u32),
+ GFP_KERNEL);
if (!info->pseudo_palette) {
err = -ENOMEM;
goto out;
/* Add 4M so that we can align the vmap to 4MiB as the host requires. */
size = PAGE_ALIGN(req->hypervisor_size) + SZ_4M;
- pages = kmalloc(sizeof(*pages) * (size >> PAGE_SHIFT), GFP_KERNEL);
+ pages = kmalloc_array(size >> PAGE_SHIFT, sizeof(*pages), GFP_KERNEL);
if (!pages)
goto out;
struct page **pages;
int i, rc, ret;
- pages = kmalloc(sizeof(*pages) * VMMDEV_MEMORY_BALLOON_CHUNK_PAGES,
- GFP_KERNEL | __GFP_NOWARN);
+ pages = kmalloc_array(VMMDEV_MEMORY_BALLOON_CHUNK_PAGES,
+ sizeof(*pages),
+ GFP_KERNEL | __GFP_NOWARN);
if (!pages)
return -ENOMEM;
vp_dev->msix_vectors = nvectors;
- vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names,
- GFP_KERNEL);
+ vp_dev->msix_names = kmalloc_array(nvectors,
+ sizeof(*vp_dev->msix_names),
+ GFP_KERNEL);
if (!vp_dev->msix_names)
goto error;
vp_dev->msix_affinity_masks
*/
gfp &= ~__GFP_HIGHMEM;
- desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp);
+ desc = kmalloc_array(total_sg, sizeof(struct vring_desc), gfp);
if (!desc)
return NULL;
/* No need for kzalloc as it is initialized in following hypercall
* GNTTABOP_setup_table.
*/
- frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC);
+ frames = kmalloc_array(nr_gframes, sizeof(unsigned long), GFP_ATOMIC);
if (!frames)
return -ENOMEM;
max_nr_glist_frames = (max_nr_grant_frames *
gnttab_interface->grefs_per_grant_frame / RPP);
- gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *),
- GFP_KERNEL);
+ gnttab_list = kmalloc_array(max_nr_glist_frames,
+ sizeof(grant_ref_t *),
+ GFP_KERNEL);
if (gnttab_list == NULL)
return -ENOMEM;
if (dev->msi_enabled || !(cmd & PCI_COMMAND_MEMORY))
return -ENXIO;
- entries = kmalloc(op->value * sizeof(*entries), GFP_KERNEL);
+ entries = kmalloc_array(op->value, sizeof(*entries), GFP_KERNEL);
if (entries == NULL)
return -ENOMEM;
for (ds = dentry; !IS_ROOT(ds); ds = ds->d_parent)
n++;
- wnames = kmalloc(sizeof(char *) * n, GFP_KERNEL);
+ wnames = kmalloc_array(n, sizeof(char *), GFP_KERNEL);
if (!wnames)
goto err_out;
asb->s_ids_per_zone = zone_size / (asb->s_idlen + 1);
- dm = kmalloc(nzones * sizeof(*dm), GFP_KERNEL);
+ dm = kmalloc_array(nzones, sizeof(*dm), GFP_KERNEL);
if (dm == NULL) {
adfs_error(sb, "not enough memory");
return ERR_PTR(-ENOMEM);
if (call->count > AFSCBMAX)
return afs_protocol_error(call, -EBADMSG);
- call->buffer = kmalloc(call->count * 3 * 4, GFP_KERNEL);
+ call->buffer = kmalloc(array3_size(call->count, 3, 4),
+ GFP_KERNEL);
if (!call->buffer)
return -ENOMEM;
call->offset = 0;
switch (call->unmarshall) {
case 0:
call->offset = 0;
- call->buffer = kmalloc(11 * sizeof(__be32), GFP_KERNEL);
+ call->buffer = kmalloc_array(11, sizeof(__be32), GFP_KERNEL);
if (!call->buffer)
return -ENOMEM;
call->unmarshall++;
switch (call->unmarshall) {
case 0:
call->offset = 0;
- call->buffer = kmalloc(11 * sizeof(__be32), GFP_KERNEL);
+ call->buffer = kmalloc_array(11, sizeof(__be32), GFP_KERNEL);
if (!call->buffer)
return -ENOMEM;
call->unmarshall++;
INIT_LIST_HEAD(&info->thread_list);
/* Allocate space for ELF notes */
- info->notes = kmalloc(8 * sizeof(struct memelfnote), GFP_KERNEL);
+ info->notes = kmalloc_array(8, sizeof(struct memelfnote), GFP_KERNEL);
if (!info->notes)
return 0;
info->psinfo = kmalloc(sizeof(*info->psinfo), GFP_KERNEL);
psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
if (!psinfo)
goto cleanup;
- notes = kmalloc(NUM_NOTES * sizeof(struct memelfnote), GFP_KERNEL);
+ notes = kmalloc_array(NUM_NOTES, sizeof(struct memelfnote),
+ GFP_KERNEL);
if (!notes)
goto cleanup;
fpu = kmalloc(sizeof(*fpu), GFP_KERNEL);
if (nr_pages <= DIO_INLINE_BIO_VECS)
vecs = inline_vecs;
else {
- vecs = kmalloc(nr_pages * sizeof(struct bio_vec), GFP_KERNEL);
+ vecs = kmalloc_array(nr_pages, sizeof(struct bio_vec),
+ GFP_KERNEL);
if (!vecs)
return -ENOMEM;
}
/* build page vector */
nr_pages = calc_pages_for(0, len);
- pages = kmalloc(sizeof(*pages) * nr_pages, GFP_KERNEL);
+ pages = kmalloc_array(nr_pages, sizeof(*pages), GFP_KERNEL);
if (!pages) {
ret = -ENOMEM;
goto out_put;
BUG_ON(pages);
max_pages = calc_pages_for(0, (u64)len);
- pages = kmalloc(max_pages * sizeof (*pages),
- GFP_NOFS);
+ pages = kmalloc_array(max_pages,
+ sizeof(*pages),
+ GFP_NOFS);
if (!pages) {
pool = fsc->wb_pagevec_pool;
pages = mempool_alloc(pool, GFP_NOFS);
/* allocate new pages array for next request */
data_pages = pages;
- pages = kmalloc(locked_pages * sizeof (*pages),
- GFP_NOFS);
+ pages = kmalloc_array(locked_pages, sizeof(*pages),
+ GFP_NOFS);
if (!pages) {
pool = fsc->wb_pagevec_pool;
pages = mempool_alloc(pool, GFP_NOFS);
num_flock_locks = 0;
}
if (num_fcntl_locks + num_flock_locks > 0) {
- flocks = kmalloc((num_fcntl_locks + num_flock_locks) *
- sizeof(struct ceph_filelock), GFP_NOFS);
+ flocks = kmalloc_array(num_fcntl_locks + num_flock_locks,
+ sizeof(struct ceph_filelock),
+ GFP_NOFS);
if (!flocks) {
err = -ENOMEM;
goto out_free;
if (size < 2 || size > UINT_MAX/sizeof(unsigned long))
return 0;
- *oid = kmalloc(size * sizeof(unsigned long), GFP_ATOMIC);
+ *oid = kmalloc_array(size, sizeof(unsigned long), GFP_ATOMIC);
if (*oid == NULL)
return 0;
if (num_aces > ULONG_MAX / sizeof(struct cifs_ace *))
return;
- ppace = kmalloc(num_aces * sizeof(struct cifs_ace *),
- GFP_KERNEL);
+ ppace = kmalloc_array(num_aces, sizeof(struct cifs_ace *),
+ GFP_KERNEL);
if (!ppace)
return;
* with unix extensions enabled.
*/
info_buf_source =
- kmalloc(2 * sizeof(FILE_UNIX_BASIC_INFO),
+ kmalloc_array(2, sizeof(FILE_UNIX_BASIC_INFO),
GFP_KERNEL);
if (info_buf_source == NULL) {
rc = -ENOMEM;
if (!num)
return -EINVAL;
- iov = kmalloc(sizeof(struct kvec) * num, GFP_KERNEL);
+ iov = kmalloc_array(num, sizeof(struct kvec), GFP_KERNEL);
if (!iov)
return -ENOMEM;
int rc;
int len = (2 * UniStrnlen((wchar_t *)target_file, PATH_MAX));
- data = kmalloc(sizeof(void *) * 2, GFP_KERNEL);
+ data = kmalloc_array(2, sizeof(void *), GFP_KERNEL);
if (!data)
return -ENOMEM;
int rc;
int len = (2 * UniStrnlen((wchar_t *)target_file, PATH_MAX));
- data = kmalloc(sizeof(void *) * 2, GFP_KERNEL);
+ data = kmalloc_array(2, sizeof(void *), GFP_KERNEL);
if (!data)
return -ENOMEM;
int rc;
if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
- new_iov = kmalloc(sizeof(struct kvec) * (n_vec + 1),
- GFP_KERNEL);
+ new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
+ GFP_KERNEL);
if (!new_iov) {
/* otherwise cifs_send_recv below sets resp_buf_type */
*resp_buf_type = CIFS_NO_BUFFER;
__be32 rfc1002_marker;
if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
- new_iov = kmalloc(sizeof(struct kvec) * (n_vec + 1),
- GFP_KERNEL);
+ new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
+ GFP_KERNEL);
if (!new_iov)
return -ENOMEM;
} else
pages = exofs_max_io_pages(&pcol->sbi->layout, pcol->expected_pages);
for (; pages; pages >>= 1) {
- pcol->pages = kmalloc(pages * sizeof(struct page *),
- GFP_KERNEL);
+ pcol->pages = kmalloc_array(pages, sizeof(struct page *),
+ GFP_KERNEL);
if (likely(pcol->pages)) {
pcol->alloc_pages = pages;
return 0;
/ EXT2_BLOCKS_PER_GROUP(sb)) + 1;
db_count = (sbi->s_groups_count + EXT2_DESC_PER_BLOCK(sb) - 1) /
EXT2_DESC_PER_BLOCK(sb);
- sbi->s_group_desc = kmalloc (db_count * sizeof (struct buffer_head *), GFP_KERNEL);
+ sbi->s_group_desc = kmalloc_array(db_count,
+ sizeof(struct buffer_head *),
+ GFP_KERNEL);
if (sbi->s_group_desc == NULL) {
ext2_msg(sb, KERN_ERR, "error: not enough memory");
goto failed_mount;
goto out2;
flex_gd->count = flexbg_size;
- flex_gd->groups = kmalloc(sizeof(struct ext4_new_group_data) *
- flexbg_size, GFP_NOFS);
+ flex_gd->groups = kmalloc_array(flexbg_size,
+ sizeof(struct ext4_new_group_data),
+ GFP_NOFS);
if (flex_gd->groups == NULL)
goto out2;
- flex_gd->bg_flags = kmalloc(flexbg_size * sizeof(__u16), GFP_NOFS);
+ flex_gd->bg_flags = kmalloc_array(flexbg_size, sizeof(__u16),
+ GFP_NOFS);
if (flex_gd->bg_flags == NULL)
goto out1;
int res, i;
int err;
- primary = kmalloc(reserved_gdb * sizeof(*primary), GFP_NOFS);
+ primary = kmalloc_array(reserved_gdb, sizeof(*primary), GFP_NOFS);
if (!primary)
return -ENOMEM;
if (len == 0)
return -ENOENT;
- slots = kmalloc(sizeof(*slots) * MSDOS_SLOTS, GFP_NOFS);
+ slots = kmalloc_array(MSDOS_SLOTS, sizeof(*slots), GFP_NOFS);
if (slots == NULL)
return -ENOMEM;
pages = req->inline_pages;
page_descs = req->inline_page_descs;
} else {
- pages = kmalloc(sizeof(struct page *) * npages, flags);
- page_descs = kmalloc(sizeof(struct fuse_page_desc) *
- npages, flags);
+ pages = kmalloc_array(npages, sizeof(struct page *),
+ flags);
+ page_descs =
+ kmalloc_array(npages,
+ sizeof(struct fuse_page_desc),
+ flags);
}
if (!pages || !page_descs) {
if (!fud)
return -EPERM;
- bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
+ bufs = kmalloc_array(pipe->buffers, sizeof(struct pipe_buffer),
+ GFP_KERNEL);
if (!bufs)
return -ENOMEM;
if (!fud)
return -EPERM;
- bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
+ bufs = kmalloc_array(pipe->buffers, sizeof(struct pipe_buffer),
+ GFP_KERNEL);
if (!bufs)
return -ENOMEM;
/* Change the pointers.
Don't bother distinguishing stuffed from non-stuffed.
This code is complicated enough already. */
- lp = kmalloc(half_len * sizeof(__be64), GFP_NOFS);
+ lp = kmalloc_array(half_len, sizeof(__be64), GFP_NOFS);
if (!lp) {
error = -ENOMEM;
goto fail_brelse;
if (IS_ERR(hc))
return PTR_ERR(hc);
- hc2 = kmalloc(hsize_bytes * 2, GFP_NOFS | __GFP_NOWARN);
+ hc2 = kmalloc_array(hsize_bytes, 2, GFP_NOFS | __GFP_NOWARN);
if (hc2 == NULL)
hc2 = __vmalloc(hsize_bytes * 2, GFP_NOFS, PAGE_KERNEL);
error = -ENOMEM;
/* 96 is max number of dirents which can be stuffed into an inode */
- darr = kmalloc(96 * sizeof(struct gfs2_dirent *), GFP_NOFS);
+ darr = kmalloc_array(96, sizeof(struct gfs2_dirent *), GFP_NOFS);
if (darr) {
g.pdent = (const struct gfs2_dirent **)darr;
g.offset = 0;
default:
if (num_gh <= 4)
break;
- pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
+ pph = kmalloc_array(num_gh, sizeof(struct gfs2_holder *),
+ GFP_NOFS);
if (!pph)
return -ENOMEM;
}
gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
&data_blocks, &ind_blocks);
- ghs = kmalloc(num_qd * sizeof(struct gfs2_holder), GFP_NOFS);
+ ghs = kmalloc_array(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
if (!ghs)
return -ENOMEM;
{
unsigned int x;
- rlist->rl_ghs = kmalloc(rlist->rl_rgrps * sizeof(struct gfs2_holder),
- GFP_NOFS | __GFP_NOFAIL);
+ rlist->rl_ghs = kmalloc_array(rlist->rl_rgrps,
+ sizeof(struct gfs2_holder),
+ GFP_NOFS | __GFP_NOFAIL);
for (x = 0; x < rlist->rl_rgrps; x++)
gfs2_holder_init(rlist->rl_rgd[x]->rd_gl,
state, 0,
int error = 0, err;
memset(sc, 0, sizeof(struct gfs2_statfs_change_host));
- gha = kmalloc(slots * sizeof(struct gfs2_holder), GFP_KERNEL);
+ gha = kmalloc_array(slots, sizeof(struct gfs2_holder), GFP_KERNEL);
if (!gha)
return -ENOMEM;
for (x = 0; x < slots; x++)
if (hpfs_inode->i_rddir_off[i] == pos)
return 0;
if (!(i&0x0f)) {
- if (!(ppos = kmalloc((i+0x11) * sizeof(loff_t*), GFP_NOFS))) {
+ ppos = kmalloc_array(i + 0x11, sizeof(loff_t *), GFP_NOFS);
+ if (!ppos) {
pr_err("out of memory for position list\n");
return -ENOMEM;
}
int n = (hpfs_sb(s)->sb_fs_size + 0x200000 - 1) >> 21;
int i;
__le32 *b;
- if (!(b = kmalloc(n * 512, GFP_KERNEL))) {
+ if (!(b = kmalloc_array(n, 512, GFP_KERNEL))) {
pr_err("can't allocate memory for bitmap directory\n");
return NULL;
}
table->hash_size = hash_size;
table->hash_shift = shift;
table->hash_table =
- kmalloc(hash_size * sizeof(struct list_head), GFP_KERNEL);
+ kmalloc_array(hash_size, sizeof(struct list_head), GFP_KERNEL);
if (!table->hash_table) {
kmem_cache_free(jbd2_revoke_table_cache, table);
table = NULL;
if (!c->wbuf)
return -ENOMEM;
- c->oobbuf = kmalloc(NR_OOB_SCAN_PAGES * c->oobavail, GFP_KERNEL);
+ c->oobbuf = kmalloc_array(NR_OOB_SCAN_PAGES, c->oobavail, GFP_KERNEL);
if (!c->oobbuf) {
kfree(c->wbuf);
return -ENOMEM;
max_ranges = nblocks;
do_div(max_ranges, minlen);
range_cnt = min_t(u64, max_ranges + 1, 32 * 1024);
- totrim = kmalloc(sizeof(struct range2trim) * range_cnt, GFP_NOFS);
+ totrim = kmalloc_array(range_cnt, sizeof(struct range2trim), GFP_NOFS);
if (totrim == NULL) {
jfs_error(bmp->db_ipbmap->i_sb, "no memory for trim array\n");
IWRITE_UNLOCK(ipbmap);
struct component_name ciKey;
struct super_block *sb = ip->i_sb;
- ciKey.name = kmalloc((JFS_NAME_MAX + 1) * sizeof(wchar_t), GFP_NOFS);
+ ciKey.name = kmalloc_array(JFS_NAME_MAX + 1, sizeof(wchar_t),
+ GFP_NOFS);
if (!ciKey.name) {
rc = -ENOMEM;
goto dtSearch_Exit2;
smp = split->mp;
sp = DT_PAGE(ip, smp);
- key.name = kmalloc((JFS_NAME_MAX + 2) * sizeof(wchar_t), GFP_NOFS);
+ key.name = kmalloc_array(JFS_NAME_MAX + 2, sizeof(wchar_t), GFP_NOFS);
if (!key.name) {
DT_PUTPAGE(smp);
rc = -ENOMEM;
struct component_name lkey;
struct component_name rkey;
- lkey.name = kmalloc((JFS_NAME_MAX + 1) * sizeof(wchar_t),
+ lkey.name = kmalloc_array(JFS_NAME_MAX + 1, sizeof(wchar_t),
GFP_KERNEL);
if (lkey.name == NULL)
return -ENOMEM;
- rkey.name = kmalloc((JFS_NAME_MAX + 1) * sizeof(wchar_t),
+ rkey.name = kmalloc_array(JFS_NAME_MAX + 1, sizeof(wchar_t),
GFP_KERNEL);
if (rkey.name == NULL) {
kfree(lkey.name);
return -ENAMETOOLONG;
uniName->name =
- kmalloc((length + 1) * sizeof(wchar_t), GFP_NOFS);
+ kmalloc_array(length + 1, sizeof(wchar_t), GFP_NOFS);
if (uniName->name == NULL)
return -ENOMEM;
cache->c_max_entries = bucket_count << 4;
INIT_LIST_HEAD(&cache->c_list);
spin_lock_init(&cache->c_list_lock);
- cache->c_hash = kmalloc(bucket_count * sizeof(struct hlist_bl_head),
- GFP_KERNEL);
+ cache->c_hash = kmalloc_array(bucket_count,
+ sizeof(struct hlist_bl_head),
+ GFP_KERNEL);
if (!cache->c_hash) {
kfree(cache);
goto err_out;
struct saved *p;
if (nd->flags & LOOKUP_RCU) {
- p= kmalloc(MAXSYMLINKS * sizeof(struct saved),
+ p = kmalloc_array(MAXSYMLINKS, sizeof(struct saved),
GFP_ATOMIC);
if (unlikely(!p))
return -ECHILD;
} else {
- p= kmalloc(MAXSYMLINKS * sizeof(struct saved),
+ p = kmalloc_array(MAXSYMLINKS, sizeof(struct saved),
GFP_KERNEL);
if (unlikely(!p))
return -ENOMEM;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
int i;
- nn->reclaim_str_hashtbl = kmalloc(sizeof(struct list_head) *
- CLIENT_HASH_SIZE, GFP_KERNEL);
+ nn->reclaim_str_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
+ sizeof(struct list_head),
+ GFP_KERNEL);
if (!nn->reclaim_str_hashtbl)
return -ENOMEM;
clp->cl_name.data = kmemdup(name.data, name.len, GFP_KERNEL);
if (clp->cl_name.data == NULL)
goto err_no_name;
- clp->cl_ownerstr_hashtbl = kmalloc(sizeof(struct list_head) *
- OWNER_HASH_SIZE, GFP_KERNEL);
+ clp->cl_ownerstr_hashtbl = kmalloc_array(OWNER_HASH_SIZE,
+ sizeof(struct list_head),
+ GFP_KERNEL);
if (!clp->cl_ownerstr_hashtbl)
goto err_no_hashtbl;
for (i = 0; i < OWNER_HASH_SIZE; i++)
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
int i;
- nn->conf_id_hashtbl = kmalloc(sizeof(struct list_head) *
- CLIENT_HASH_SIZE, GFP_KERNEL);
+ nn->conf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
+ sizeof(struct list_head),
+ GFP_KERNEL);
if (!nn->conf_id_hashtbl)
goto err;
- nn->unconf_id_hashtbl = kmalloc(sizeof(struct list_head) *
- CLIENT_HASH_SIZE, GFP_KERNEL);
+ nn->unconf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
+ sizeof(struct list_head),
+ GFP_KERNEL);
if (!nn->unconf_id_hashtbl)
goto err_unconf_id;
- nn->sessionid_hashtbl = kmalloc(sizeof(struct list_head) *
- SESSION_HASH_SIZE, GFP_KERNEL);
+ nn->sessionid_hashtbl = kmalloc_array(SESSION_HASH_SIZE,
+ sizeof(struct list_head),
+ GFP_KERNEL);
if (!nn->sessionid_hashtbl)
goto err_sessionid;
BUG_ON(ni->type != AT_DATA);
BUG_ON(ni->name_len);
- pages = kmalloc(nr_pages * sizeof(struct page *), GFP_NOFS);
+ pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_NOFS);
/* Allocate memory to store the buffer heads we need. */
bhs_size = cb_size / block_size * sizeof(struct buffer_head *);
o2net_set_nst_sock_container(&nst, sc);
veclen = caller_veclen + 1;
- vec = kmalloc(sizeof(struct kvec) * veclen, GFP_ATOMIC);
+ vec = kmalloc_array(veclen, sizeof(struct kvec), GFP_ATOMIC);
if (vec == NULL) {
mlog(0, "failed to %zu element kvec!\n", veclen);
ret = -ENOMEM;
static void **dlm_alloc_pagevec(int pages)
{
- void **vec = kmalloc(pages * sizeof(void *), GFP_KERNEL);
+ void **vec = kmalloc_array(pages, sizeof(void *), GFP_KERNEL);
int i;
if (!vec)
unsigned long *entries;
int err;
- entries = kmalloc(MAX_STACK_TRACE_DEPTH * sizeof(*entries), GFP_KERNEL);
+ entries = kmalloc_array(MAX_STACK_TRACE_DEPTH, sizeof(*entries),
+ GFP_KERNEL);
if (!entries)
return -ENOMEM;
pm.show_pfn = file_ns_capable(file, &init_user_ns, CAP_SYS_ADMIN);
pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
- pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_KERNEL);
+ pm.buffer = kmalloc_array(pm.len, PM_ENTRY_BYTES, GFP_KERNEL);
ret = -ENOMEM;
if (!pm.buffer)
goto out_mm;
goto out;
}
if (nr_segs > fast_segs) {
- iov = kmalloc(nr_segs*sizeof(struct iovec), GFP_KERNEL);
+ iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
if (iov == NULL) {
ret = -ENOMEM;
goto out;
goto out;
if (nr_segs > fast_segs) {
ret = -ENOMEM;
- iov = kmalloc(nr_segs*sizeof(struct iovec), GFP_KERNEL);
+ iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
if (iov == NULL)
goto out;
}
* now we know we've got a good transaction, and it was
* inside the valid time ranges
*/
- log_blocks = kmalloc(get_desc_trans_len(desc) *
- sizeof(struct buffer_head *), GFP_NOFS);
- real_blocks = kmalloc(get_desc_trans_len(desc) *
- sizeof(struct buffer_head *), GFP_NOFS);
+ log_blocks = kmalloc_array(get_desc_trans_len(desc),
+ sizeof(struct buffer_head *),
+ GFP_NOFS);
+ real_blocks = kmalloc_array(get_desc_trans_len(desc),
+ sizeof(struct buffer_head *),
+ GFP_NOFS);
if (!log_blocks || !real_blocks) {
brelse(c_bh);
brelse(d_bh);
size = FDS_BYTES(n);
bits = stack_fds;
if (size > sizeof(stack_fds) / 6) {
- bits = kmalloc(6 * size, GFP_KERNEL);
+ bits = kmalloc_array(6, size, GFP_KERNEL);
ret = -ENOMEM;
if (!bits)
goto out_nofds;
if (buffers <= PIPE_DEF_BUFFERS)
return 0;
- spd->pages = kmalloc(buffers * sizeof(struct page *), GFP_KERNEL);
- spd->partial = kmalloc(buffers * sizeof(struct partial_page), GFP_KERNEL);
+ spd->pages = kmalloc_array(buffers, sizeof(struct page *), GFP_KERNEL);
+ spd->partial = kmalloc_array(buffers, sizeof(struct partial_page),
+ GFP_KERNEL);
if (spd->pages && spd->partial)
return 0;
vec = __vec;
if (nr_pages > PIPE_DEF_BUFFERS) {
- vec = kmalloc(nr_pages * sizeof(struct kvec), GFP_KERNEL);
+ vec = kmalloc_array(nr_pages, sizeof(struct kvec), GFP_KERNEL);
if (unlikely(!vec)) {
res = -ENOMEM;
goto out;
/* Needed by 'ubifs_pack_lsave()' */
c->main_first = c->leb_cnt - *main_lebs;
- lsave = kmalloc(sizeof(int) * c->lsave_cnt, GFP_KERNEL);
+ lsave = kmalloc_array(c->lsave_cnt, sizeof(int), GFP_KERNEL);
pnode = kzalloc(sizeof(struct ubifs_pnode), GFP_KERNEL);
nnode = kzalloc(sizeof(struct ubifs_nnode), GFP_KERNEL);
buf = vmalloc(c->leb_size);
return -ENOMEM;
for (i = 0; i < LPROPS_HEAP_CNT; i++) {
- c->lpt_heap[i].arr = kmalloc(sizeof(void *) * LPT_HEAP_SZ,
- GFP_KERNEL);
+ c->lpt_heap[i].arr = kmalloc_array(LPT_HEAP_SZ,
+ sizeof(void *),
+ GFP_KERNEL);
if (!c->lpt_heap[i].arr)
return -ENOMEM;
c->lpt_heap[i].cnt = 0;
c->lpt_heap[i].max_cnt = LPT_HEAP_SZ;
}
- c->dirty_idx.arr = kmalloc(sizeof(void *) * LPT_HEAP_SZ, GFP_KERNEL);
+ c->dirty_idx.arr = kmalloc_array(LPT_HEAP_SZ, sizeof(void *),
+ GFP_KERNEL);
if (!c->dirty_idx.arr)
return -ENOMEM;
c->dirty_idx.cnt = 0;
return -ENOMEM;
if (c->big_lpt) {
- c->lsave = kmalloc(sizeof(int) * c->lsave_cnt, GFP_NOFS);
+ c->lsave = kmalloc_array(c->lsave_cnt, sizeof(int), GFP_NOFS);
if (!c->lsave)
return -ENOMEM;
err = read_lsave(c);
return err;
}
- path = kmalloc(sizeof(struct lpt_scan_node) * (c->lpt_hght + 1),
- GFP_NOFS);
+ path = kmalloc_array(c->lpt_hght + 1, sizeof(struct lpt_scan_node),
+ GFP_NOFS);
if (!path)
return -ENOMEM;
* never exceed 64.
*/
err = -ENOMEM;
- c->bottom_up_buf = kmalloc(BOTTOM_UP_HEIGHT * sizeof(int), GFP_KERNEL);
+ c->bottom_up_buf = kmalloc_array(BOTTOM_UP_HEIGHT, sizeof(int),
+ GFP_KERNEL);
if (!c->bottom_up_buf)
goto out_free;
ubifs_assert(znode);
if (c->zroot.znode->level > BOTTOM_UP_HEIGHT) {
kfree(c->bottom_up_buf);
- c->bottom_up_buf = kmalloc(c->zroot.znode->level * sizeof(int),
- GFP_NOFS);
+ c->bottom_up_buf = kmalloc_array(c->zroot.znode->level,
+ sizeof(int),
+ GFP_NOFS);
if (!c->bottom_up_buf)
return ERR_PTR(-ENOMEM);
path = c->bottom_up_buf;
dbg_gc("%d znodes to write", cnt);
- c->gap_lebs = kmalloc(sizeof(int) * (c->lst.idx_lebs + 1), GFP_NOFS);
+ c->gap_lebs = kmalloc_array(c->lst.idx_lebs + 1, sizeof(int),
+ GFP_NOFS);
if (!c->gap_lebs)
return -ENOMEM;
dbg_cmt("need about %d empty LEBS for TNC commit", leb_cnt);
if (!leb_cnt)
return 0;
- c->ilebs = kmalloc(leb_cnt * sizeof(int), GFP_NOFS);
+ c->ilebs = kmalloc_array(leb_cnt, sizeof(int), GFP_NOFS);
if (!c->ilebs)
return -ENOMEM;
for (i = 0; i < leb_cnt; i++) {
* Read cylinder group (we read only first fragment from block
* at this time) and prepare internal data structures for cg caching.
*/
- if (!(sbi->s_ucg = kmalloc (sizeof(struct buffer_head *) * uspi->s_ncg, GFP_NOFS)))
+ sbi->s_ucg = kmalloc_array(uspi->s_ncg, sizeof(struct buffer_head *),
+ GFP_NOFS);
+ if (!sbi->s_ucg)
goto failed;
for (i = 0; i < uspi->s_ncg; i++)
sbi->s_ucg[i] = NULL;
if (!key || key->prefixlen > trie->max_prefixlen)
goto find_leftmost;
- node_stack = kmalloc(trie->max_prefixlen * sizeof(struct lpm_trie_node *),
- GFP_ATOMIC | __GFP_NOWARN);
+ node_stack = kmalloc_array(trie->max_prefixlen,
+ sizeof(struct lpm_trie_node *),
+ GFP_ATOMIC | __GFP_NOWARN);
if (!node_stack)
return -ENOMEM;
if (PIDLIST_TOO_LARGE(count))
return vmalloc(count * sizeof(pid_t));
else
- return kmalloc(count * sizeof(pid_t), GFP_KERNEL);
+ return kmalloc_array(count, sizeof(pid_t), GFP_KERNEL);
}
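The vmalloc() branch above keeps its open-coded multiply: there was no vmalloc_array() at the time of this series, so a caller that could not otherwise bound count would need a manual guard, roughly (hypothetical, not part of the patch):

	if (count > SIZE_MAX / sizeof(pid_t))
		return NULL;
	return vmalloc(count * sizeof(pid_t));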
static void pidlist_free(void *p)
goto done;
}
- csa = kmalloc(nr_cpusets() * sizeof(cp), GFP_KERNEL);
+ csa = kmalloc_array(nr_cpusets(), sizeof(cp), GFP_KERNEL);
if (!csa)
goto done;
csn = 0;
* The rest of the code, including the scheduler, can deal with
* dattr==NULL case. No need to abort if alloc fails.
*/
- dattr = kmalloc(ndoms * sizeof(struct sched_domain_attr), GFP_KERNEL);
+ dattr = kmalloc_array(ndoms, sizeof(struct sched_domain_attr),
+ GFP_KERNEL);
for (nslot = 0, i = 0; i < csn; i++) {
struct cpuset *a = csa[i];
kdb_printf("Command only available during kdb_init()\n");
return KDB_NOTIMP;
}
- defcmd_set = kmalloc((defcmd_set_count + 1) * sizeof(*defcmd_set),
- GFP_KDB);
+ defcmd_set = kmalloc_array(defcmd_set_count + 1, sizeof(*defcmd_set),
+ GFP_KDB);
if (!defcmd_set)
goto fail_defcmd;
memcpy(defcmd_set, save_defcmd_set,
}
if (i >= kdb_max_commands) {
- kdbtab_t *new = kmalloc((kdb_max_commands - KDB_BASE_CMD_MAX +
- kdb_command_extend) * sizeof(*new), GFP_KDB);
+ kdbtab_t *new = kmalloc_array(kdb_max_commands -
+ KDB_BASE_CMD_MAX +
+ kdb_command_extend,
+ sizeof(*new),
+ GFP_KDB);
if (!new) {
kdb_printf("Could not allocate new kdb_command "
"table\n");
/* cut off if it is too long */
if (count > KSYM_NAME_LEN)
count = KSYM_NAME_LEN;
- buf = kmalloc(sizeof(char) * (count + 1), GFP_KERNEL);
+ buf = kmalloc(count + 1, GFP_KERNEL);
if (!buf)
return -ENOMEM;
/* Initialize the statistics so that each run gets its own numbers. */
if (nwriters_stress) {
lock_is_write_held = 0;
- cxt.lwsa = kmalloc(sizeof(*cxt.lwsa) * cxt.nrealwriters_stress, GFP_KERNEL);
+ cxt.lwsa = kmalloc_array(cxt.nrealwriters_stress,
+ sizeof(*cxt.lwsa),
+ GFP_KERNEL);
if (cxt.lwsa == NULL) {
VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
firsterr = -ENOMEM;
if (nreaders_stress) {
lock_is_read_held = 0;
- cxt.lrsa = kmalloc(sizeof(*cxt.lrsa) * cxt.nrealreaders_stress, GFP_KERNEL);
+ cxt.lrsa = kmalloc_array(cxt.nrealreaders_stress,
+ sizeof(*cxt.lrsa),
+ GFP_KERNEL);
if (cxt.lrsa == NULL) {
VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
firsterr = -ENOMEM;
buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
if (!buf)
return NULL;
- buf->padding = kmalloc(chan->n_subbufs * sizeof(size_t *), GFP_KERNEL);
+ buf->padding = kmalloc_array(chan->n_subbufs, sizeof(size_t *),
+ GFP_KERNEL);
if (!buf->padding)
goto free_buf;
int i;
cpumask_var_t *doms;
- doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
+ doms = kmalloc_array(ndoms, sizeof(*doms), GFP_KERNEL);
if (!doms)
return NULL;
for (i = 0; i < ndoms; i++) {
struct task_struct *g, *t;
for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
- ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
- * sizeof(struct ftrace_ret_stack),
- GFP_KERNEL);
+ ret_stack_list[i] =
+ kmalloc_array(FTRACE_RETFUNC_DEPTH,
+ sizeof(struct ftrace_ret_stack),
+ GFP_KERNEL);
if (!ret_stack_list[i]) {
start = 0;
end = i;
struct ftrace_ret_stack **ret_stack_list;
int ret, cpu;
- ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
- sizeof(struct ftrace_ret_stack *),
- GFP_KERNEL);
+ ret_stack_list = kmalloc_array(FTRACE_RETSTACK_ALLOC_SIZE,
+ sizeof(struct ftrace_ret_stack *),
+ GFP_KERNEL);
if (!ret_stack_list)
return -ENOMEM;
ret_stack = per_cpu(idle_ret_stack, cpu);
if (!ret_stack) {
- ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
- * sizeof(struct ftrace_ret_stack),
- GFP_KERNEL);
+ ret_stack =
+ kmalloc_array(FTRACE_RETFUNC_DEPTH,
+ sizeof(struct ftrace_ret_stack),
+ GFP_KERNEL);
if (!ret_stack)
return;
per_cpu(idle_ret_stack, cpu) = ret_stack;
if (ftrace_graph_active) {
struct ftrace_ret_stack *ret_stack;
- ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
- * sizeof(struct ftrace_ret_stack),
- GFP_KERNEL);
+ ret_stack = kmalloc_array(FTRACE_RETFUNC_DEPTH,
+ sizeof(struct ftrace_ret_stack),
+ GFP_KERNEL);
if (!ret_stack)
return;
graph_init_task(t, ret_stack);
static int allocate_cmdlines_buffer(unsigned int val,
struct saved_cmdlines_buffer *s)
{
- s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
- GFP_KERNEL);
+ s->map_cmdline_to_pid = kmalloc_array(val,
+ sizeof(*s->map_cmdline_to_pid),
+ GFP_KERNEL);
if (!s->map_cmdline_to_pid)
return -ENOMEM;
- s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
+ s->saved_cmdlines = kmalloc_array(val, TASK_COMM_LEN, GFP_KERNEL);
if (!s->saved_cmdlines) {
kfree(s->map_cmdline_to_pid);
return -ENOMEM;
* where the head holds the module and length of array, and the
* tail holds a pointer to the next list.
*/
- map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
+ map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
if (!map_array) {
pr_warn("Unable to allocate trace eval mapping\n");
return;
nr_preds += 2; /* For TRUE and FALSE */
- op_stack = kmalloc(sizeof(*op_stack) * nr_parens, GFP_KERNEL);
+ op_stack = kmalloc_array(nr_parens, sizeof(*op_stack), GFP_KERNEL);
if (!op_stack)
return ERR_PTR(-ENOMEM);
- prog_stack = kmalloc(sizeof(*prog_stack) * nr_preds, GFP_KERNEL);
+ prog_stack = kmalloc_array(nr_preds, sizeof(*prog_stack), GFP_KERNEL);
if (!prog_stack) {
parse_error(pe, -ENOMEM, 0);
goto out_free;
}
- inverts = kmalloc(sizeof(*inverts) * nr_preds, GFP_KERNEL);
+ inverts = kmalloc_array(nr_preds, sizeof(*inverts), GFP_KERNEL);
if (!inverts) {
parse_error(pe, -ENOMEM, 0);
goto out_free;
struct uid_gid_extent *forward;
/* Allocate memory for 340 mappings. */
- forward = kmalloc(sizeof(struct uid_gid_extent) *
- UID_GID_MAP_MAX_EXTENTS, GFP_KERNEL);
+ forward = kmalloc_array(UID_GID_MAP_MAX_EXTENTS,
+ sizeof(struct uid_gid_extent),
+ GFP_KERNEL);
if (!forward)
return -ENOMEM;
return NULL;
argc = count_argc(argv_str);
- argv = kmalloc(sizeof(*argv) * (argc + 2), gfp);
+ argv = kmalloc_array(argc + 2, sizeof(*argv), gfp);
if (!argv) {
kfree(argv_str);
return NULL;
unsigned long results;
cycles_t time1, time2, time;
- nodes = kmalloc(nnodes * sizeof(struct interval_tree_node), GFP_KERNEL);
+ nodes = kmalloc_array(nnodes, sizeof(struct interval_tree_node),
+ GFP_KERNEL);
if (!nodes)
return -ENOMEM;
- queries = kmalloc(nsearches * sizeof(int), GFP_KERNEL);
+ queries = kmalloc_array(nsearches, sizeof(int), GFP_KERNEL);
if (!queries) {
kfree(nodes);
return -ENOMEM;
return -EINVAL;
}
- fifo->data = kmalloc(size * esize, gfp_mask);
+ fifo->data = kmalloc_array(size, esize, gfp_mask);
if (!fifo->data) {
fifo->mask = 0;
return 0; /* no need to do it */
if (a->d) {
- p = kmalloc(nlimbs * sizeof(mpi_limb_t), GFP_KERNEL);
+ p = kmalloc_array(nlimbs, sizeof(mpi_limb_t), GFP_KERNEL);
if (!p)
return -ENOMEM;
memcpy(p, a->d, a->alloced * sizeof(mpi_limb_t));
cycles_t time1, time2, time;
struct rb_node *node;
- nodes = kmalloc(nnodes * sizeof(*nodes), GFP_KERNEL);
+ nodes = kmalloc_array(nnodes, sizeof(*nodes), GFP_KERNEL);
if (!nodes)
return -ENOMEM;
rs->gffunc = gffunc;
/* Allocate the arrays */
- rs->alpha_to = kmalloc(sizeof(uint16_t) * (rs->nn + 1), gfp);
+ rs->alpha_to = kmalloc_array(rs->nn + 1, sizeof(uint16_t), gfp);
if (rs->alpha_to == NULL)
goto err;
- rs->index_of = kmalloc(sizeof(uint16_t) * (rs->nn + 1), gfp);
+ rs->index_of = kmalloc_array(rs->nn + 1, sizeof(uint16_t), gfp);
if (rs->index_of == NULL)
goto err;
- rs->genpoly = kmalloc(sizeof(uint16_t) * (rs->nroots + 1), gfp);
+ rs->genpoly = kmalloc_array(rs->nroots + 1, sizeof(uint16_t), gfp);
if(rs->genpoly == NULL)
goto err;
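One caveat visible in the hunk above: kmalloc_array() only guards the multiplication, so the rs->nn + 1 and rs->nroots + 1 additions still happen in ordinary integer arithmetic before the call. That is harmless for Reed-Solomon field sizes, but where such a count could be attacker-influenced, the addition deserves its own check. A hypothetical sketch using the include/linux/overflow.h helper:

	size_t n;

	if (check_add_overflow((size_t)rs->nn, (size_t)1, &n))
		goto err;
	rs->alpha_to = kmalloc_array(n, sizeof(uint16_t), gfp);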
kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
return ptr;
} else
- return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
+ return kmalloc_array(nents, sizeof(struct scatterlist),
+ gfp_mask);
}
static void sg_kfree(struct scatterlist *sg, unsigned int nents)
unsigned long mmun_start; /* For mmu_notifiers */
unsigned long mmun_end; /* For mmu_notifiers */
- pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR,
- GFP_KERNEL);
+ pages = kmalloc_array(HPAGE_PMD_NR, sizeof(struct page *),
+ GFP_KERNEL);
if (unlikely(!pages)) {
ret |= VM_FAULT_OOM;
goto out;
num_fault_mutexes = 1;
#endif
hugetlb_fault_mutex_table =
- kmalloc(sizeof(struct mutex) * num_fault_mutexes, GFP_KERNEL);
+ kmalloc_array(num_fault_mutexes, sizeof(struct mutex),
+ GFP_KERNEL);
BUG_ON(!hugetlb_fault_mutex_table);
for (i = 0; i < num_fault_mutexes; i++)
{
int node;
unsigned long count = 0;
- unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
- sizeof(unsigned long), GFP_KERNEL);
+ unsigned long *map = kmalloc_array(BITS_TO_LONGS(oo_objects(s->max)),
+ sizeof(unsigned long),
+ GFP_KERNEL);
struct kmem_cache_node *n;
if (!map)
unsigned long i;
struct loc_track t = { 0, 0, NULL };
int node;
- unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
- sizeof(unsigned long), GFP_KERNEL);
+ unsigned long *map = kmalloc_array(BITS_TO_LONGS(oo_objects(s->max)),
+ sizeof(unsigned long),
+ GFP_KERNEL);
struct kmem_cache_node *n;
if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
unsigned long sum = 0;
int cpu;
int len;
- int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);
+ int *data = kmalloc_array(nr_cpu_ids, sizeof(int), GFP_KERNEL);
if (!data)
return -ENOMEM;
"w", nwname);
if (!errcode) {
*wnames =
- kmalloc(sizeof(char *) * *nwname,
- GFP_NOFS);
+ kmalloc_array(*nwname,
+ sizeof(char *),
+ GFP_NOFS);
if (!*wnames)
errcode = -ENOMEM;
}
p9pdu_readf(pdu, proto_version, "w", nwqid);
if (!errcode) {
*wqids =
- kmalloc(*nwqid *
- sizeof(struct p9_qid),
- GFP_NOFS);
+ kmalloc_array(*nwqid,
+ sizeof(struct p9_qid),
+ GFP_NOFS);
if (*wqids == NULL)
errcode = -ENOMEM;
}
nr_pages = DIV_ROUND_UP((unsigned long)p + len, PAGE_SIZE) -
(unsigned long)p / PAGE_SIZE;
- *pages = kmalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
+ *pages = kmalloc_array(nr_pages, sizeof(struct page *),
+ GFP_NOFS);
if (!*pages)
return -ENOMEM;
if (mpc->number_of_mps_macs != 0)
kfree(mpc->mps_macs);
mpc->number_of_mps_macs = 0;
- mpc->mps_macs = kmalloc(num_macs * ETH_ALEN, GFP_KERNEL);
+ mpc->mps_macs = kmalloc_array(num_macs, ETH_ALEN, GFP_KERNEL);
if (mpc->mps_macs == NULL) {
pr_info("(%s) out of mem\n", mpc->dev->name);
return NULL;
/* cache_dump can't sleep. Therefore we allocate temp buffer and then
* copy it to the user space.
*/
- buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
+ buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
if (!buf) {
err = -ENOMEM;
goto done;
*/
alloc_size = roundup_pow_of_two(size);
- seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
+ seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
if (!seq_list->list)
return -ENOMEM;
/* create array for CAN frames and copy the data */
if (msg_head->nframes > 1) {
- op->frames = kmalloc(msg_head->nframes * op->cfsiz,
- GFP_KERNEL);
+ op->frames = kmalloc_array(msg_head->nframes,
+ op->cfsiz,
+ GFP_KERNEL);
if (!op->frames) {
kfree(op);
return -ENOMEM;
if (msg_head->nframes > 1) {
/* create array for CAN frames and copy the data */
- op->frames = kmalloc(msg_head->nframes * op->cfsiz,
- GFP_KERNEL);
+ op->frames = kmalloc_array(msg_head->nframes,
+ op->cfsiz,
+ GFP_KERNEL);
if (!op->frames) {
kfree(op);
return -ENOMEM;
if (!map->osd_primary_affinity) {
int i;
- map->osd_primary_affinity = kmalloc(map->max_osd*sizeof(u32),
- GFP_NOFS);
+ map->osd_primary_affinity = kmalloc_array(map->max_osd,
+ sizeof(u32),
+ GFP_NOFS);
if (!map->osd_primary_affinity)
return -ENOMEM;
int got = 0;
int rc = 0;
- pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS);
+ pages = kmalloc_array(num_pages, sizeof(*pages), GFP_NOFS);
if (!pages)
return ERR_PTR(-ENOMEM);
struct page **pages;
int i;
- pages = kmalloc(sizeof(*pages) * num_pages, flags);
+ pages = kmalloc_array(num_pages, sizeof(*pages), flags);
if (!pages)
return ERR_PTR(-ENOMEM);
for (i = 0; i < num_pages; i++) {
int i;
struct hlist_head *hash;
- hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
+ hash = kmalloc_array(NETDEV_HASHENTRIES, sizeof(*hash), GFP_KERNEL);
if (hash != NULL)
for (i = 0; i < NETDEV_HASHENTRIES; i++)
INIT_HLIST_HEAD(&hash[i]);
return -EFAULT;
test.len = test_len;
- data = kmalloc(test_len * sizeof(u64), GFP_USER);
+ data = kmalloc_array(test_len, sizeof(u64), GFP_USER);
if (!data)
return -ENOMEM;
*/
err = ops->peer_getappinfo(netdev, &info, &app_count);
if (!err && app_count) {
- table = kmalloc(sizeof(struct dcb_app) * app_count, GFP_KERNEL);
+ table = kmalloc_array(app_count, sizeof(struct dcb_app),
+ GFP_KERNEL);
if (!table)
return -ENOMEM;
return -ENOMEM;
/* allocate buffer and initialize linked list */
- seqp = kmalloc(CCID2_SEQBUF_LEN * sizeof(struct ccid2_seq), gfp_any());
+ seqp = kmalloc_array(CCID2_SEQBUF_LEN, sizeof(struct ccid2_seq),
+ gfp_any());
if (seqp == NULL)
return -ENOMEM;
{
int cpu;
- ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
+ ip_idents = kmalloc_array(IP_IDENTS_SZ, sizeof(*ip_idents),
+ GFP_KERNEL);
if (!ip_idents)
panic("IP: failed to allocate ip_idents\n");
if (have_mfp)
n_suites += 4;
- suites = kmalloc(sizeof(u32) * n_suites, GFP_KERNEL);
+ suites = kmalloc_array(n_suites, sizeof(u32), GFP_KERNEL);
if (!suites)
return -ENOMEM;
if (!mi->r)
goto error;
- mi->sample_table = kmalloc(SAMPLE_COLUMNS * max_rates, gfp);
+ mi->sample_table = kmalloc_array(max_rates, SAMPLE_COLUMNS, gfp);
if (!mi->sample_table)
goto error1;
if (!msp->ratelist)
goto error;
- msp->sample_table = kmalloc(SAMPLE_COLUMNS * max_rates, gfp);
+ msp->sample_table = kmalloc_array(max_rates, SAMPLE_COLUMNS, gfp);
if (!msp->sample_table)
goto error1;
struct nf_conntrack_l4proto __rcu **proto_array;
int i;
- proto_array = kmalloc(MAX_NF_CT_PROTO *
+ proto_array =
+ kmalloc_array(MAX_NF_CT_PROTO,
sizeof(struct nf_conntrack_l4proto *),
GFP_KERNEL);
if (proto_array == NULL) {
mutex_lock(&nf_nat_proto_mutex);
if (nf_nat_l4protos[l3proto] == NULL) {
- l4protos = kmalloc(IPPROTO_MAX * sizeof(struct nf_nat_l4proto *),
- GFP_KERNEL);
+ l4protos = kmalloc_array(IPPROTO_MAX,
+ sizeof(struct nf_nat_l4proto *),
+ GFP_KERNEL);
if (l4protos == NULL) {
ret = -ENOMEM;
goto out;
nft_chain_filter_init();
- info = kmalloc(sizeof(struct nft_expr_info) * NFT_RULE_MAXEXPRS,
- GFP_KERNEL);
+ info = kmalloc_array(NFT_RULE_MAXEXPRS, sizeof(struct nft_expr_info),
+ GFP_KERNEL);
if (info == NULL) {
err = -ENOMEM;
goto err1;
seqcount_init(&per_cpu(xt_recseq, i));
}
- xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL);
+ xt = kmalloc_array(NFPROTO_NUMPROTO, sizeof(struct xt_af), GFP_KERNEL);
if (!xt)
return -ENOMEM;
}
if (family->maxattr && !family->parallel_ops) {
- family->attrbuf = kmalloc((family->maxattr+1) *
- sizeof(struct nlattr *), GFP_KERNEL);
+ family->attrbuf = kmalloc_array(family->maxattr + 1,
+ sizeof(struct nlattr *),
+ GFP_KERNEL);
if (family->attrbuf == NULL) {
err = -ENOMEM;
goto errout_locked;
return -EOPNOTSUPP;
if (family->maxattr && family->parallel_ops) {
- attrbuf = kmalloc((family->maxattr+1) *
- sizeof(struct nlattr *), GFP_KERNEL);
+ attrbuf = kmalloc_array(family->maxattr + 1,
+ sizeof(struct nlattr *),
+ GFP_KERNEL);
if (attrbuf == NULL)
return -ENOMEM;
} else
goto err_destroy_table;
}
- dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
- GFP_KERNEL);
+ dp->ports = kmalloc_array(DP_VPORT_HASH_BUCKETS,
+ sizeof(struct hlist_head),
+ GFP_KERNEL);
if (!dp->ports) {
err = -ENOMEM;
goto err_destroy_percpu;
nr_pages = (PAGE_ALIGN(start + len) - (start & PAGE_MASK))
>> PAGE_SHIFT;
- pages = kmalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
+ pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
if (!pages) {
ret = -ENOMEM;
goto out;
sg = _sg;
if (unlikely(nsg > 4)) {
- sg = kmalloc(sizeof(*sg) * nsg, GFP_NOIO);
+ sg = kmalloc_array(nsg, sizeof(*sg), GFP_NOIO);
if (!sg)
goto nomem;
}
/* Allocate and initialize the endpoint hash table. */
sctp_ep_hashsize = 64;
sctp_ep_hashtable =
- kmalloc(64 * sizeof(struct sctp_hashbucket), GFP_KERNEL);
+ kmalloc_array(64, sizeof(struct sctp_hashbucket), GFP_KERNEL);
if (!sctp_ep_hashtable) {
pr_err("Failed endpoint_hash alloc\n");
status = -ENOMEM;
last = (snd_buf->page_base + snd_buf->page_len - 1) >> PAGE_SHIFT;
rqstp->rq_enc_pages_num = last - first + 1 + 1;
rqstp->rq_enc_pages
- = kmalloc(rqstp->rq_enc_pages_num * sizeof(struct page *),
+ = kmalloc_array(rqstp->rq_enc_pages_num,
+ sizeof(struct page *),
GFP_NOFS);
if (!rqstp->rq_enc_pages)
goto out;
if (!trans_buf)
return -ENOMEM;
- attrbuf = kmalloc((tipc_genl_family.maxattr + 1) *
- sizeof(struct nlattr *), GFP_KERNEL);
+ attrbuf = kmalloc_array(tipc_genl_family.maxattr + 1,
+ sizeof(struct nlattr *),
+ GFP_KERNEL);
if (!attrbuf) {
err = -ENOMEM;
goto trans_out;
return -EINVAL;
if (buffer && buflen >= 2 * p->blob_len) {
- ascii_buf = kmalloc(2 * p->blob_len, GFP_KERNEL);
+ ascii_buf = kmalloc_array(2, p->blob_len, GFP_KERNEL);
if (!ascii_buf)
return -ENOMEM;
get_user(frames, &data32->frames))
return -EFAULT;
bufptr = compat_ptr(buf);
- bufs = kmalloc(sizeof(void __user *) * ch, GFP_KERNEL);
+ bufs = kmalloc_array(ch, sizeof(void __user *), GFP_KERNEL);
if (bufs == NULL)
return -ENOMEM;
for (i = 0; i < ch; i++) {
if (!frame_aligned(runtime, to->iov->iov_len))
return -EINVAL;
frames = bytes_to_samples(runtime, to->iov->iov_len);
- bufs = kmalloc(sizeof(void *) * to->nr_segs, GFP_KERNEL);
+ bufs = kmalloc_array(to->nr_segs, sizeof(void *), GFP_KERNEL);
if (bufs == NULL)
return -ENOMEM;
for (i = 0; i < to->nr_segs; ++i)
!frame_aligned(runtime, from->iov->iov_len))
return -EINVAL;
frames = bytes_to_samples(runtime, from->iov->iov_len);
- bufs = kmalloc(sizeof(void *) * from->nr_segs, GFP_KERNEL);
+ bufs = kmalloc_array(from->nr_segs, sizeof(void *), GFP_KERNEL);
if (bufs == NULL)
return -ENOMEM;
for (i = 0; i < from->nr_segs; ++i)
struct snd_midi_channel *chan;
int i;
- chan = kmalloc(n * sizeof(struct snd_midi_channel), GFP_KERNEL);
+ chan = kmalloc_array(n, sizeof(struct snd_midi_channel), GFP_KERNEL);
if (chan) {
for (i = 0; i < n; i++)
snd_midi_channel_init(chan+i, i);
void *p;
int err;
- b->packets = kmalloc(count * sizeof(*b->packets), GFP_KERNEL);
+ b->packets = kmalloc_array(count, sizeof(*b->packets), GFP_KERNEL);
if (!b->packets) {
err = -ENOMEM;
goto error;
return 0;
sq->numBufs = num;
sq->bufSize = size;
- sq->buffers = kmalloc (num * sizeof(char *), GFP_KERNEL);
+ sq->buffers = kmalloc_array(num, sizeof(char *), GFP_KERNEL);
if (!sq->buffers)
return -ENOMEM;
for (i = 0; i < num; i++) {
entry->size = le32_to_cpu(fwdat[fwlen++]);
if (fwlen + entry->size > fwsize)
goto error_inval;
- entry->data = kmalloc(entry->size * 4, GFP_KERNEL);
+ entry->data = kmalloc_array(entry->size, 4, GFP_KERNEL);
if (!entry->data)
goto error;
memcpy_le32(entry->data, &fwdat[fwlen], entry->size * 4);
snd_cs46xx_proc_init(card, chip);
#ifdef CONFIG_PM_SLEEP
- chip->saved_regs = kmalloc(sizeof(*chip->saved_regs) *
- ARRAY_SIZE(saved_regs), GFP_KERNEL);
+ chip->saved_regs = kmalloc_array(ARRAY_SIZE(saved_regs),
+ sizeof(*chip->saved_regs),
+ GFP_KERNEL);
if (!chip->saved_regs) {
snd_cs46xx_free(chip);
return -ENOMEM;
ins->symbol_table.symbols = vmalloc(sizeof(struct dsp_symbol_entry) *
DSP_MAX_SYMBOLS);
ins->code.data = kmalloc(DSP_CODE_BYTE_SIZE, GFP_KERNEL);
- ins->modules = kmalloc(sizeof(struct dsp_module_desc) * DSP_MAX_MODULES, GFP_KERNEL);
+ ins->modules = kmalloc_array(DSP_MAX_MODULES,
+ sizeof(struct dsp_module_desc),
+ GFP_KERNEL);
if (!ins->symbol_table.symbols || !ins->code.data || !ins->modules) {
cs46xx_dsp_spos_destroy(chip);
goto error;
int len;
len = emu->audigy ? 0x200 : 0x100;
- emu->saved_gpr = kmalloc(len * 4, GFP_KERNEL);
+ emu->saved_gpr = kmalloc_array(len, 4, GFP_KERNEL);
if (! emu->saved_gpr)
return -ENOMEM;
len = emu->audigy ? 0x100 : 0xa0;
- emu->tram_val_saved = kmalloc(len * 4, GFP_KERNEL);
- emu->tram_addr_saved = kmalloc(len * 4, GFP_KERNEL);
+ emu->tram_val_saved = kmalloc_array(len, 4, GFP_KERNEL);
+ emu->tram_addr_saved = kmalloc_array(len, 4, GFP_KERNEL);
if (! emu->tram_val_saved || ! emu->tram_addr_saved)
return -ENOMEM;
len = emu->audigy ? 2 * 1024 : 2 * 512;
len = snd_hda_get_raw_connections(codec, nid, list, ARRAY_SIZE(list));
if (len == -ENOSPC) {
len = snd_hda_get_num_raw_conns(codec, nid);
- result = kmalloc(sizeof(hda_nid_t) * len, GFP_KERNEL);
+ result = kmalloc_array(len, sizeof(hda_nid_t), GFP_KERNEL);
if (!result)
return -ENOMEM;
len = snd_hda_get_raw_connections(codec, nid, result, len);
int i;
hda_nid_t nid;
- codec->wcaps = kmalloc(codec->core.num_nodes * 4, GFP_KERNEL);
+ codec->wcaps = kmalloc_array(codec->core.num_nodes, 4, GFP_KERNEL);
if (!codec->wcaps)
return -ENOMEM;
nid = codec->core.start_nid;
if (wid_caps & AC_WCAP_CONN_LIST) {
conn_len = snd_hda_get_num_raw_conns(codec, nid);
if (conn_len > 0) {
- conn = kmalloc(sizeof(hda_nid_t) * conn_len,
- GFP_KERNEL);
+ conn = kmalloc_array(conn_len,
+ sizeof(hda_nid_t),
+ GFP_KERNEL);
if (!conn)
return;
if (snd_hda_get_raw_connections(codec, nid, conn,
return -ENOMEM;
}
if (! dev->idx_table) {
- dev->idx_table = kmalloc(sizeof(*dev->idx_table) * VIA_TABLE_SIZE, GFP_KERNEL);
+ dev->idx_table = kmalloc_array(VIA_TABLE_SIZE,
+ sizeof(*dev->idx_table),
+ GFP_KERNEL);
if (! dev->idx_table)
return -ENOMEM;
}
return -ENOMEM;
}
if (! dev->idx_table) {
- dev->idx_table = kmalloc(sizeof(*dev->idx_table) * VIA_TABLE_SIZE, GFP_KERNEL);
+ dev->idx_table = kmalloc_array(VIA_TABLE_SIZE,
+ sizeof(*dev->idx_table),
+ GFP_KERNEL);
if (! dev->idx_table)
return -ENOMEM;
}
goto free_chip;
#ifdef CONFIG_PM_SLEEP
- chip->saved_regs = kmalloc(YDSXGR_NUM_SAVED_REGS * sizeof(u32),
- GFP_KERNEL);
+ chip->saved_regs = kmalloc_array(YDSXGR_NUM_SAVED_REGS, sizeof(u32),
+ GFP_KERNEL);
if (chip->saved_regs == NULL) {
err = -ENOMEM;
goto free_chip;
wm8904_get_drc_enum, wm8904_put_drc_enum);
/* We need an array of texts for the enum API */
- wm8904->drc_texts = kmalloc(sizeof(char *)
- * pdata->num_drc_cfgs, GFP_KERNEL);
+ wm8904->drc_texts = kmalloc_array(pdata->num_drc_cfgs,
+ sizeof(char *),
+ GFP_KERNEL);
if (!wm8904->drc_texts)
return;
};
/* We need an array of texts for the enum API */
- wm8994->mbc_texts = kmalloc(sizeof(char *)
- * pdata->num_mbc_cfgs, GFP_KERNEL);
+ wm8994->mbc_texts = kmalloc_array(pdata->num_mbc_cfgs,
+ sizeof(char *),
+ GFP_KERNEL);
if (!wm8994->mbc_texts)
return;
};
/* We need an array of texts for the enum API */
- wm8994->vss_texts = kmalloc(sizeof(char *)
- * pdata->num_vss_cfgs, GFP_KERNEL);
+ wm8994->vss_texts = kmalloc_array(pdata->num_vss_cfgs,
+ sizeof(char *),
+ GFP_KERNEL);
if (!wm8994->vss_texts)
return;
};
/* We need an array of texts for the enum API */
- wm8994->vss_hpf_texts = kmalloc(sizeof(char *)
- * pdata->num_vss_hpf_cfgs, GFP_KERNEL);
+ wm8994->vss_hpf_texts = kmalloc_array(pdata->num_vss_hpf_cfgs,
+ sizeof(char *),
+ GFP_KERNEL);
if (!wm8994->vss_hpf_texts)
return;
};
/* We need an array of texts for the enum API */
- wm8994->enh_eq_texts = kmalloc(sizeof(char *)
- * pdata->num_enh_eq_cfgs, GFP_KERNEL);
+ wm8994->enh_eq_texts = kmalloc_array(pdata->num_enh_eq_cfgs,
+ sizeof(char *),
+ GFP_KERNEL);
if (!wm8994->enh_eq_texts)
return;
usb_sndisocpipe(usb_dev, ENDPOINT_PLAYBACK) :
usb_rcvisocpipe(usb_dev, ENDPOINT_CAPTURE);
- urbs = kmalloc(N_URBS * sizeof(*urbs), GFP_KERNEL);
+ urbs = kmalloc_array(N_URBS, sizeof(*urbs), GFP_KERNEL);
if (!urbs) {
*ret = -ENOMEM;
return NULL;
}
urbs[i]->transfer_buffer =
- kmalloc(FRAMES_PER_URB * BYTES_PER_FRAME, GFP_KERNEL);
+ kmalloc_array(FRAMES_PER_URB, BYTES_PER_FRAME,
+ GFP_KERNEL);
if (!urbs[i]->transfer_buffer) {
*ret = -ENOMEM;
return urbs;
&snd_usb_caiaq_ops);
cdev->data_cb_info =
- kmalloc(sizeof(struct snd_usb_caiaq_cb_info) * N_URBS,
+ kmalloc_array(N_URBS, sizeof(struct snd_usb_caiaq_cb_info),
GFP_KERNEL);
if (!cdev->data_cb_info)
*/
int r, idx;
- fp->rate_table = kmalloc(sizeof(int) * nr_rates, GFP_KERNEL);
+ fp->rate_table = kmalloc_array(nr_rates, sizeof(int),
+ GFP_KERNEL);
if (fp->rate_table == NULL)
return -ENOMEM;
goto err_free;
}
- fp->rate_table = kmalloc(sizeof(int) * fp->nr_rates, GFP_KERNEL);
+ fp->rate_table = kmalloc_array(fp->nr_rates, sizeof(int), GFP_KERNEL);
if (!fp->rate_table) {
ret = -ENOMEM;
goto err_free;
/* Invoked multiple times in a row so allocate once only */
if (!test_and_set_bit(type, &pstr->opened) && !pstr->buffer) {
- pstr->buffer = kmalloc(line6pcm->line6->iso_buffers *
- LINE6_ISO_PACKETS * pkt_size, GFP_KERNEL);
+ pstr->buffer =
+ kmalloc(array3_size(line6pcm->line6->iso_buffers,
+ LINE6_ISO_PACKETS, pkt_size),
+ GFP_KERNEL);
if (!pstr->buffer)
return -ENOMEM;
}
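The hunk above is a three-factor product, which kmalloc_array() cannot express; it instead relies on array3_size() from include/linux/overflow.h, which saturates to SIZE_MAX on overflow so the subsequent kmalloc() simply fails. Roughly:

static inline size_t array3_size(size_t a, size_t b, size_t c)
{
	size_t bytes;

	if (check_mul_overflow(a, b, &bytes))
		return SIZE_MAX;
	if (check_mul_overflow(bytes, c, &bytes))
		return SIZE_MAX;
	return bytes;
}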
cval->control = (desc->bDescriptorSubtype == UAC2_CLOCK_SELECTOR) ?
UAC2_CX_CLOCK_SELECTOR : UAC2_SU_SELECTOR;
- namelist = kmalloc(sizeof(char *) * desc->bNrInPins, GFP_KERNEL);
+ namelist = kmalloc_array(desc->bNrInPins, sizeof(char *), GFP_KERNEL);
if (!namelist) {
kfree(cval);
return -ENOMEM;
return 0;
subs->rate_list.list = rate_list =
- kmalloc(sizeof(int) * count, GFP_KERNEL);
+ kmalloc_array(count, sizeof(int), GFP_KERNEL);
if (!subs->rate_list.list)
return -ENOMEM;
subs->rate_list.count = count;
int err = 0,
i;
- if (NULL == (usX2Y->AS04.buffer = kmalloc(URB_DataLen_AsyncSeq*URBS_AsyncSeq, GFP_KERNEL))) {
+ usX2Y->AS04.buffer = kmalloc_array(URBS_AsyncSeq,
+ URB_DataLen_AsyncSeq, GFP_KERNEL);
+ if (NULL == usX2Y->AS04.buffer) {
err = -ENOMEM;
} else
for (i = 0; i < URBS_AsyncSeq; ++i) {
}
if (!is_playback && !(*purb)->transfer_buffer) {
/* allocate a capture buffer per urb */
- (*purb)->transfer_buffer = kmalloc(subs->maxpacksize * nr_of_packs(), GFP_KERNEL);
+ (*purb)->transfer_buffer =
+ kmalloc_array(subs->maxpacksize,
+ nr_of_packs(), GFP_KERNEL);
if (NULL == (*purb)->transfer_buffer) {
usX2Y_urbs_release(subs);
return -ENOMEM;
err = -ENOMEM;
goto cleanup;
}
- usbdata = kmalloc(sizeof(int) * NOOF_SETRATE_URBS, GFP_KERNEL);
+ usbdata = kmalloc_array(NOOF_SETRATE_URBS, sizeof(int),
+ GFP_KERNEL);
if (NULL == usbdata) {
err = -ENOMEM;
goto cleanup;