if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
return -EINVAL;
- keys = kmalloc_array(args->count, sizeof(uint8_t),
- GFP_KERNEL | __GFP_NOWARN);
- if (!keys)
- keys = vmalloc(sizeof(uint8_t) * args->count);
+ keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
if (!keys)
return -ENOMEM;
if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
return -EINVAL;
- keys = kmalloc_array(args->count, sizeof(uint8_t),
- GFP_KERNEL | __GFP_NOWARN);
- if (!keys)
- keys = vmalloc(sizeof(uint8_t) * args->count);
+ keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
if (!keys)
return -ENOMEM;
{
void *ctx;
- ctx = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL | __GFP_NOWARN);
- if (!ctx)
- ctx = vmalloc(LZO1X_MEM_COMPRESS);
+ ctx = kvmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
if (!ctx)
return ERR_PTR(-ENOMEM);
if (i < erst_record_id_cache.len)
goto retry;
if (erst_record_id_cache.len >= erst_record_id_cache.size) {
- int new_size, alloc_size;
+ int new_size;
u64 *new_entries;
new_size = erst_record_id_cache.size * 2;
pr_warn(FW_WARN "too many record IDs!\n");
return 0;
}
- alloc_size = new_size * sizeof(entries[0]);
- if (alloc_size < PAGE_SIZE)
- new_entries = kmalloc(alloc_size, GFP_KERNEL);
- else
- new_entries = vmalloc(alloc_size);
+ new_entries = kvmalloc(new_size * sizeof(entries[0]), GFP_KERNEL);
if (!new_entries)
return -ENOMEM;
memcpy(new_entries, entries,
void agp_alloc_page_array(size_t size, struct agp_memory *mem)
{
- mem->pages = NULL;
-
- if (size <= 2*PAGE_SIZE)
- mem->pages = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
- if (mem->pages == NULL) {
- mem->pages = vmalloc(size);
- }
+ mem->pages = kvmalloc(size, GFP_KERNEL);
}
EXPORT_SYMBOL(agp_alloc_page_array);
size *= nmemb;
- mem = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
- if (!mem)
- mem = vmalloc(size);
+ mem = kvmalloc(size, GFP_KERNEL);
if (!mem)
return ERR_PTR(-ENOMEM);
(heap)->used = 0; \
(heap)->size = (_size); \
_bytes = (heap)->size * sizeof(*(heap)->data); \
- (heap)->data = NULL; \
- if (_bytes < KMALLOC_MAX_SIZE) \
- (heap)->data = kmalloc(_bytes, (gfp)); \
- if ((!(heap)->data) && ((gfp) & GFP_KERNEL)) \
- (heap)->data = vmalloc(_bytes); \
+ (heap)->data = kvmalloc(_bytes, (gfp) & GFP_KERNEL); \
(heap)->data; \
})
\
(fifo)->mask = _allocated_size - 1; \
(fifo)->front = (fifo)->back = 0; \
- (fifo)->data = NULL; \
\
- if (_bytes < KMALLOC_MAX_SIZE) \
- (fifo)->data = kmalloc(_bytes, (gfp)); \
- if ((!(fifo)->data) && ((gfp) & GFP_KERNEL)) \
- (fifo)->data = vmalloc(_bytes); \
+ (fifo)->data = kvmalloc(_bytes, (gfp) & GFP_KERNEL); \
(fifo)->data; \
})
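In the two bcache macros above, the old code tried kmalloc() with the caller's full gfp mask but fell back to vmalloc() only when that mask contained GFP_KERNEL bits, since vmalloc() allocates its page tables with GFP_KERNEL internally. The replacement passes (gfp) & GFP_KERNEL to kvmalloc(), restricting the request to the GFP_KERNEL-compatible subset of the caller's flags. A rough sketch of what the heap allocation reduces to after this change, with heap_alloc_data() as an illustrative name only, not part of the patch:

	/* Illustrative sketch, not from the patch: the allocation the heap
	 * macro performs after this change.  Bits outside GFP_KERNEL are
	 * masked off, echoing the old "only fall back to vmalloc when
	 * (gfp & GFP_KERNEL)" gate. */
	static void *heap_alloc_data(size_t nmemb, size_t elem_size, gfp_t gfp)
	{
		return kvmalloc(nmemb * elem_size, gfp & GFP_KERNEL);
	}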
#define VALIDATE_TID 1
-void *cxgb_alloc_mem(unsigned long size);
-void cxgb_free_mem(void *addr);
-
/*
* Map an ATID or STID to their entries in the corresponding TID tables.
*/
}
/*
- * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
- * The allocated memory is cleared.
- */
-void *cxgb_alloc_mem(unsigned long size)
-{
- void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
-
- if (!p)
- p = vzalloc(size);
- return p;
-}
-
-/*
- * Free memory allocated through t3_alloc_mem().
- */
-void cxgb_free_mem(void *addr)
-{
- kvfree(addr);
-}
-
-/*
* Allocate and initialize the TID tables. Returns 0 on success.
*/
static int init_tid_tabs(struct tid_info *t, unsigned int ntids,
unsigned long size = ntids * sizeof(*t->tid_tab) +
natids * sizeof(*t->atid_tab) + nstids * sizeof(*t->stid_tab);
- t->tid_tab = cxgb_alloc_mem(size);
+ t->tid_tab = kvzalloc(size, GFP_KERNEL);
if (!t->tid_tab)
return -ENOMEM;
static void free_tid_maps(struct tid_info *t)
{
- cxgb_free_mem(t->tid_tab);
+ kvfree(t->tid_tab);
}
static inline void add_adapter(struct adapter *adap)
return 0;
out_free_l2t:
- t3_free_l2t(l2td);
+ kvfree(l2td);
out_free:
kfree(t);
return err;
static void clean_l2_data(struct rcu_head *head)
{
struct l2t_data *d = container_of(head, struct l2t_data, rcu_head);
- t3_free_l2t(d);
+ kvfree(d);
}
struct l2t_data *d;
int i, size = sizeof(*d) + l2t_capacity * sizeof(struct l2t_entry);
- d = cxgb_alloc_mem(size);
+ d = kvzalloc(size, GFP_KERNEL);
if (!d)
return NULL;
}
return d;
}
-
-void t3_free_l2t(struct l2t_data *d)
-{
- cxgb_free_mem(d);
-}
-
struct l2t_entry *e);
void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e);
struct l2t_data *t3_init_l2t(unsigned int l2t_capacity);
-void t3_free_l2t(struct l2t_data *d);
int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb);
if (clipt_size < CLIPT_MIN_HASH_BUCKETS)
return NULL;
- ctbl = t4_alloc_mem(sizeof(*ctbl) +
- clipt_size*sizeof(struct list_head));
+ ctbl = kvzalloc(sizeof(*ctbl) +
+ clipt_size*sizeof(struct list_head), GFP_KERNEL);
if (!ctbl)
return NULL;
for (i = 0; i < ctbl->clipt_size; ++i)
INIT_LIST_HEAD(&ctbl->hash_list[i]);
- cl_list = t4_alloc_mem(clipt_size*sizeof(struct clip_entry));
+ cl_list = kvzalloc(clipt_size*sizeof(struct clip_entry), GFP_KERNEL);
if (!cl_list) {
- t4_free_mem(ctbl);
+ kvfree(ctbl);
return NULL;
}
ctbl->cl_list = (void *)cl_list;
if (ctbl) {
if (ctbl->cl_list)
- t4_free_mem(ctbl->cl_list);
- t4_free_mem(ctbl);
+ kvfree(ctbl->cl_list);
+ kvfree(ctbl);
}
}
EXPORT_SYMBOL(t4_cleanup_clip_tbl);
void t4_os_portmod_changed(const struct adapter *adap, int port_id);
void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);
-void *t4_alloc_mem(size_t size);
-
void t4_free_sge_resources(struct adapter *adap);
void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q);
irq_handler_t t4_intr_handler(struct adapter *adap);
int rateunit, int ratemode, int channel, int class,
int minrate, int maxrate, int weight, int pktsize);
void t4_sge_decode_idma_state(struct adapter *adapter, int state);
-void t4_free_mem(void *addr);
void t4_idma_monitor_init(struct adapter *adapter,
struct sge_idma_monitor_state *idma);
void t4_idma_monitor(struct adapter *adapter,
if (count > avail - pos)
count = avail - pos;
- data = t4_alloc_mem(count);
+ data = kvzalloc(count, GFP_KERNEL);
if (!data)
return -ENOMEM;
ret = t4_memory_rw(adap, 0, mem, pos, count, data, T4_MEMORY_READ);
spin_unlock(&adap->win0_lock);
if (ret) {
- t4_free_mem(data);
+ kvfree(data);
return ret;
}
ret = copy_to_user(buf, data, count);
- t4_free_mem(data);
+ kvfree(data);
if (ret)
return -EFAULT;
adap->sge.egr_sz, adap->sge.blocked_fl);
len += sprintf(buf + len, "\n");
size = simple_read_from_buffer(ubuf, count, ppos, buf, len);
- t4_free_mem(buf);
+ kvfree(buf);
return size;
}
return err;
bitmap_copy(adap->sge.blocked_fl, t, adap->sge.egr_sz);
- t4_free_mem(t);
+ kvfree(t);
return count;
}
{
int i, err = 0;
struct adapter *adapter = netdev2adap(dev);
- u8 *buf = t4_alloc_mem(EEPROMSIZE);
+ u8 *buf = kvzalloc(EEPROMSIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
if (!err)
memcpy(data, buf + e->offset, e->len);
- t4_free_mem(buf);
+ kvfree(buf);
return err;
}
if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
/* RMW possibly needed for first or last words.
*/
- buf = t4_alloc_mem(aligned_len);
+ buf = kvzalloc(aligned_len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
err = t4_seeprom_wp(adapter, true);
out:
if (buf != data)
- t4_free_mem(buf);
+ kvfree(buf);
return err;
}
return err;
}
-/*
- * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
- * The allocated memory is cleared.
- */
-void *t4_alloc_mem(size_t size)
-{
- void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
-
- if (!p)
- p = vzalloc(size);
- return p;
-}
-
-/*
- * Free memory allocated through alloc_mem().
- */
-void t4_free_mem(void *addr)
-{
- kvfree(addr);
-}
-
static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback)
{
max_ftids * sizeof(*t->ftid_tab) +
ftid_bmap_size * sizeof(long);
- t->tid_tab = t4_alloc_mem(size);
+ t->tid_tab = kvzalloc(size, GFP_KERNEL);
if (!t->tid_tab)
return -ENOMEM;
/* allocate memory to read the header of the firmware on the
* card
*/
- card_fw = t4_alloc_mem(sizeof(*card_fw));
+ card_fw = kvzalloc(sizeof(*card_fw), GFP_KERNEL);
/* Get FW from /lib/firmware/ */
ret = request_firmware(&fw, fw_info->fw_mod_name,
/* Cleaning up */
release_firmware(fw);
- t4_free_mem(card_fw);
+ kvfree(card_fw);
if (ret < 0)
goto bye;
{
unsigned int i;
- t4_free_mem(adapter->l2t);
+ kvfree(adapter->l2t);
t4_cleanup_sched(adapter);
- t4_free_mem(adapter->tids.tid_tab);
+ kvfree(adapter->tids.tid_tab);
cxgb4_cleanup_tc_u32(adapter);
kfree(adapter->sge.egr_map);
kfree(adapter->sge.ingr_map);
for (i = 0; i < t->size; i++) {
struct cxgb4_link *link = &t->table[i];
- t4_free_mem(link->tid_map);
+ kvfree(link->tid_map);
}
- t4_free_mem(adap->tc_u32);
+ kvfree(adap->tc_u32);
}
struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap)
if (!max_tids)
return NULL;
- t = t4_alloc_mem(sizeof(*t) +
- (max_tids * sizeof(struct cxgb4_link)));
+ t = kvzalloc(sizeof(*t) +
+ (max_tids * sizeof(struct cxgb4_link)), GFP_KERNEL);
if (!t)
return NULL;
unsigned int bmap_size;
bmap_size = BITS_TO_LONGS(max_tids);
- link->tid_map = t4_alloc_mem(sizeof(unsigned long) * bmap_size);
+ link->tid_map = kvzalloc(sizeof(unsigned long) * bmap_size, GFP_KERNEL);
if (!link->tid_map)
goto out_no_mem;
bitmap_zero(link->tid_map, max_tids);
struct cxgb4_link *link = &t->table[i];
if (link->tid_map)
- t4_free_mem(link->tid_map);
+ kvfree(link->tid_map);
}
if (t)
- t4_free_mem(t);
+ kvfree(t);
return NULL;
}
if (l2t_size < L2T_MIN_HASH_BUCKETS)
return NULL;
- d = t4_alloc_mem(sizeof(*d) + l2t_size * sizeof(struct l2t_entry));
+ d = kvzalloc(sizeof(*d) + l2t_size * sizeof(struct l2t_entry), GFP_KERNEL);
if (!d)
return NULL;
}
list_del(&qe->list);
- t4_free_mem(qe);
+ kvfree(qe);
if (atomic_dec_and_test(&e->refcnt)) {
e->state = SCHED_STATE_UNUSED;
memset(&e->info, 0, sizeof(e->info));
if (p->queue < 0 || p->queue >= pi->nqsets)
return -ERANGE;
- qe = t4_alloc_mem(sizeof(struct sched_queue_entry));
+ qe = kvzalloc(sizeof(struct sched_queue_entry), GFP_KERNEL);
if (!qe)
return -ENOMEM;
/* Unbind queue from any existing class */
err = t4_sched_queue_unbind(pi, p);
if (err) {
- t4_free_mem(qe);
+ kvfree(qe);
goto out;
}
spin_lock(&e->lock);
err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE, true);
if (err) {
- t4_free_mem(qe);
+ kvfree(qe);
spin_unlock(&e->lock);
goto out;
}
struct sched_table *s;
unsigned int i;
- s = t4_alloc_mem(sizeof(*s) + sched_size * sizeof(struct sched_class));
+ s = kvzalloc(sizeof(*s) + sched_size * sizeof(struct sched_class), GFP_KERNEL);
if (!s)
return NULL;
t4_sched_class_free(pi, e);
write_unlock(&s->rw_lock);
}
- t4_free_mem(s);
+ kvfree(s);
}
}
ring->full_size = ring->size - HEADROOM - MAX_DESC_TXBBS;
tmp = size * sizeof(struct mlx4_en_tx_info);
- ring->tx_info = kmalloc_node(tmp, GFP_KERNEL | __GFP_NOWARN, node);
+ ring->tx_info = kvmalloc_node(tmp, GFP_KERNEL, node);
if (!ring->tx_info) {
- ring->tx_info = vmalloc(tmp);
- if (!ring->tx_info) {
- err = -ENOMEM;
- goto err_ring;
- }
+ err = -ENOMEM;
+ goto err_ring;
}
en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
for (i = 0; i <= buddy->max_order; ++i) {
s = BITS_TO_LONGS(1 << (buddy->max_order - i));
- buddy->bits[i] = kcalloc(s, sizeof (long), GFP_KERNEL | __GFP_NOWARN);
- if (!buddy->bits[i]) {
- buddy->bits[i] = vzalloc(s * sizeof(long));
- if (!buddy->bits[i])
- goto err_out_free;
- }
+ buddy->bits[i] = kvmalloc_array(s, sizeof(long), GFP_KERNEL | __GFP_ZERO);
+ if (!buddy->bits[i])
+ goto err_out_free;
}
set_bit(0, buddy->bits[buddy->max_order]);
return -ENXIO;
}
- ndd->data = kmalloc(ndd->nsarea.config_size, GFP_KERNEL);
- if (!ndd->data)
- ndd->data = vmalloc(ndd->nsarea.config_size);
-
+ ndd->data = kvmalloc(ndd->nsarea.config_size, GFP_KERNEL);
if (!ndd->data)
return -ENOMEM;
void *libcfs_kvzalloc_cpt(struct cfs_cpt_table *cptab, int cpt, size_t size,
gfp_t flags)
{
- void *ret;
-
- ret = kzalloc_node(size, flags | __GFP_NOWARN,
- cfs_cpt_spread_node(cptab, cpt));
- if (!ret) {
- WARN_ON(!(flags & (__GFP_FS | __GFP_HIGH)));
- ret = vmalloc_node(size, cfs_cpt_spread_node(cptab, cpt));
- }
-
- return ret;
+ return kvzalloc_node(size, flags, cfs_cpt_spread_node(cptab, cpt));
}
EXPORT_SYMBOL(libcfs_kvzalloc_cpt);
bool enabled;
};
-static evtchn_port_t *evtchn_alloc_ring(unsigned int size)
-{
- evtchn_port_t *ring;
- size_t s = size * sizeof(*ring);
-
- ring = kmalloc(s, GFP_KERNEL);
- if (!ring)
- ring = vmalloc(s);
-
- return ring;
-}
-
static void evtchn_free_ring(evtchn_port_t *ring)
{
kvfree(ring);
else
new_size = 2 * u->ring_size;
- new_ring = evtchn_alloc_ring(new_size);
+ new_ring = kvmalloc(new_size * sizeof(*new_ring), GFP_KERNEL);
if (!new_ring)
return -ENOMEM;
goto out;
}
- tmp_buf = kmalloc(fs_info->nodesize, GFP_KERNEL | __GFP_NOWARN);
+ tmp_buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
if (!tmp_buf) {
- tmp_buf = vmalloc(fs_info->nodesize);
- if (!tmp_buf) {
- ret = -ENOMEM;
- goto out;
- }
+ ret = -ENOMEM;
+ goto out;
}
left_path->search_commit_root = 1;
u64 last_dest_end = destoff;
ret = -ENOMEM;
- buf = kmalloc(fs_info->nodesize, GFP_KERNEL | __GFP_NOWARN);
- if (!buf) {
- buf = vmalloc(fs_info->nodesize);
- if (!buf)
- return ret;
- }
+ buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
+ if (!buf)
+ return ret;
path = btrfs_alloc_path();
if (!path) {
sctx->clone_roots_cnt = arg->clone_sources_count;
sctx->send_max_size = BTRFS_SEND_BUF_SIZE;
- sctx->send_buf = kmalloc(sctx->send_max_size, GFP_KERNEL | __GFP_NOWARN);
+ sctx->send_buf = kvmalloc(sctx->send_max_size, GFP_KERNEL);
if (!sctx->send_buf) {
- sctx->send_buf = vmalloc(sctx->send_max_size);
- if (!sctx->send_buf) {
- ret = -ENOMEM;
- goto out;
- }
+ ret = -ENOMEM;
+ goto out;
}
- sctx->read_buf = kmalloc(BTRFS_SEND_READ_SIZE, GFP_KERNEL | __GFP_NOWARN);
+ sctx->read_buf = kvmalloc(BTRFS_SEND_READ_SIZE, GFP_KERNEL);
if (!sctx->read_buf) {
- sctx->read_buf = vmalloc(BTRFS_SEND_READ_SIZE);
- if (!sctx->read_buf) {
- ret = -ENOMEM;
- goto out;
- }
+ ret = -ENOMEM;
+ goto out;
}
sctx->pending_dir_moves = RB_ROOT;
alloc_size = arg->clone_sources_count * sizeof(*arg->clone_sources);
if (arg->clone_sources_count) {
- clone_sources_tmp = kmalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN);
+ clone_sources_tmp = kvmalloc(alloc_size, GFP_KERNEL);
if (!clone_sources_tmp) {
- clone_sources_tmp = vmalloc(alloc_size);
- if (!clone_sources_tmp) {
- ret = -ENOMEM;
- goto out;
- }
+ ret = -ENOMEM;
+ goto out;
}
ret = copy_from_user(clone_sources_tmp, arg->clone_sources,
align = (unsigned long)(it->iov->iov_base + it->iov_offset) &
(PAGE_SIZE - 1);
npages = calc_pages_for(align, nbytes);
- pages = kmalloc(sizeof(*pages) * npages, GFP_KERNEL);
- if (!pages) {
- pages = vmalloc(sizeof(*pages) * npages);
- if (!pages)
- return ERR_PTR(-ENOMEM);
- }
+ pages = kvmalloc(sizeof(*pages) * npages, GFP_KERNEL);
+ if (!pages)
+ return ERR_PTR(-ENOMEM);
for (idx = 0; idx < npages; ) {
size_t start;
goto out_nofds;
alloc_size = 6 * size;
- bits = kmalloc(alloc_size, GFP_KERNEL|__GFP_NOWARN);
- if (!bits && alloc_size > PAGE_SIZE)
- bits = vmalloc(alloc_size);
-
+ bits = kvmalloc(alloc_size, GFP_KERNEL);
if (!bits)
goto out_nofds;
}
if (size) {
if (size > XATTR_SIZE_MAX)
return -E2BIG;
- kvalue = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
- if (!kvalue) {
- kvalue = vmalloc(size);
- if (!kvalue)
- return -ENOMEM;
- }
+ kvalue = kvmalloc(size, GFP_KERNEL);
+ if (!kvalue)
+ return -ENOMEM;
if (copy_from_user(kvalue, value, size)) {
error = -EFAULT;
goto out;
if (size) {
if (size > XATTR_SIZE_MAX)
size = XATTR_SIZE_MAX;
- kvalue = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
- if (!kvalue) {
- kvalue = vzalloc(size);
- if (!kvalue)
- return -ENOMEM;
- }
+ kvalue = kvzalloc(size, GFP_KERNEL);
+ if (!kvalue)
+ return -ENOMEM;
}
error = vfs_getxattr(d, kname, kvalue, size);
if (size) {
if (size > XATTR_LIST_MAX)
size = XATTR_LIST_MAX;
- klist = kmalloc(size, __GFP_NOWARN | GFP_KERNEL);
- if (!klist) {
- klist = vmalloc(size);
- if (!klist)
- return -ENOMEM;
- }
+ klist = kvmalloc(size, GFP_KERNEL);
+ if (!klist)
+ return -ENOMEM;
}
error = vfs_listxattr(d, klist, size);
static inline void *mlx5_vzalloc(unsigned long size)
{
- void *rtn;
-
- rtn = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
- if (!rtn)
- rtn = vzalloc(size);
- return rtn;
+ return kvzalloc(size, GFP_KERNEL);
}
static inline u32 mlx5_base_mkey(const u32 key)
return kvmalloc(size, flags | __GFP_ZERO);
}
+static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
+{
+ if (size != 0 && n > SIZE_MAX / size)
+ return NULL;
+
+ return kvmalloc(n * size, flags);
+}
+
extern void kvfree(const void *addr);
static inline atomic_t *compound_mapcount_ptr(struct page *page)
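The kvmalloc_array() helper added above mirrors kmalloc_array(): it returns NULL when n * size would overflow and otherwise forwards to kvmalloc(), and the result is released with kvfree() whether it ended up kmalloc- or vmalloc-backed. A minimal usage sketch, where alloc_id_map() and nids are hypothetical names, not from the patch:

	/* Hypothetical caller: nids may be large or user-controlled, so the
	 * element-count multiplication must be overflow-checked.
	 * kvmalloc_array() returns NULL on overflow or allocation failure;
	 * the buffer is freed with kvfree(), as the conversions elsewhere in
	 * this patch do. */
	static unsigned long *alloc_id_map(size_t nids)
	{
		return kvmalloc_array(nids, sizeof(unsigned long),
				      GFP_KERNEL | __GFP_ZERO);
	}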
static struct page **get_pages_array(size_t n)
{
- struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
- if (!p)
- p = vmalloc(n * sizeof(struct page *));
- return p;
+ return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
}
static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
* Avoid higher order allocations, use vmalloc instead. It should
* be rare anyway.
*/
- if (size <= PAGE_SIZE)
- vec = kmalloc(size, GFP_KERNEL);
- else
- vec = vmalloc(size);
+ vec = kvmalloc(size, GFP_KERNEL);
if (!vec)
return NULL;
vec->nr_allocated = nr_frames;
/* no more locks than number of hash buckets */
nblocks = min(nblocks, hashinfo->ehash_mask + 1);
- hashinfo->ehash_locks = kmalloc_array(nblocks, locksz,
- GFP_KERNEL | __GFP_NOWARN);
- if (!hashinfo->ehash_locks)
- hashinfo->ehash_locks = vmalloc(nblocks * locksz);
-
+ hashinfo->ehash_locks = kvmalloc_array(nblocks, locksz, GFP_KERNEL);
if (!hashinfo->ehash_locks)
return -ENOMEM;
tcp_metrics_hash_log = order_base_2(slots);
size = sizeof(struct tcpm_hash_bucket) << tcp_metrics_hash_log;
- tcp_metrics_hash = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
- if (!tcp_metrics_hash)
- tcp_metrics_hash = vzalloc(size);
-
+ tcp_metrics_hash = kvzalloc(size, GFP_KERNEL);
if (!tcp_metrics_hash)
return -ENOMEM;
unsigned index;
if (size) {
- labels = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
- if (!labels)
- labels = vzalloc(size);
-
+ labels = kvzalloc(size, GFP_KERNEL);
if (!labels)
goto nolabels;
}
*/
unsigned int *xt_alloc_entry_offsets(unsigned int size)
{
- unsigned int *off;
- off = kcalloc(size, sizeof(unsigned int), GFP_KERNEL | __GFP_NOWARN);
-
- if (off)
- return off;
-
- if (size < (SIZE_MAX / sizeof(unsigned int)))
- off = vmalloc(size * sizeof(unsigned int));
-
- return off;
+ return kvmalloc_array(size, sizeof(unsigned int), GFP_KERNEL | __GFP_ZERO);
}
EXPORT_SYMBOL(xt_alloc_entry_offsets);
size = sizeof(void **) * nr_cpu_ids;
if (size > PAGE_SIZE)
- i->jumpstack = vzalloc(size);
+ i->jumpstack = kvzalloc(size, GFP_KERNEL);
else
i->jumpstack = kzalloc(size, GFP_KERNEL);
if (i->jumpstack == NULL)
*/
size = sizeof(void *) * i->stacksize * 2u;
for_each_possible_cpu(cpu) {
- if (size > PAGE_SIZE)
- i->jumpstack[cpu] = vmalloc_node(size,
- cpu_to_node(cpu));
- else
- i->jumpstack[cpu] = kmalloc_node(size,
- GFP_KERNEL, cpu_to_node(cpu));
+ i->jumpstack[cpu] = kvmalloc_node(size, GFP_KERNEL,
+ cpu_to_node(cpu));
if (i->jumpstack[cpu] == NULL)
/*
* Freeing will be done later on by the callers. The
}
sz = sizeof(*t) + sizeof(t->iphash[0]) * ip_list_hash_size;
- if (sz <= PAGE_SIZE)
- t = kzalloc(sz, GFP_KERNEL);
- else
- t = vzalloc(sz);
+ t = kvzalloc(sz, GFP_KERNEL);
if (t == NULL) {
ret = -ENOMEM;
goto out;
if (mask != q->tab_mask) {
struct sk_buff **ntab;
- ntab = kcalloc(mask + 1, sizeof(struct sk_buff *),
- GFP_KERNEL | __GFP_NOWARN);
- if (!ntab)
- ntab = vzalloc((mask + 1) * sizeof(struct sk_buff *));
+ ntab = kvmalloc_array((mask + 1), sizeof(struct sk_buff *), GFP_KERNEL | __GFP_ZERO);
if (!ntab)
return -ENOMEM;
return 0;
}
-static void *fq_codel_zalloc(size_t sz)
-{
- void *ptr = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);
-
- if (!ptr)
- ptr = vzalloc(sz);
- return ptr;
-}
-
-static void fq_codel_free(void *addr)
-{
- kvfree(addr);
-}
-
static void fq_codel_destroy(struct Qdisc *sch)
{
struct fq_codel_sched_data *q = qdisc_priv(sch);
tcf_destroy_chain(&q->filter_list);
- fq_codel_free(q->backlogs);
- fq_codel_free(q->flows);
+ kvfree(q->backlogs);
+ kvfree(q->flows);
}
static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
}
if (!q->flows) {
- q->flows = fq_codel_zalloc(q->flows_cnt *
- sizeof(struct fq_codel_flow));
+ q->flows = kvzalloc(q->flows_cnt *
+ sizeof(struct fq_codel_flow), GFP_KERNEL);
if (!q->flows)
return -ENOMEM;
- q->backlogs = fq_codel_zalloc(q->flows_cnt * sizeof(u32));
+ q->backlogs = kvzalloc(q->flows_cnt * sizeof(u32), GFP_KERNEL);
if (!q->backlogs) {
- fq_codel_free(q->flows);
+ kvfree(q->flows);
return -ENOMEM;
}
for (i = 0; i < q->flows_cnt; i++) {
rtnl_kfree_skbs(skb, skb);
}
-static void *hhf_zalloc(size_t sz)
-{
- void *ptr = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);
-
- if (!ptr)
- ptr = vzalloc(sz);
-
- return ptr;
-}
-
-static void hhf_free(void *addr)
-{
- kvfree(addr);
-}
-
static void hhf_destroy(struct Qdisc *sch)
{
int i;
struct hhf_sched_data *q = qdisc_priv(sch);
for (i = 0; i < HHF_ARRAYS_CNT; i++) {
- hhf_free(q->hhf_arrays[i]);
- hhf_free(q->hhf_valid_bits[i]);
+ kvfree(q->hhf_arrays[i]);
+ kvfree(q->hhf_valid_bits[i]);
}
for (i = 0; i < HH_FLOWS_CNT; i++) {
kfree(flow);
}
}
- hhf_free(q->hh_flows);
+ kvfree(q->hh_flows);
}
static const struct nla_policy hhf_policy[TCA_HHF_MAX + 1] = {
if (!q->hh_flows) {
/* Initialize heavy-hitter flow table. */
- q->hh_flows = hhf_zalloc(HH_FLOWS_CNT *
- sizeof(struct list_head));
+ q->hh_flows = kvzalloc(HH_FLOWS_CNT *
+ sizeof(struct list_head), GFP_KERNEL);
if (!q->hh_flows)
return -ENOMEM;
for (i = 0; i < HH_FLOWS_CNT; i++)
/* Initialize heavy-hitter filter arrays. */
for (i = 0; i < HHF_ARRAYS_CNT; i++) {
- q->hhf_arrays[i] = hhf_zalloc(HHF_ARRAYS_LEN *
- sizeof(u32));
+ q->hhf_arrays[i] = kvzalloc(HHF_ARRAYS_LEN *
+ sizeof(u32), GFP_KERNEL);
if (!q->hhf_arrays[i]) {
/* Note: hhf_destroy() will be called
* by our caller.
/* Initialize valid bits of heavy-hitter filter arrays. */
for (i = 0; i < HHF_ARRAYS_CNT; i++) {
- q->hhf_valid_bits[i] = hhf_zalloc(HHF_ARRAYS_LEN /
- BITS_PER_BYTE);
+ q->hhf_valid_bits[i] = kvzalloc(HHF_ARRAYS_LEN /
+ BITS_PER_BYTE, GFP_KERNEL);
if (!q->hhf_valid_bits[i]) {
/* Note: hhf_destroy() will be called
* by our caller.
spinlock_t *root_lock;
struct disttable *d;
int i;
- size_t s;
if (n > NETEM_DIST_MAX)
return -EINVAL;
- s = sizeof(struct disttable) + n * sizeof(s16);
- d = kmalloc(s, GFP_KERNEL | __GFP_NOWARN);
- if (!d)
- d = vmalloc(s);
+ d = kvmalloc(sizeof(struct disttable) + n * sizeof(s16), GFP_KERNEL);
if (!d)
return -ENOMEM;
static void *sfq_alloc(size_t sz)
{
- void *ptr = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN);
-
- if (!ptr)
- ptr = vmalloc(sz);
- return ptr;
+ return kvmalloc(sz, GFP_KERNEL);
}
static void sfq_free(void *addr)
if (_payload) {
ret = -ENOMEM;
- payload = kmalloc(plen, GFP_KERNEL | __GFP_NOWARN);
- if (!payload) {
- if (plen <= PAGE_SIZE)
- goto error2;
- payload = vmalloc(plen);
- if (!payload)
- goto error2;
- }
+ payload = kvmalloc(plen, GFP_KERNEL);
+ if (!payload)
+ goto error2;
ret = -EFAULT;
if (copy_from_user(payload, _payload, plen) != 0)
if (from) {
ret = -ENOMEM;
- payload = kmalloc(plen, GFP_KERNEL);
- if (!payload) {
- if (plen <= PAGE_SIZE)
- goto error;
- payload = vmalloc(plen);
- if (!payload)
- goto error;
- }
+ payload = kvmalloc(plen, GFP_KERNEL);
+ if (!payload)
+ goto error;
ret = -EFAULT;
if (!copy_from_iter_full(payload, plen, from))