 		goto err;
 	if (io_alloc_hash_table(&ctx->cancel_table_locked, hash_bits))
 		goto err;
 
-	ctx->dummy_ubuf = kzalloc(sizeof(*ctx->dummy_ubuf), GFP_KERNEL);
-	if (!ctx->dummy_ubuf)
-		goto err;
-	/* set invalid range, so io_import_fixed() fails meeting it */
-	ctx->dummy_ubuf->ubuf = -1UL;
-
 	if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
 			    0, GFP_KERNEL))
 		goto err;
@@ ... @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 	INIT_WQ_LIST(&ctx->submit_state.compl_reqs);
 	return ctx;
 err:
-	kfree(ctx->dummy_ubuf);
 	kfree(ctx->cancel_table.hbs);
 	kfree(ctx->cancel_table_locked.hbs);
 	kfree(ctx->io_bl);
@@ ... @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
 	io_wq_put_hash(ctx->hash_map);
 	kfree(ctx->cancel_table.hbs);
 	kfree(ctx->cancel_table_locked.hbs);
-	kfree(ctx->dummy_ubuf);
 	kfree(ctx->io_bl);
 	xa_destroy(&ctx->io_bl_xa);
 	kfree(ctx);
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ ... @@
 #define IORING_MAX_FIXED_FILES	(1U << 20)
 #define IORING_MAX_REG_BUFFERS	(1U << 14)
 
+static const struct io_mapped_ubuf dummy_ubuf = {
+	/* set invalid range, so io_import_fixed() fails meeting it */
+	.ubuf = -1UL,
+	.ubuf_end = 0,
+};
+
 int __io_account_mem(struct user_struct *user, unsigned long nr_pages)
 {
 	unsigned long page_limit, cur_pages, new_pages;
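The hunk above replaces the per-ring, kzalloc()ed dummy buffer with one file-scope, read-only sentinel shared by every ring: an empty registration slot simply points at it, and because the sentinel's range (ubuf = -1UL, ubuf_end = 0) can never contain a requested address, io_import_fixed() rejects any transfer through such a slot, as the original comment notes. Below is a minimal, self-contained userspace C sketch of that sentinel-slot pattern; the names (struct buf, slot_clear(), slot_import()) are hypothetical stand-ins rather than the kernel's, and the bounds check only approximates what io_import_fixed() does.

/*
 * Sketch of the shared read-only sentinel pattern (hypothetical names):
 * empty slots point at one static const object instead of a per-context
 * allocation.  The sentinel is only ever compared against; it is never
 * written through and never freed.
 */
#include <stdio.h>
#include <stdlib.h>

struct buf {
	unsigned long start;	/* first valid byte */
	unsigned long end;	/* one past the last valid byte */
};

/* invalid range: no (offset, length) pair can ever fall inside it */
static const struct buf dummy_buf = { .start = -1UL, .end = 0 };

static struct buf *slots[4];

static void slot_clear(unsigned int idx)
{
	struct buf *b = slots[idx];

	/* only real buffers are freed; the shared sentinel is static */
	if (b != &dummy_buf)
		free(b);
	slots[idx] = (struct buf *)&dummy_buf;	/* cast away const, never written */
}

static int slot_import(unsigned int idx, unsigned long off, unsigned long len)
{
	const struct buf *b = slots[idx];

	/* the "invalid range" trick: the sentinel can never satisfy this */
	if (off < b->start || off + len > b->end)
		return -1;
	return 0;
}

int main(void)
{
	for (unsigned int i = 0; i < 4; i++)
		slots[i] = (struct buf *)&dummy_buf;

	slots[1] = malloc(sizeof(*slots[1]));
	if (!slots[1])
		return 1;
	slots[1]->start = 0x1000;
	slots[1]->end   = 0x2000;

	printf("import from empty slot: %d\n", slot_import(0, 0, 16));      /* -1 */
	printf("import from real slot:  %d\n", slot_import(1, 0x1000, 16)); /* 0 */

	slot_clear(1);
	slot_clear(0);	/* harmless: the sentinel is never freed */
	return 0;
}

Making the sentinel a single static const object also removes one allocation per ring, which is why the earlier io_uring.c hunks drop both the kzalloc() and the matching kfree() calls in the allocation error path and in io_ring_ctx_free().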
@@ ... @@ static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slot)
 	struct io_mapped_ubuf *imu = *slot;
 	unsigned int i;
 
-	if (imu != ctx->dummy_ubuf) {
+	if (imu != &dummy_ubuf) {
 		for (i = 0; i < imu->nr_bvecs; i++)
 			unpin_user_page(imu->bvec[i].bv_page);
 		if (imu->acct_pages)
@@ ... @@ static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
 			break;
 
 		i = array_index_nospec(up->offset + done, ctx->nr_user_bufs);
-		if (ctx->user_bufs[i] != ctx->dummy_ubuf) {
+		if (ctx->user_bufs[i] != &dummy_ubuf) {
 			err = io_queue_rsrc_removal(ctx->buf_data, i,
 						    ctx->user_bufs[i]);
 			if (unlikely(err)) {
 				io_buffer_unmap(ctx, &imu);
 				break;
 			}
-			ctx->user_bufs[i] = ctx->dummy_ubuf;
+			ctx->user_bufs[i] = (struct io_mapped_ubuf *)&dummy_ubuf;
 		}
 
 		ctx->user_bufs[i] = imu;
@@ ... @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
 	int ret, nr_pages, i;
 	struct folio *folio = NULL;
 
-	*pimu = ctx->dummy_ubuf;
+	*pimu = (struct io_mapped_ubuf *)&dummy_ubuf;
 	if (!iov->iov_base)
 		return 0;
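Since ctx->user_bufs[] holds non-const struct io_mapped_ubuf pointers, parking a slot on the read-only sentinel needs the explicit (struct io_mapped_ubuf *)&dummy_ubuf cast seen in the last two hunks. The cast stays safe because the paths shown here that unpin or free a buffer first check the slot against &dummy_ubuf, as io_buffer_unmap() does above.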