}
}
-bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data, s32 res,
-			      u32 cflags, u64 extra1, u64 extra2)
+static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
+				     s32 res, u32 cflags, u64 extra1, u64 extra2)
{
	struct io_overflow_cqe *ocqe;
	size_t ocq_size = sizeof(struct io_overflow_cqe);
	return true;
}
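+/*
+ * Post a request's CQE to the overflow list, reporting zeroed big-CQE
+ * fields if the request never initialised them.
+ */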
+bool io_req_cqe_overflow(struct io_kiocb *req)
+{
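+	/* big-CQE fields were never set: report zeroes, not stale data */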
+	if (!(req->flags & REQ_F_CQE32_INIT)) {
+		req->extra1 = 0;
+		req->extra2 = 0;
+	}
+	return io_cqring_event_overflow(req->ctx, req->cqe.user_data,
+					req->cqe.res, req->cqe.flags,
+					req->extra1, req->extra2);
+}
+
/*
 * writes to the cq entry need to come after reading head; the
 * control dependency is enough as we're using WRITE_ONCE to
};
struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx);
-bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data, s32 res,
-			      u32 cflags, u64 extra1, u64 extra2);
+bool io_req_cqe_overflow(struct io_kiocb *req);
static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
{
			memcpy(cqe, &req->cqe, sizeof(*cqe));
			return true;
		}
-
-		return io_cqring_event_overflow(ctx, req->cqe.user_data,
-						req->cqe.res, req->cqe.flags,
-						0, 0);
	} else {
		u64 extra1 = 0, extra2 = 0;
			WRITE_ONCE(cqe->big_cqe[1], extra2);
			return true;
		}
-
-		return io_cqring_event_overflow(ctx, req->cqe.user_data,
-						req->cqe.res, req->cqe.flags,
-						extra1, extra2);
	}
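+	/* no CQE available: queue the completion on the overflow list */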
+	return io_req_cqe_overflow(req);
}
static inline void req_set_fail(struct io_kiocb *req)