 	return false;
 }
 
+static inline unsigned io_fixup_rw_res(struct io_kiocb *req, long res)
+{
+	struct io_async_rw *io = req->async_data;
+
+	/* add previously done IO, if any */
+	if (io && io->bytes_done > 0) {
+		if (res < 0)
+			res = io->bytes_done;
+		else
+			res += io->bytes_done;
+	}
+	return res;
+}
+
 static void io_req_task_complete(struct io_kiocb *req, bool *locked)
 {
 	unsigned int cflags = io_put_rw_kbuf(req);
@@ ... @@ static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
 {
 	if (__io_complete_rw_common(req, res))
 		return;
-	__io_req_complete(req, issue_flags, req->result, io_put_rw_kbuf(req));
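+	/* report bytes from earlier attempts too, not just this attempt's res */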
+	__io_req_complete(req, issue_flags, io_fixup_rw_res(req, res), io_put_rw_kbuf(req));
 }
@@ ... @@ static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
 	if (__io_complete_rw_common(req, res))
 		return;
-	req->result = res;
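+	/* stash the fixed-up result; task_work posts it to the CQE later */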
+	req->result = io_fixup_rw_res(req, res);
 	req->io_task_work.func = io_req_task_complete;
 	io_req_task_work_add(req);
 }
@@ ... @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
 		       unsigned int issue_flags)
 {
 	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
-	struct io_async_rw *io = req->async_data;
-
-	/* add previously done IO, if any */
-	if (io && io->bytes_done > 0) {
-		if (ret < 0)
-			ret = io->bytes_done;
-		else
-			ret += io->bytes_done;
-	}
 
 	if (req->flags & REQ_F_CUR_POS)
 		req->file->f_pos = kiocb->ki_pos;
@@ ... @@
 		unsigned int cflags = io_put_rw_kbuf(req);
 		struct io_ring_ctx *ctx = req->ctx;
 
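+		/* fold in bytes done by earlier attempts before failing the request */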
+		ret = io_fixup_rw_res(req, ret);
 		req_set_fail(req);
 		if (!(issue_flags & IO_URING_F_NONBLOCK)) {
 			mutex_lock(&ctx->uring_lock);
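
For reference, below is a minimal userspace sketch of the accounting rule the
new io_fixup_rw_res() helper centralizes (the struct and function names here
are invented for illustration; this is not kernel code). It also shows why the
res parameter must be signed: with an unsigned parameter, the res < 0 test can
never be true, and an error code returned by a retried request would be added
to bytes_done as if it were a byte count.

#include <stdio.h>

/* stand-in for the bytes_done field of struct io_async_rw */
struct demo_async_rw {
	long bytes_done;	/* bytes moved by earlier attempts */
};

static long demo_fixup_rw_res(const struct demo_async_rw *io, long res)
{
	/* add previously done IO, if any */
	if (io && io->bytes_done > 0) {
		if (res < 0)	/* error after progress: report the progress */
			res = io->bytes_done;
		else		/* success: report the combined total */
			res += io->bytes_done;
	}
	return res;
}

int main(void)
{
	struct demo_async_rw io = { .bytes_done = 4096 };

	printf("%ld\n", demo_fixup_rw_res(&io, 512));	/* 4608: totals accumulate */
	printf("%ld\n", demo_fixup_rw_res(&io, -11));	/* 4096: -EAGAIN masked by progress */
	printf("%ld\n", demo_fixup_rw_res(NULL, -11));	/* -11: no prior progress, error stands */
	return 0;
}

This mirrors how a plain short read(2) behaves: once some bytes have been
transferred, the call reports the partial count, and any error is left for
the next attempt to surface.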