// SPDX-License-Identifier: GPL-2.0
/*
 * io_uring opcode handling table
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/io_uring.h>

#include "io_uring.h"
#include "opdef.h"
#include "refs.h"
#include "tctx.h"
#include "sqpoll.h"
#include "fdinfo.h"
#include "kbuf.h"
#include "rsrc.h"

#include "xattr.h"
#include "nop.h"
#include "fs.h"
#include "splice.h"
#include "sync.h"
#include "advise.h"
#include "openclose.h"
#include "uring_cmd.h"
#include "epoll.h"
#include "statx.h"
#include "net.h"
#include "msg_ring.h"
#include "timeout.h"
#include "poll.h"
#include "cancel.h"
#include "rw.h"
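
/*
 * Stub handlers shared by the opcode table below: io_no_issue() backs
 * opcodes whose issue path must never run, and io_eopnotsupp_prep()
 * fails preparation with -EOPNOTSUPP for opcodes that are compiled out
 * of this kernel configuration.
 */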
static int io_no_issue(struct io_kiocb *req, unsigned int issue_flags)
{
	WARN_ON_ONCE(1);
	return -ECANCELED;
}

static __maybe_unused int io_eopnotsupp_prep(struct io_kiocb *kiocb,
					     const struct io_uring_sqe *sqe)
{
	return -EOPNOTSUPP;
}

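/*
 * Per-opcode descriptor table, indexed by IORING_OP_* opcode value.
 * ->prep() validates and prepares a request from its SQE, ->issue()
 * executes it, and ->prep_async(), ->cleanup() and ->fail() cover async
 * setup, resource teardown and failure completion where an opcode needs
 * them. async_size is the size of the per-request async data allocation,
 * the bitfields describe file and polling requirements, and ->name is
 * used for debug output.
 */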
const struct io_op_def io_op_defs[] = {
	[IORING_OP_READV] = {
		.unbound_nonreg_file = 1,
		.async_size = sizeof(struct io_async_rw),
		.prep_async = io_readv_prep_async,
		.cleanup = io_readv_writev_cleanup,
	},
	[IORING_OP_WRITEV] = {
		.unbound_nonreg_file = 1,
		.async_size = sizeof(struct io_async_rw),
		.prep_async = io_writev_prep_async,
		.cleanup = io_readv_writev_cleanup,
	},
	[IORING_OP_FSYNC] = {
		.prep = io_fsync_prep,
	},
	[IORING_OP_READ_FIXED] = {
		.unbound_nonreg_file = 1,
		.async_size = sizeof(struct io_async_rw),
		.name = "READ_FIXED",
	},
	[IORING_OP_WRITE_FIXED] = {
		.unbound_nonreg_file = 1,
		.async_size = sizeof(struct io_async_rw),
		.name = "WRITE_FIXED",
	},
	[IORING_OP_POLL_ADD] = {
		.unbound_nonreg_file = 1,
		.prep = io_poll_add_prep,
		.issue = io_poll_add,
	},
	[IORING_OP_POLL_REMOVE] = {
		.name = "POLL_REMOVE",
		.prep = io_poll_remove_prep,
		.issue = io_poll_remove,
	},
	[IORING_OP_SYNC_FILE_RANGE] = {
		.name = "SYNC_FILE_RANGE",
		.issue = io_sync_file_range,
	},
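	/*
	 * For the network opcodes below, kernels built without CONFIG_NET
	 * fall back to io_eopnotsupp_prep() so the request fails with
	 * -EOPNOTSUPP at prep time instead of reaching an unhandled opcode.
	 */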
	[IORING_OP_SENDMSG] = {
		.unbound_nonreg_file = 1,
#if defined(CONFIG_NET)
		.async_size = sizeof(struct io_async_msghdr),
		.prep = io_sendmsg_prep,
		.prep_async = io_sendmsg_prep_async,
		.cleanup = io_sendmsg_recvmsg_cleanup,
		.fail = io_sendrecv_fail,
#else
		.prep = io_eopnotsupp_prep,
#endif
	},
	[IORING_OP_RECVMSG] = {
		.unbound_nonreg_file = 1,
#if defined(CONFIG_NET)
		.async_size = sizeof(struct io_async_msghdr),
		.prep = io_recvmsg_prep,
		.prep_async = io_recvmsg_prep_async,
		.cleanup = io_sendmsg_recvmsg_cleanup,
		.fail = io_sendrecv_fail,
#else
		.prep = io_eopnotsupp_prep,
#endif
	},
	[IORING_OP_TIMEOUT] = {
		.async_size = sizeof(struct io_timeout_data),
		.prep = io_timeout_prep,
	},
	[IORING_OP_TIMEOUT_REMOVE] = {
		/* used by timeout updates' prep() */
		.name = "TIMEOUT_REMOVE",
		.prep = io_timeout_remove_prep,
		.issue = io_timeout_remove,
	},
	[IORING_OP_ACCEPT] = {
		.unbound_nonreg_file = 1,
		.ioprio = 1, /* used for flags */
#if defined(CONFIG_NET)
		.prep = io_accept_prep,
#else
		.prep = io_eopnotsupp_prep,
#endif
	},
	[IORING_OP_ASYNC_CANCEL] = {
		.name = "ASYNC_CANCEL",
		.prep = io_async_cancel_prep,
		.issue = io_async_cancel,
	},
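	/*
	 * Link timeouts are armed by the core when the request they are
	 * linked to is issued, so ->issue() must never be called for them;
	 * io_no_issue() warns and cancels if it ever is.
	 */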
	[IORING_OP_LINK_TIMEOUT] = {
		.async_size = sizeof(struct io_timeout_data),
		.name = "LINK_TIMEOUT",
		.prep = io_link_timeout_prep,
		.issue = io_no_issue,
	},
	[IORING_OP_CONNECT] = {
		.unbound_nonreg_file = 1,
#if defined(CONFIG_NET)
		.async_size = sizeof(struct io_async_connect),
		.prep = io_connect_prep,
		.prep_async = io_connect_prep_async,
#else
		.prep = io_eopnotsupp_prep,
#endif
	},
	[IORING_OP_FALLOCATE] = {
		.prep = io_fallocate_prep,
		.issue = io_fallocate,
	},
	[IORING_OP_OPENAT] = {
		.prep = io_openat_prep,
		.cleanup = io_open_cleanup,
	},
	[IORING_OP_CLOSE] = {
		.prep = io_close_prep,
	},
	[IORING_OP_FILES_UPDATE] = {
		.name = "FILES_UPDATE",
		.prep = io_files_update_prep,
		.issue = io_files_update,
	},
	[IORING_OP_STATX] = {
		.prep = io_statx_prep,
		.cleanup = io_statx_cleanup,
	},
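	/*
	 * READ and WRITE are the non-vectored counterparts of READV and
	 * WRITEV; they share struct io_async_rw for their async state.
	 */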
	[IORING_OP_READ] = {
		.unbound_nonreg_file = 1,
		.async_size = sizeof(struct io_async_rw),
	},
	[IORING_OP_WRITE] = {
		.unbound_nonreg_file = 1,
		.async_size = sizeof(struct io_async_rw),
	},
	[IORING_OP_FADVISE] = {
		.prep = io_fadvise_prep,
	},
	[IORING_OP_MADVISE] = {
		.prep = io_madvise_prep,
	},
	[IORING_OP_SEND] = {
		.unbound_nonreg_file = 1,
#if defined(CONFIG_NET)
		.async_size = sizeof(struct io_async_msghdr),
		.prep = io_sendmsg_prep,
		.fail = io_sendrecv_fail,
		.prep_async = io_send_prep_async,
#else
		.prep = io_eopnotsupp_prep,
#endif
	},
	[IORING_OP_RECV] = {
		.unbound_nonreg_file = 1,
#if defined(CONFIG_NET)
		.prep = io_recvmsg_prep,
		.fail = io_sendrecv_fail,
#else
		.prep = io_eopnotsupp_prep,
#endif
	},
	[IORING_OP_OPENAT2] = {
		.prep = io_openat2_prep,
		.cleanup = io_open_cleanup,
	},
	[IORING_OP_EPOLL_CTL] = {
		.unbound_nonreg_file = 1,
#if defined(CONFIG_EPOLL)
		.prep = io_epoll_ctl_prep,
		.issue = io_epoll_ctl,
#else
		.prep = io_eopnotsupp_prep,
#endif
	},
	[IORING_OP_SPLICE] = {
		.unbound_nonreg_file = 1,
		.prep = io_splice_prep,
	},
	[IORING_OP_PROVIDE_BUFFERS] = {
		.name = "PROVIDE_BUFFERS",
		.prep = io_provide_buffers_prep,
		.issue = io_provide_buffers,
	},
	[IORING_OP_REMOVE_BUFFERS] = {
		.name = "REMOVE_BUFFERS",
		.prep = io_remove_buffers_prep,
		.issue = io_remove_buffers,
	},
	[IORING_OP_TEE] = {
		.unbound_nonreg_file = 1,
	},
	[IORING_OP_SHUTDOWN] = {
#if defined(CONFIG_NET)
		.prep = io_shutdown_prep,
		.issue = io_shutdown,
#else
		.prep = io_eopnotsupp_prep,
#endif
	},
	[IORING_OP_RENAMEAT] = {
		.prep = io_renameat_prep,
		.issue = io_renameat,
		.cleanup = io_renameat_cleanup,
	},
	[IORING_OP_UNLINKAT] = {
		.prep = io_unlinkat_prep,
		.issue = io_unlinkat,
		.cleanup = io_unlinkat_cleanup,
	},
	[IORING_OP_MKDIRAT] = {
		.prep = io_mkdirat_prep,
		.cleanup = io_mkdirat_cleanup,
	},
	[IORING_OP_SYMLINKAT] = {
		.prep = io_symlinkat_prep,
		.issue = io_symlinkat,
		.cleanup = io_link_cleanup,
	},
	[IORING_OP_LINKAT] = {
		.prep = io_linkat_prep,
		.cleanup = io_link_cleanup,
	},
	[IORING_OP_MSG_RING] = {
		.prep = io_msg_ring_prep,
		.issue = io_msg_ring,
		.cleanup = io_msg_ring_cleanup,
	},
	[IORING_OP_FSETXATTR] = {
		.prep = io_fsetxattr_prep,
		.issue = io_fsetxattr,
		.cleanup = io_xattr_cleanup,
	},
	[IORING_OP_SETXATTR] = {
		.prep = io_setxattr_prep,
		.issue = io_setxattr,
		.cleanup = io_xattr_cleanup,
	},
	[IORING_OP_FGETXATTR] = {
		.prep = io_fgetxattr_prep,
		.issue = io_fgetxattr,
		.cleanup = io_xattr_cleanup,
	},
	[IORING_OP_GETXATTR] = {
		.prep = io_getxattr_prep,
		.issue = io_getxattr,
		.cleanup = io_xattr_cleanup,
	},
	[IORING_OP_SOCKET] = {
#if defined(CONFIG_NET)
		.prep = io_socket_prep,
#else
		.prep = io_eopnotsupp_prep,
#endif
	},
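	/*
	 * URING_CMD passes driver-defined commands through io_uring;
	 * async_size is sized via uring_cmd_pdu_size(1) so the async data
	 * can hold the larger PDU of the 128-byte SQE variant.
	 */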
	[IORING_OP_URING_CMD] = {
		.async_size = uring_cmd_pdu_size(1),
		.prep = io_uring_cmd_prep,
		.issue = io_uring_cmd,
		.prep_async = io_uring_cmd_prep_async,
	},
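	/*
	 * SEND_ZC and SENDMSG_ZC are the zero-copy send variants; besides
	 * the usual completion they post a notification CQE once the kernel
	 * is done with the user buffer, and ->cleanup()/->fail() tear down
	 * the per-request zero-copy state when a request does not complete
	 * normally.
	 */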
	[IORING_OP_SEND_ZC] = {
		.unbound_nonreg_file = 1,
#if defined(CONFIG_NET)
		.async_size = sizeof(struct io_async_msghdr),
		.prep = io_send_zc_prep,
		.prep_async = io_send_prep_async,
		.cleanup = io_send_zc_cleanup,
		.fail = io_sendrecv_fail,
#else
		.prep = io_eopnotsupp_prep,
#endif
	},
	[IORING_OP_SENDMSG_ZC] = {
		.name = "SENDMSG_ZC",
		.unbound_nonreg_file = 1,
#if defined(CONFIG_NET)
		.async_size = sizeof(struct io_async_msghdr),
		.prep = io_send_zc_prep,
		.issue = io_sendmsg_zc,
		.prep_async = io_sendmsg_prep_async,
		.cleanup = io_send_zc_cleanup,
		.fail = io_sendrecv_fail,
#else
		.prep = io_eopnotsupp_prep,
#endif
	},
};

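/*
 * Translate an opcode into its human-readable name; used by fdinfo and
 * the io_uring trace points when printing requests.
 */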
const char *io_uring_get_opcode(u8 opcode)
{
	if (opcode < IORING_OP_LAST)
		return io_op_defs[opcode].name;
	return "INVALID";
}

void __init io_uring_optable_init(void)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);

	for (i = 0; i < ARRAY_SIZE(io_op_defs); i++) {
		BUG_ON(!io_op_defs[i].prep);
		if (io_op_defs[i].prep != io_eopnotsupp_prep)
			BUG_ON(!io_op_defs[i].issue);
		WARN_ON_ONCE(!io_op_defs[i].name);
	}
}