1 // SPDX-License-Identifier: GPL-2.0
3 * io_uring opcode handling table
5 #include <linux/kernel.h>
6 #include <linux/errno.h>
8 #include <linux/file.h>
9 #include <linux/io_uring.h>
26 #include "openclose.h"
27 #include "uring_cmd.h"
/*
 * ->issue stub for opcodes that must never be issued directly (used below
 * for IORING_OP_LINK_TIMEOUT, which is consumed by its linked request).
 * NOTE(review): the function body is elided from this excerpt — presumably
 * it WARNs and/or returns an error; confirm against the full file.
 */
37 static int io_no_issue(struct io_kiocb *req, unsigned int issue_flags)
/*
 * ->prep stub installed for opcodes whose support is compiled out (the
 * #else branches of the CONFIG_NET/CONFIG_EPOLL entries below); the name
 * suggests it returns -EOPNOTSUPP. __maybe_unused: with all features
 * enabled no table entry references it. NOTE(review): body elided from
 * this excerpt — confirm the return value against the full file.
 */
43 static __maybe_unused int io_eopnotsupp_prep(struct io_kiocb *kiocb,
44 const struct io_uring_sqe *sqe)
/*
 * Per-opcode definition table, indexed by IORING_OP_* value. Each entry
 * wires up the opcode's ->prep/->issue handlers and optional ->prep_async/
 * ->cleanup/->fail callbacks, plus per-op flags (e.g. unbound_nonreg_file)
 * and the async context size. io_uring_optable_init() below sanity-checks
 * that every entry has at least ->prep and (unless compiled out) ->issue.
 *
 * NOTE(review): this excerpt is a sampled extraction — entry open/close
 * braces, #else/#endif lines, and many flag fields are missing. Do not
 * treat the text below as compilable; compare against the full opdef.c.
 */
49 const struct io_op_def io_op_defs[] = {
/* (IORING_OP_NOP / IORING_OP_READV entries partially elided here) */
59 .unbound_nonreg_file = 1,
66 .async_size = sizeof(struct io_async_rw),
70 .prep_async = io_readv_prep_async,
71 .cleanup = io_readv_writev_cleanup,
74 [IORING_OP_WRITEV] = {
77 .unbound_nonreg_file = 1,
83 .async_size = sizeof(struct io_async_rw),
87 .prep_async = io_writev_prep_async,
88 .cleanup = io_readv_writev_cleanup,
95 .prep = io_fsync_prep,
98 [IORING_OP_READ_FIXED] = {
100 .unbound_nonreg_file = 1,
106 .async_size = sizeof(struct io_async_rw),
107 .name = "READ_FIXED",
112 [IORING_OP_WRITE_FIXED] = {
115 .unbound_nonreg_file = 1,
121 .async_size = sizeof(struct io_async_rw),
122 .name = "WRITE_FIXED",
127 [IORING_OP_POLL_ADD] = {
129 .unbound_nonreg_file = 1,
132 .prep = io_poll_add_prep,
133 .issue = io_poll_add,
135 [IORING_OP_POLL_REMOVE] = {
137 .name = "POLL_REMOVE",
138 .prep = io_poll_remove_prep,
139 .issue = io_poll_remove,
141 [IORING_OP_SYNC_FILE_RANGE] = {
144 .name = "SYNC_FILE_RANGE",
146 .issue = io_sync_file_range,
/*
 * Networking opcodes: real handlers under CONFIG_NET; otherwise only the
 * -EOPNOTSUPP prep stub (the #else/#endif lines are elided in this excerpt).
 */
148 [IORING_OP_SENDMSG] = {
150 .unbound_nonreg_file = 1,
155 #if defined(CONFIG_NET)
156 .async_size = sizeof(struct io_async_msghdr),
157 .prep = io_sendmsg_prep,
159 .prep_async = io_sendmsg_prep_async,
160 .cleanup = io_sendmsg_recvmsg_cleanup,
161 .fail = io_sendrecv_fail,
163 .prep = io_eopnotsupp_prep,
166 [IORING_OP_RECVMSG] = {
168 .unbound_nonreg_file = 1,
174 #if defined(CONFIG_NET)
175 .async_size = sizeof(struct io_async_msghdr),
176 .prep = io_recvmsg_prep,
178 .prep_async = io_recvmsg_prep_async,
179 .cleanup = io_sendmsg_recvmsg_cleanup,
180 .fail = io_sendrecv_fail,
182 .prep = io_eopnotsupp_prep,
185 [IORING_OP_TIMEOUT] = {
187 .async_size = sizeof(struct io_timeout_data),
189 .prep = io_timeout_prep,
192 [IORING_OP_TIMEOUT_REMOVE] = {
193 /* used by timeout updates' prep() */
195 .name = "TIMEOUT_REMOVE",
196 .prep = io_timeout_remove_prep,
197 .issue = io_timeout_remove,
199 [IORING_OP_ACCEPT] = {
201 .unbound_nonreg_file = 1,
204 .ioprio = 1, /* used for flags */
206 #if defined(CONFIG_NET)
207 .prep = io_accept_prep,
210 .prep = io_eopnotsupp_prep,
213 [IORING_OP_ASYNC_CANCEL] = {
215 .name = "ASYNC_CANCEL",
216 .prep = io_async_cancel_prep,
217 .issue = io_async_cancel,
219 [IORING_OP_LINK_TIMEOUT] = {
221 .async_size = sizeof(struct io_timeout_data),
222 .name = "LINK_TIMEOUT",
223 .prep = io_link_timeout_prep,
/* never issued directly — armed by the request it is linked to */
224 .issue = io_no_issue,
226 [IORING_OP_CONNECT] = {
228 .unbound_nonreg_file = 1,
231 #if defined(CONFIG_NET)
232 .async_size = sizeof(struct io_async_connect),
233 .prep = io_connect_prep,
235 .prep_async = io_connect_prep_async,
237 .prep = io_eopnotsupp_prep,
240 [IORING_OP_FALLOCATE] = {
243 .prep = io_fallocate_prep,
244 .issue = io_fallocate,
246 [IORING_OP_OPENAT] = {
248 .prep = io_openat_prep,
250 .cleanup = io_open_cleanup,
252 [IORING_OP_CLOSE] = {
254 .prep = io_close_prep,
257 [IORING_OP_FILES_UPDATE] = {
260 .name = "FILES_UPDATE",
261 .prep = io_files_update_prep,
262 .issue = io_files_update,
264 [IORING_OP_STATX] = {
267 .prep = io_statx_prep,
269 .cleanup = io_statx_cleanup,
/* (IORING_OP_READ entry — opcode index line elided in this excerpt) */
273 .unbound_nonreg_file = 1,
280 .async_size = sizeof(struct io_async_rw),
286 [IORING_OP_WRITE] = {
289 .unbound_nonreg_file = 1,
295 .async_size = sizeof(struct io_async_rw),
301 [IORING_OP_FADVISE] = {
305 .prep = io_fadvise_prep,
308 [IORING_OP_MADVISE] = {
310 .prep = io_madvise_prep,
/* (IORING_OP_SEND entry — opcode index line elided in this excerpt) */
315 .unbound_nonreg_file = 1,
321 #if defined(CONFIG_NET)
322 .async_size = sizeof(struct io_async_msghdr),
323 .prep = io_sendmsg_prep,
325 .fail = io_sendrecv_fail,
326 .prep_async = io_send_prep_async,
328 .prep = io_eopnotsupp_prep,
/* (IORING_OP_RECV entry — opcode index line elided in this excerpt) */
333 .unbound_nonreg_file = 1,
339 #if defined(CONFIG_NET)
340 .prep = io_recvmsg_prep,
342 .fail = io_sendrecv_fail,
344 .prep = io_eopnotsupp_prep,
347 [IORING_OP_OPENAT2] = {
349 .prep = io_openat2_prep,
351 .cleanup = io_open_cleanup,
353 [IORING_OP_EPOLL_CTL] = {
354 .unbound_nonreg_file = 1,
357 #if defined(CONFIG_EPOLL)
358 .prep = io_epoll_ctl_prep,
359 .issue = io_epoll_ctl,
361 .prep = io_eopnotsupp_prep,
364 [IORING_OP_SPLICE] = {
367 .unbound_nonreg_file = 1,
370 .prep = io_splice_prep,
373 [IORING_OP_PROVIDE_BUFFERS] = {
376 .name = "PROVIDE_BUFFERS",
377 .prep = io_provide_buffers_prep,
378 .issue = io_provide_buffers,
380 [IORING_OP_REMOVE_BUFFERS] = {
383 .name = "REMOVE_BUFFERS",
384 .prep = io_remove_buffers_prep,
385 .issue = io_remove_buffers,
/* (IORING_OP_TEE entry — opcode index line elided in this excerpt) */
390 .unbound_nonreg_file = 1,
396 [IORING_OP_SHUTDOWN] = {
399 #if defined(CONFIG_NET)
400 .prep = io_shutdown_prep,
401 .issue = io_shutdown,
403 .prep = io_eopnotsupp_prep,
406 [IORING_OP_RENAMEAT] = {
408 .prep = io_renameat_prep,
409 .issue = io_renameat,
410 .cleanup = io_renameat_cleanup,
412 [IORING_OP_UNLINKAT] = {
414 .prep = io_unlinkat_prep,
415 .issue = io_unlinkat,
416 .cleanup = io_unlinkat_cleanup,
418 [IORING_OP_MKDIRAT] = {
420 .prep = io_mkdirat_prep,
422 .cleanup = io_mkdirat_cleanup,
424 [IORING_OP_SYMLINKAT] = {
426 .prep = io_symlinkat_prep,
427 .issue = io_symlinkat,
428 .cleanup = io_link_cleanup,
430 [IORING_OP_LINKAT] = {
432 .prep = io_linkat_prep,
434 .cleanup = io_link_cleanup,
436 [IORING_OP_MSG_RING] = {
440 .prep = io_msg_ring_prep,
441 .issue = io_msg_ring,
443 [IORING_OP_FSETXATTR] = {
446 .prep = io_fsetxattr_prep,
447 .issue = io_fsetxattr,
448 .cleanup = io_xattr_cleanup,
450 [IORING_OP_SETXATTR] = {
452 .prep = io_setxattr_prep,
453 .issue = io_setxattr,
454 .cleanup = io_xattr_cleanup,
456 [IORING_OP_FGETXATTR] = {
459 .prep = io_fgetxattr_prep,
460 .issue = io_fgetxattr,
461 .cleanup = io_xattr_cleanup,
463 [IORING_OP_GETXATTR] = {
465 .prep = io_getxattr_prep,
466 .issue = io_getxattr,
467 .cleanup = io_xattr_cleanup,
469 [IORING_OP_SOCKET] = {
472 #if defined(CONFIG_NET)
473 .prep = io_socket_prep,
476 .prep = io_eopnotsupp_prep,
479 [IORING_OP_URING_CMD] = {
/* async size comes from the uring_cmd PDU helper, not a struct sizeof */
484 .async_size = uring_cmd_pdu_size(1),
485 .prep = io_uring_cmd_prep,
486 .issue = io_uring_cmd,
487 .prep_async = io_uring_cmd_prep_async,
489 [IORING_OP_SEND_ZC] = {
492 .unbound_nonreg_file = 1,
497 #if defined(CONFIG_NET)
498 .async_size = sizeof(struct io_async_msghdr),
499 .prep = io_send_zc_prep,
501 .prep_async = io_send_prep_async,
502 .cleanup = io_send_zc_cleanup,
503 .fail = io_sendrecv_fail,
505 .prep = io_eopnotsupp_prep,
508 [IORING_OP_SENDMSG_ZC] = {
509 .name = "SENDMSG_ZC",
511 .unbound_nonreg_file = 1,
515 #if defined(CONFIG_NET)
516 .async_size = sizeof(struct io_async_msghdr),
/* shares the zero-copy send prep with IORING_OP_SEND_ZC */
517 .prep = io_send_zc_prep,
518 .issue = io_sendmsg_zc,
519 .prep_async = io_sendmsg_prep_async,
520 .cleanup = io_send_zc_cleanup,
521 .fail = io_sendrecv_fail,
523 .prep = io_eopnotsupp_prep,
/*
 * Return the human-readable name for an opcode (used by tracing/debug
 * output). Valid opcodes resolve via io_op_defs[].name.
 * NOTE(review): the fallback return for opcode >= IORING_OP_LAST (and the
 * function braces) are elided from this excerpt — presumably it returns a
 * sentinel such as "INVALID"; confirm against the full file.
 */
528 const char *io_uring_get_opcode(u8 opcode)
530 if (opcode < IORING_OP_LAST)
531 return io_op_defs[opcode].name;
/*
 * Boot-time sanity check of io_op_defs[]: the table must cover exactly
 * IORING_OP_LAST entries (compile-time check), every entry must supply a
 * ->prep handler, every *supported* entry (prep != the -EOPNOTSUPP stub)
 * must supply ->issue, and a missing ->name only warns rather than BUGs.
 * NOTE(review): the declaration of 'i' and the closing braces are elided
 * from this excerpt.
 */
535 void __init io_uring_optable_init(void)
539 BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
541 for (i = 0; i < ARRAY_SIZE(io_op_defs); i++) {
542 BUG_ON(!io_op_defs[i].prep);
543 if (io_op_defs[i].prep != io_eopnotsupp_prep)
544 BUG_ON(!io_op_defs[i].issue);
545 WARN_ON_ONCE(!io_op_defs[i].name);