2 * UVT - Userspace Virtual Terminals
4 * Copyright (c) 2011-2013 David Herrmann <dh.herrmann@gmail.com>
6 * Permission is hereby granted, free of charge, to any person obtaining
7 * a copy of this software and associated documentation files
8 * (the "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sublicense, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice shall be included
15 * in all copies or substantial portions of the Software.
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
18 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
21 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
22 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
23 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28 * A client session represents the internal object that corresponds to a single
29 * open-file in the kernel. That is, for each user calling open() on a cdev, we
30 * create a client-session in UVT.
31 * Note that multiple client-sessions can share the same VT object. It is up to
32 * the API user to assign clients to the correct VTs. You can even move clients
33 * from one VT to another.
34 * On the other hand, user-space can have multiple FDs open for a single
35 * client-session similar to how they can have multiple FDs for a single
46 #include <sys/epoll.h>
50 #include "shl_dlist.h"
54 #include "uvt_internal.h"
56 #define LLOG_SUBSYSTEM "uvt_client"
60 * I/O has always two modes: blocking and nonblocking
61 * Nonblocking I/O is easy. We simply check whether we can actually forward the
62 * data. If we can't, we signal that back. However, blocking I/O is a lot more
63 * complex to implement. If a user submits a blocking I/O call, we have to wait
64 * until we can finish that request. In the kernel we simply put the user
65 context asleep until the call can finish. However, in user-space via FUSE we
66 * have no user-context. Instead, we need to work around that.
67 * The most straightforward way would be to create a thread and put that thread
68 * asleep. However, this would create one thread for every blocking I/O call
69 * which seems to be way too much overhead. Also, we don't want threads in a
70 * library. Therefore, we use a different approach.
71 * For each blocking request, we create a uvt_waiter. This waiter is then linked
72 into the waiter list and we continue with other requests. Every time the I/O
73 * status changes, we retry the whole waiter list and try to finish the
74 * requests. If a request is done, we signal it back and destroy the waiter.
75 * This gets slightly more complex with interrupts and fuse_req objects. See
76 * below for the implementation.
/* Kind of blocked I/O request a uvt_waiter represents. The values are
 * bit-flags so several kinds can be combined into a retry mask
 * (see uvt_client_waiters_retry()). Closing lines elided in this extract. */
79 enum uvt_waiter_type {
80 UVT_WAITER_INVALID = 0x00,
82 UVT_WAITER_READ = 0x01,
83 UVT_WAITER_WRITE = 0x02,
/* Convenience mask covering every waiter kind. */
85 UVT_WAITER_ALL = UVT_WAITER_READ |
/* Lifecycle flags of a waiter: KILLED means the FUSE request was
 * interrupted by user-space; RELEASED means a reply was already sent
 * and no further reply may be issued. */
89 enum uvt_waiter_flags {
90 UVT_WAITER_KILLED = 0x01,
91 UVT_WAITER_RELEASED = 0x02,
/* NOTE(review): the struct header is elided in this extract; these are
 * the leading members of the waiter object — list linkage into
 * client->waiters plus a back-pointer to the owning client. */
95 struct shl_dlist list;
96 struct uvt_client *client;
115 static bool uvt_waiter_is_killed(struct uvt_waiter *waiter)
117 return !waiter || (waiter->flags & UVT_WAITER_KILLED);
120 static void uvt_waiter_set_killed(struct uvt_waiter *waiter)
123 waiter->flags |= UVT_WAITER_KILLED;
126 static bool uvt_waiter_is_released(struct uvt_waiter *waiter)
128 return !waiter || (waiter->flags & UVT_WAITER_RELEASED);
131 static void uvt_waiter_set_released(struct uvt_waiter *waiter)
134 waiter->flags |= UVT_WAITER_RELEASED;
137 static void uvt_waiter_interrupt(fuse_req_t req, void *data)
139 struct uvt_waiter *waiter = data;
141 uvt_waiter_set_killed(waiter);
/* Allocate a blocking-I/O waiter for FUSE request @req and queue it on
 * @client->waiters. Installs an interrupt callback; if the request was
 * interrupted in the meantime the callback is detached again.
 * NOTE(review): return values and error-path lines are elided in this
 * extract. */
144 static int uvt_waiter_new(struct uvt_waiter **out, struct uvt_client *client,
147 struct uvt_waiter *waiter;
/* Bail out early if user-space already interrupted the request. */
151 if (fuse_req_interrupted(req))
154 waiter = malloc(sizeof(*waiter));
157 memset(waiter, 0, sizeof(*waiter));
158 waiter->client = client;
/* Race check: the interrupt may fire while we install the callback. */
162 fuse_req_interrupt_func(req, uvt_waiter_interrupt, waiter);
163 if (uvt_waiter_is_killed(waiter)) {
164 fuse_req_interrupt_func(req, NULL, NULL);
/* Queue at the tail so waiters are retried in FIFO order. */
169 shl_dlist_link_tail(&client->waiters, &waiter->list);
/* Create a read-waiter. @buf (ownership passes to the waiter; freed in
 * uvt_waiter_free()) will receive up to @size bytes once the VT becomes
 * readable. Error handling and *out assignment elided in this extract. */
174 static int uvt_waiter_new_read(struct uvt_waiter **out,
175 struct uvt_client *client, fuse_req_t req,
176 uint8_t *buf, size_t size)
178 struct uvt_waiter *waiter;
184 ret = uvt_waiter_new(&waiter, client, req);
187 waiter->type = UVT_WAITER_READ;
188 waiter->read.size = size;
189 waiter->read.buf = buf;
/* Create a write-waiter: copies @size bytes from @mem into a private
 * buffer so the data outlives the FUSE request buffer.
 * NOTE(review): the allocation of 'buf' and all error paths are elided
 * in this extract. */
195 static int uvt_waiter_new_write(struct uvt_waiter **out,
196 struct uvt_client *client, fuse_req_t req,
197 const uint8_t *mem, size_t size)
199 struct uvt_waiter *waiter;
209 memcpy(buf, mem, size);
211 ret = uvt_waiter_new(&waiter, client, req);
214 waiter->type = UVT_WAITER_WRITE;
215 waiter->write.size = size;
216 waiter->write.buf = buf;
/* Mark @waiter as released and detach its FUSE interrupt callback,
 * then report @error. Idempotent: repeated calls return early.
 * NOTE(review): a line between 232 and 234 is elided; the success paths
 * (uvt_waiter_free_read/_write) call this with error=0 and then send
 * their own data reply, so the error reply below is presumably guarded
 * by 'if (error)' — confirm against the full source. */
226 static void uvt_waiter_release(struct uvt_waiter *waiter, int error)
228 if (!waiter || uvt_waiter_is_released(waiter))
231 uvt_waiter_set_released(waiter);
232 fuse_req_interrupt_func(waiter->req, NULL, NULL);
/* abs(): FUSE expects positive errno values. */
234 fuse_reply_err(waiter->req, abs(error));
/* Unlink @waiter from its client's list, send the @error reply if none
 * was sent yet, and free the type-specific payload buffer (break
 * statements and the final free elided in this extract). */
237 static void uvt_waiter_free(struct uvt_waiter *waiter, int error)
239 shl_dlist_unlink(&waiter->list);
240 uvt_waiter_release(waiter, error);
242 switch (waiter->type) {
243 case UVT_WAITER_READ:
244 free(waiter->read.buf);
246 case UVT_WAITER_WRITE:
247 free(waiter->write.buf);
/* Finish a read-waiter successfully: if not yet released, release it
 * with error 0 (which presumably suppresses the error reply — see
 * uvt_waiter_release()) and return @len bytes of its buffer to
 * user-space; then tear the waiter down. The -EINVAL only applies if
 * it somehow was still unreleased at that point. */
254 static void uvt_waiter_free_read(struct uvt_waiter *waiter, size_t len)
259 if (!uvt_waiter_is_released(waiter)) {
260 uvt_waiter_release(waiter, 0);
261 fuse_reply_buf(waiter->req, (void*)waiter->read.buf, len);
263 uvt_waiter_free(waiter, -EINVAL);
/* Finish a write-waiter successfully: if not yet released, release it
 * with error 0 (presumably no error reply) and report @len bytes
 * written; then tear the waiter down, mirroring
 * uvt_waiter_free_read(). */
266 static void uvt_waiter_free_write(struct uvt_waiter *waiter, size_t len)
271 if (!uvt_waiter_is_released(waiter)) {
272 uvt_waiter_release(waiter, 0);
273 fuse_reply_write(waiter->req, len);
275 uvt_waiter_free(waiter, -EINVAL);
280 * A client session is the user-space counterpart of kernel-space open-files.
281 * For each open-file we have one client-session in user-space. Users can access
282 * a single client-session via multiple file-descriptors via dup(). However, for
283 * each open() call on the device, we create a new open-file, that is, a new
285 * A single client session dispatches all the I/O calls on the file. It does
286 * blocking and nonblocking I/O, parses ioctls() and correctly performs any
287 * other state-tracking. But it does not implement any device logic. That means,
288 * the client-session doesn't provide any functionality. Instead, you have to
289 * assign a VT to the session. The client-session performs any maintenance tasks
290 * and then forwards the requests to the VT object. If no VT object is assigned,
291 * the user gets ENODEV as error.
292 * Because the client-session performs all state-tracking and parsing, the VT
293 * object can be a lot simpler and doesn't have to be aware of any FUSE objects
294 * or sessions. Instead, the VT object can concentrate on implementing a _VT_
296 * Furthermore, this allows assigning the same VT object to multiple different
297 * sessions at the same time. Or to assign a different VT to each session on the
298 * same device, or any other combination you want.
301 static void uvt_client_waiters_retry(struct uvt_client *client,
/* Allocate a fresh client session for @cdev, wire up logging, init the
 * waiter list and link it into the cdev's client list.
 * NOTE(review): ref-count initialization, the *out assignment and the
 * return statements are elided in this extract. */
304 static int uvt_client_new(struct uvt_client **out, struct uvt_cdev *cdev)
306 struct uvt_client *client;
311 return llog_EINVAL(cdev);
313 client = malloc(sizeof(*client));
315 return llog_ENOMEM(cdev);
316 memset(client, 0, sizeof(*client));
/* Share the cdev's logging sink. */
319 client->llog = cdev->llog;
320 client->llog_data = cdev->llog_data;
321 shl_dlist_init(&client->waiters);
323 llog_debug(client, "new client %p on cdev %p", client, cdev);
325 shl_dlist_link_tail(&cdev->clients, &client->list);
/* Take a reference on @client; NULL and already-dead (ref == 0) objects
 * are ignored. NOTE(review): the increment itself is on an elided line. */
331 void uvt_client_ref(struct uvt_client *client)
333 if (!client || !client->ref)
/* Drop a reference on @client; when the last one goes, the client is
 * killed (detached from its cdev) and freed (free elided here). */
340 void uvt_client_unref(struct uvt_client *client)
342 if (!client || !client->ref || --client->ref)
345 llog_debug(client, "free client %p", client);
347 uvt_client_kill(client);
352 * This must be called after each event dispatch round. It cleans up all
353 * interrupted/killed readers. The readers cannot be released right away due
354 * to heavy locking inside of FUSE. We have to delay these tasks and clean up
355 * after each dispatch round.
/* Free all waiters that were killed (interrupted) during the last FUSE
 * dispatch round. Interrupted requests get -ENOENT, as FUSE expects.
 * Must be called after each dispatch round — see the comment above. */
357 void uvt_client_cleanup(struct uvt_client *client)
359 struct shl_dlist *i, *tmp;
360 struct uvt_waiter *waiter;
/* Safe iteration: uvt_waiter_free() unlinks the current entry. */
365 shl_dlist_for_each_safe(i, tmp, &client->waiters) {
366 waiter = shl_dlist_entry(i, struct uvt_waiter, list);
367 if (uvt_waiter_is_killed(waiter))
368 uvt_waiter_free(waiter, -ENOENT);
/* Flush the whole waiter list, failing every entry. Killed waiters and
 * live waiters presumably get different codes ('err' is chosen on
 * elided lines; killed ones likely get -ENOENT, others @error). */
372 static void uvt_client_waiters_release(struct uvt_client *client, int error)
374 struct uvt_waiter *waiter;
/* Pop from the head until the list is empty. */
380 while (!shl_dlist_empty(&client->waiters)) {
381 waiter = shl_dlist_entry(client->waiters.next,
382 struct uvt_waiter, list);
384 if (uvt_waiter_is_killed(waiter))
389 uvt_waiter_free(waiter, err);
394 bool uvt_client_is_dead(struct uvt_client *client)
396 return !client || !client->cdev;
/* Detach @client from its cdev: wake up pollers, drop the stored poll
 * handle, unlink from the cdev's client list, drop the VT and fail all
 * pending waiters with -EPIPE. The object itself stays valid until the
 * last uvt_client_unref(). No-op on NULL or already-dead clients. */
400 void uvt_client_kill(struct uvt_client *client)
402 if (!client || !client->cdev)
405 llog_debug(client, "kill client %p", client);
/* Notify anyone polling so they observe the hang-up. */
408 fuse_notify_poll(client->ph);
409 fuse_pollhandle_destroy(client->ph)
413 shl_dlist_unlink(&client->list);
415 uvt_client_set_vt(client, NULL, NULL);
416 uvt_client_waiters_release(client, -EPIPE);
420 * We allow recursive VT-actions so we need sophisticated locking. That is, we
421 * allow each client->vt->XY() function to itself raise VT events. These VT
422 * events cause our uvt_client_vt_event() handler to call
423 * uvt_client_waiters_retry(). But uvt_client_waiters_retry() itself can call
424 * VT functions again.
425 * This recursion isn't particularly bad, as any _proper_ implementation would
426 * have an upper limit (which is the number of active waiters). However, to
427 * avoid wasting stack space for recursion, we lock the VT when calling VT
428 * callbacks. The uvt_client_vt_event() handler checks whether the callbacks are
429 * currently locked and sets markers otherwise. These markers cause our
430 * unlock-function to notice that we got events in between and then retries all
431 * interrupted operations.
432 * The client->vt_in_unlock is used to avoid recursion in unlock() itself.
435 static bool uvt_client_lock_vt(struct uvt_client *client)
437 if (!client || client->vt_locked)
440 client->vt_locked = true;
444 static void uvt_client_unlock_vt(struct uvt_client *client)
448 if (!client || !client->vt_locked)
451 client->vt_locked = false;
452 if (client->vt_in_unlock)
455 while (client->vt_retry) {
456 retry = client->vt_retry;
457 client->vt_retry = 0;
459 client->vt_in_unlock = true;
460 uvt_client_waiters_retry(client, retry);
461 client->vt_in_unlock = false;
/* Retry all pending waiters of the given @types after an I/O status
 * change. Runs under the VT lock to avoid recursion through VT events;
 * if the lock is already held, the unlock path replays us via
 * client->vt_retry. When a VT op returns -EAGAIN, that type is masked
 * out so remaining waiters of the same kind stay queued in order.
 * NOTE(review): continue statements, buffer arguments and the 'ret'
 * declaration are elided in this extract. */
465 static void uvt_client_waiters_retry(struct uvt_client *client,
468 struct shl_dlist *iter, *tmp;
469 struct uvt_waiter *waiter;
472 if (!client || !types || uvt_client_is_dead(client) || !client->vt)
/* Already inside a VT callback: bail out, unlock will retry. */
475 if (!uvt_client_lock_vt(client))
478 shl_dlist_for_each_safe(iter, tmp, &client->waiters) {
482 waiter = shl_dlist_entry(iter, struct uvt_waiter, list);
/* Skip waiters of other kinds and interrupted ones. */
483 if (!(waiter->type & types) || uvt_waiter_is_killed(waiter))
486 if (waiter->type == UVT_WAITER_READ) {
487 ret = client->vt->read(client->vt_data,
490 if (ret == -EAGAIN) {
491 types &= ~UVT_WAITER_READ;
493 } else if (ret < 0) {
494 uvt_waiter_free(waiter, ret);
/* Clamp the VT's byte count to the requested size. */
496 if (ret > waiter->read.size)
497 ret = waiter->read.size;
498 uvt_waiter_free_read(waiter, ret);
500 } else if (waiter->type == UVT_WAITER_WRITE) {
501 ret = client->vt->write(client->vt_data,
504 if (ret == -EAGAIN) {
505 types &= ~UVT_WAITER_WRITE;
507 } else if (ret < 0) {
508 uvt_waiter_free(waiter, ret);
510 if (ret > waiter->write.size)
511 ret = waiter->write.size;
512 uvt_waiter_free_write(waiter, ret);
517 uvt_client_unlock_vt(client);
/* VT event callback registered in uvt_client_set_vt(). Kills the
 * client on hang-up style events and records which waiter kinds became
 * retryable in client->vt_retry before retrying them.
 * NOTE(review): the outer switch on the event type and the case labels
 * are elided in this extract. */
520 static void uvt_client_vt_event(void *vt, struct uvt_vt_event *ev, void *data)
522 struct uvt_client *client = data;
524 if (uvt_client_is_dead(client))
529 uvt_client_kill(client);
532 switch (ev->tty.type) {
534 uvt_client_kill(client);
/* Readable: wake pollers and mark queued readers for retry. */
538 fuse_notify_poll(client->ph);
539 client->vt_retry |= UVT_WAITER_READ;
/* Writable: wake pollers and mark queued writers for retry. */
543 fuse_notify_poll(client->ph);
544 client->vt_retry |= UVT_WAITER_WRITE;
/* Retry what became possible; bails out early if the VT is locked. */
550 uvt_client_waiters_retry(client, client->vt_retry);
/* Replace the VT assigned to @client (@vt may be NULL to detach).
 * Unregisters the event callback from and unrefs the old VT, then
 * registers on and refs the new one. NOTE(review): several assignments,
 * error paths and return statements are elided in this extract. */
554 int uvt_client_set_vt(struct uvt_client *client, const struct uvt_vt_ops *vt,
/* A dead client may only have its VT cleared, never (re)set. */
561 if (uvt_client_is_dead(client) && vt)
565 client->vt->unregister_cb(client->vt_data, uvt_client_vt_event,
567 client->vt->unref(client->vt_data);
571 client->vt_data = vt_data;
574 ret = client->vt->register_cb(client->vt_data,
575 uvt_client_vt_event, client);
577 client->vt->ref(client->vt_data);
/* The new VT might already be ready: retry every waiter kind. */
578 uvt_client_waiters_retry(client, UVT_WAITER_ALL);
586 client->vt_data = NULL;
/* Detached: fail all pending waiters with -ENODEV. */
587 uvt_client_waiters_release(client, -ENODEV);
592 * Internal FUSE low-level fops implementation
593 * These functions implement the callbacks used by the CUSE/FUSE-ll
594 * implementation in uvt_cdev objects. Our infrastructure allows to provide
595 * other callbacks, too, but this is currently not needed. Moreover, I cannot
596 * see any reason to add them to the public API as nobody would want anything
597 * different than CUSE/FUSE as frontend.
/* CUSE open() handler: create a client session, stash its pointer in
 * fi->fh and confirm the open. If the open-reply fails, the client is
 * killed and unref'd again. Return statements elided in this extract. */
600 int uvt_client_ll_open(struct uvt_client **out, struct uvt_cdev *cdev,
601 fuse_req_t req, struct fuse_file_info *fi)
603 struct uvt_client *client;
606 ret = uvt_client_new(&client, cdev);
/* ret is a negative errno; FUSE wants it positive. */
608 fuse_reply_err(req, -ret);
/* Double cast silences pointer-to-integer width warnings. */
612 fi->fh = (uint64_t)(uintptr_t)(void*)client;
615 ret = fuse_reply_open(req, fi);
617 uvt_client_kill(client);
618 uvt_client_unref(client);
626 void uvt_client_ll_release(fuse_req_t req, struct fuse_file_info *fi)
628 struct uvt_client *client = (void*)(uintptr_t)fi->fh;
631 fuse_reply_err(req, EINVAL);
635 uvt_client_kill(client);
636 uvt_client_unref(client);
637 fuse_reply_err(req, 0);
/* CUSE read() handler. Immediate data or errors are replied right
 * away; a blocking fd that hits -EAGAIN gets parked as a read-waiter
 * and answered later from uvt_client_waiters_retry().
 * NOTE(review): several branch conditions, the 'buf' allocation and
 * cleanup lines are elided in this extract. */
640 void uvt_client_ll_read(fuse_req_t req, size_t size, off_t off,
641 struct fuse_file_info *fi)
643 struct uvt_client *client = (void*)(uintptr_t)fi->fh;
644 struct uvt_waiter *waiter;
649 fuse_reply_err(req, EINVAL);
651 } else if (uvt_client_is_dead(client)) {
652 fuse_reply_err(req, EPIPE);
/* TTYs are not seekable; a nonzero offset is presumably rejected here. */
655 fuse_reply_err(req, EINVAL);
/* Zero-sized read: empty success reply without touching the VT. */
658 fuse_reply_buf(req, "", 0);
660 } else if (!client->vt) {
661 fuse_reply_err(req, ENODEV);
667 fuse_reply_err(req, ENOMEM);
671 ret = client->vt->read(client->vt_data, buf, size);
676 fuse_reply_buf(req, (void*)buf, ret);
/* Blocking fd: queue a waiter instead of failing with EAGAIN. */
679 } else if (ret == -EAGAIN && !(fi->flags & O_NONBLOCK)) {
680 ret = uvt_waiter_new_read(&waiter, client, req, buf, size);
685 fuse_reply_err(req, -ret);
/* CUSE write() handler, mirroring uvt_client_ll_read(): immediate
 * results are replied directly; a blocking fd that hits -EAGAIN gets
 * parked as a write-waiter (which copies the data).
 * NOTE(review): branch conditions and cleanup lines are elided. */
689 void uvt_client_ll_write(fuse_req_t req, const char *buf, size_t size,
690 off_t off, struct fuse_file_info *fi)
692 struct uvt_client *client = (void*)(uintptr_t)fi->fh;
693 struct uvt_waiter *waiter;
697 fuse_reply_err(req, EINVAL);
699 } else if (uvt_client_is_dead(client)) {
700 fuse_reply_err(req, EPIPE);
703 fuse_reply_err(req, EINVAL);
/* Zero-sized write: report success without touching the VT. */
706 fuse_reply_write(req, 0);
708 } else if (!client->vt) {
709 fuse_reply_err(req, ENODEV);
713 ret = client->vt->write(client->vt_data, (void*)buf, size);
718 fuse_reply_write(req, ret);
/* Blocking fd: queue a waiter instead of failing with EAGAIN. */
720 } else if (ret == -EAGAIN && !(fi->flags & O_NONBLOCK)) {
721 ret = uvt_waiter_new_write(&waiter, client, req, (void*)buf,
727 fuse_reply_err(req, -ret);
/* CUSE poll() handler. Stores @ph in client->ph for later
 * fuse_notify_poll() wake-ups and translates the VT's UVT_TTY_* state
 * into EPOLL* flags. NOTE(review): several branches (including the
 * assignment of client->ph and the EPOLLHUP set) are elided here. */
730 void uvt_client_ll_poll(fuse_req_t req, struct fuse_file_info *fi,
731 struct fuse_pollhandle *ph)
733 struct uvt_client *client = (void*)(uintptr_t)fi->fh;
734 unsigned int flags, fl;
737 fuse_reply_err(req, EINVAL);
/* Dead clients always report hang-up plus read/write readiness. */
739 } else if (uvt_client_is_dead(client)) {
741 fuse_pollhandle_destroy(ph);
742 fuse_reply_poll(req, EPOLLHUP | EPOLLIN | EPOLLOUT |
743 EPOLLWRNORM | EPOLLRDNORM);
/* Drop any previously stored poll handle before keeping the new one. */
748 fuse_pollhandle_destroy(client->ph);
752 fuse_reply_err(req, ENODEV);
/* Map VT readiness bits onto epoll event flags. */
757 fl = client->vt->poll(client->vt_data);
758 if (fl & UVT_TTY_HUP)
760 if (fl & UVT_TTY_READ)
761 flags |= EPOLLIN | EPOLLRDNORM;
762 if (fl & UVT_TTY_WRITE)
763 flags |= EPOLLOUT | EPOLLWRNORM;
765 fuse_reply_poll(req, flags);
/* Helper for FUSE's two-phase ioctl protocol: if the kernel has not yet
 * copied the wanted amount of input/output payload, describe the needed
 * iovecs via fuse_reply_ioctl_retry(). Presumably returns true whenever
 * it already sent a reply (retry request or EFAULT) so the caller must
 * stop, and false when both buffers are ready — confirm against the
 * elided return lines. */
768 static bool ioctl_param(fuse_req_t req, void *arg, size_t in_want,
769 size_t in_have, size_t out_want, size_t out_have)
772 struct iovec in, out;
773 size_t in_num, out_num;
776 memset(&in, 0, sizeof(in));
778 memset(&out, 0, sizeof(out));
/* A wanted-but-short input buffer is a user-space addressing error. */
784 } else if (in_have < in_want) {
785 fuse_reply_err(req, EFAULT);
790 in.iov_len = in_want;
796 } else if (out_have < out_want) {
797 fuse_reply_err(req, EFAULT);
802 out.iov_len = out_want;
/* Ask the kernel to restart the ioctl with the described buffers. */
807 fuse_reply_ioctl_retry(req, in_num ? &in : NULL, in_num,
808 out_num ? &out : NULL, out_num);
/* CUSE ioctl() dispatcher. After validating the client/VT, each
 * supported TTY/VT/KD ioctl is forwarded to the corresponding optional
 * client->vt->ioctl_* hook; ioctl_param() drives FUSE's retry protocol
 * to fetch argument payloads. Missing hooks yield EOPNOTSUPP, unknown
 * commands EINVAL. NOTE(review): the switch statement, most case
 * labels, break statements and closing braces are elided in this
 * extract. */
812 void uvt_client_ll_ioctl(fuse_req_t req, int cmd, void *arg,
813 struct fuse_file_info *fi, unsigned int flags,
814 const void *in_buf, size_t in_bufsz, size_t out_bufsz)
816 struct uvt_client *client = (void*)(uintptr_t)fi->fh;
/* Raw ioctl argument; many VT ioctls pass an integer in the pointer. */
817 uintptr_t uarg = (uintptr_t)arg;
820 struct vt_stat vtstat;
821 struct vt_mode vtmode;
825 fuse_reply_err(req, EINVAL);
827 } else if (uvt_client_is_dead(client)) {
828 fuse_reply_err(req, EPIPE);
830 } else if (!client->vt) {
831 fuse_reply_err(req, ENODEV);
/* 32-bit callers on 64-bit kernels are rejected for now. */
835 /* TODO: fix compat-ioctls */
836 compat = !!(flags & FUSE_IOCTL_COMPAT);
838 fuse_reply_err(req, EOPNOTSUPP);
847 if (ioctl_param(req, arg, 0, in_bufsz, 0, out_bufsz))
849 if (!client->vt->ioctl_TCFLSH) {
850 fuse_reply_err(req, EOPNOTSUPP);
851 ret = client->vt->ioctl_TCFLSH(client->vt_data,
853 (unsigned long)uarg);
855 fuse_reply_err(req, abs(ret));
857 fuse_reply_ioctl(req, 0, NULL, 0);
879 fuse_reply_err(req, EOPNOTSUPP);
885 if (ioctl_param(req, arg, 0, in_bufsz, 0, out_bufsz))
887 if (!client->vt->ioctl_VT_ACTIVATE) {
888 fuse_reply_err(req, EOPNOTSUPP);
890 ret = client->vt->ioctl_VT_ACTIVATE(client->vt_data,
891 (unsigned long)uarg);
893 fuse_reply_err(req, abs(ret));
895 fuse_reply_ioctl(req, 0, NULL, 0);
900 if (ioctl_param(req, arg, 0, in_bufsz, 0, out_bufsz))
902 if (!client->vt->ioctl_VT_WAITACTIVE) {
903 fuse_reply_err(req, EOPNOTSUPP);
905 ret = client->vt->ioctl_VT_WAITACTIVE(client->vt_data,
906 (unsigned long)uarg);
908 fuse_reply_err(req, abs(ret));
910 fuse_reply_ioctl(req, 0, NULL, 0);
/* Out-parameter ioctl: needs sizeof(struct vt_stat) of output space. */
915 if (ioctl_param(req, arg, 0, in_bufsz,
916 sizeof(struct vt_stat), out_bufsz))
918 if (!client->vt->ioctl_VT_GETSTATE) {
919 fuse_reply_err(req, EOPNOTSUPP);
921 memset(&vtstat, 0, sizeof(vtstat));
922 ret = client->vt->ioctl_VT_GETSTATE(client->vt_data,
925 fuse_reply_err(req, abs(ret));
927 fuse_reply_ioctl(req, 0, &vtstat,
933 if (ioctl_param(req, arg, 0, in_bufsz,
934 sizeof(unsigned int), out_bufsz))
936 if (!client->vt->ioctl_VT_OPENQRY) {
937 fuse_reply_err(req, EOPNOTSUPP);
940 ret = client->vt->ioctl_VT_OPENQRY(client->vt_data,
943 fuse_reply_err(req, abs(ret));
945 fuse_reply_ioctl(req, 0, &uval, sizeof(uval));
950 if (ioctl_param(req, arg, 0, in_bufsz,
951 sizeof(struct vt_mode), out_bufsz))
953 if (!client->vt->ioctl_VT_GETMODE) {
954 fuse_reply_err(req, EOPNOTSUPP);
956 memset(&vtmode, 0, sizeof(vtmode));
957 ret = client->vt->ioctl_VT_GETMODE(client->vt_data,
960 fuse_reply_err(req, abs(ret));
962 fuse_reply_ioctl(req, 0, &vtmode,
/* In-parameter ioctl: needs sizeof(struct vt_mode) of input payload. */
968 if (ioctl_param(req, arg, sizeof(struct vt_mode), in_bufsz,
971 if (!client->vt->ioctl_VT_SETMODE) {
972 fuse_reply_err(req, EOPNOTSUPP);
/* The caller's pid is forwarded for signal-based VT switching. */
974 ret = client->vt->ioctl_VT_SETMODE(client->vt_data,
975 (const struct vt_mode*)in_buf,
976 fuse_req_ctx(req)->pid);
978 fuse_reply_err(req, abs(ret));
980 fuse_reply_ioctl(req, 0, NULL, 0);
985 if (ioctl_param(req, arg, 0, in_bufsz, 0, out_bufsz))
987 if (!client->vt->ioctl_VT_RELDISP) {
988 fuse_reply_err(req, EOPNOTSUPP);
990 ret = client->vt->ioctl_VT_RELDISP(client->vt_data,
991 (unsigned long)uarg);
993 fuse_reply_err(req, abs(ret));
995 fuse_reply_ioctl(req, 0, NULL, 0);
1000 if (ioctl_param(req, arg, 0, in_bufsz,
1001 sizeof(unsigned int), out_bufsz))
1003 if (!client->vt->ioctl_KDGETMODE) {
1004 fuse_reply_err(req, EOPNOTSUPP);
1007 ret = client->vt->ioctl_KDGETMODE(client->vt_data,
1010 fuse_reply_err(req, abs(ret));
1012 fuse_reply_ioctl(req, 0, &uval, sizeof(uval));
1017 if (ioctl_param(req, arg, 0, in_bufsz, 0, out_bufsz))
1019 if (!client->vt->ioctl_KDSETMODE) {
1020 fuse_reply_err(req, EOPNOTSUPP);
1022 ret = client->vt->ioctl_KDSETMODE(client->vt_data,
1023 (unsigned int)uarg);
1025 fuse_reply_err(req, abs(ret));
1027 fuse_reply_ioctl(req, 0, NULL, 0);
1032 if (ioctl_param(req, arg, 0, in_bufsz,
1033 sizeof(unsigned int), out_bufsz))
1035 if (!client->vt->ioctl_KDGKBMODE) {
1036 fuse_reply_err(req, EOPNOTSUPP);
1039 ret = client->vt->ioctl_KDGKBMODE(client->vt_data,
1042 fuse_reply_err(req, abs(ret));
1044 fuse_reply_ioctl(req, 0, &uval, sizeof(uval));
1049 if (ioctl_param(req, arg, 0, in_bufsz, 0, out_bufsz))
1051 if (!client->vt->ioctl_KDSKBMODE) {
1052 fuse_reply_err(req, EOPNOTSUPP);
1054 ret = client->vt->ioctl_KDSKBMODE(client->vt_data,
1055 (unsigned int)uarg);
1057 fuse_reply_err(req, abs(ret));
1059 fuse_reply_ioctl(req, 0, NULL, 0);
/* Recognized but deliberately unimplemented console ioctls. */
1091 case VT_SETACTIVATE:
1092 case VT_DISALLOCATE:
1105 case GIO_UNISCRNMAP:
1106 case PIO_UNISCRNMAP:
1111 case VT_UNLOCKSWITCH:
1112 case VT_GETHIFONTMASK:
1114 fuse_reply_err(req, EOPNOTSUPP);
/* Anything else is not a console ioctl at all. */
1117 fuse_reply_err(req, EINVAL);