2 * Copyright © 2013 Red Hat, Inc.
4 * Permission to use, copy, modify, distribute, and sell this software and its
5 * documentation for any purpose is hereby granted without fee, provided that
6 * the above copyright notice appear in all copies and that both that copyright
7 * notice and this permission notice appear in supporting documentation, and
8 * that the name of the copyright holders not be used in advertising or
9 * publicity pertaining to distribution of the software without specific,
10 * written prior permission. The copyright holders make no representations
11 * about the suitability of this software for any purpose. It is provided "as
12 * is" without express or implied warranty.
14 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
15 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
16 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
17 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
18 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
19 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
20 * OF THIS SOFTWARE.
29 #include <linux/uinput.h>
32 #include "libevdev-int.h"
33 #include "libevdev-util.h"
34 #include "event-names.h"
38 static int sync_mt_state(struct libevdev *dev, int create_events);
/* Allocate the device's internal event queue with a fixed capacity of 256
 * events and return queue_alloc()'s result (presumably 0 on success —
 * confirm against queue_alloc's contract).
 * NOTE(review): interior lines of this function are elided in this excerpt. */
41 init_event_queue(struct libevdev *dev)
43 /* FIXME: count the number of axes, keys, etc. to get a better idea at how many events per
44 EV_SYN we could possibly get. Then multiply that by the actual buffer size we care about */
46 const int QUEUE_SIZE = 256;
48 return queue_alloc(dev, QUEUE_SIZE);
/* Forward a printf-style message to the device's installed log handler.
 * The handler receives the format string plus a va_list built from the
 * variadic arguments. NOTE(review): the matching va_end appears elided in
 * this excerpt — confirm it exists in the full source. */
52 _libevdev_log(struct libevdev *dev, const char *format, ...)
56 va_start(args, format);
57 dev->log(format, args);
/* Default log handler: silently discards all messages (body elided here).
 * Installed by libevdev_new and whenever a NULL handler is set. */
62 libevdev_noop_log_func(const char *format, va_list args)
/* Fragment of the device constructor (presumably libevdev_new — the
 * function header is elided in this excerpt). calloc zero-initializes the
 * struct; only the fields whose defaults are non-zero are set explicitly:
 * no active MT slot yet (-1), silent logger, not grabbed, no sync pending. */
71 dev = calloc(1, sizeof(*dev));
76 dev->current_slot = -1;
77 dev->log = libevdev_noop_log_func;
78 dev->grabbed = LIBEVDEV_UNGRAB;
79 dev->sync_state = SYNC_NONE;
/* Convenience constructor: allocate a new device (allocation lines elided
 * in this excerpt) and immediately bind it to fd via libevdev_set_fd,
 * returning the new device through the out-parameter on success. */
85 libevdev_new_from_fd(int fd, struct libevdev **dev)
94 rc = libevdev_set_fd(d, fd);
/* Destructor: releases the device's resources (body elided in this
 * excerpt — presumably frees name/phys/uniq and the queue). */
103 libevdev_free(struct libevdev *dev)
/* Install a user log handler; a NULL logfunc falls back to the no-op
 * handler so dev->log is never NULL when invoked. */
116 libevdev_set_log_handler(struct libevdev *dev, libevdev_log_func_t logfunc)
121 dev->log = logfunc ? logfunc : libevdev_noop_log_func;
/* Swap the underlying fd without re-reading device state (body elided);
 * see the comment at the end of libevdev_set_fd for why key state is
 * deliberately not re-synced here. */
125 libevdev_change_fd(struct libevdev *dev, int fd)
/* Bind fd to the device and snapshot the kernel's view of it: identity
 * strings, id/version/properties, the supported-event bitmasks for every
 * event type, repeat settings, and per-axis absinfo (including MT slot
 * count). NOTE(review): the error-check lines between ioctls are elided
 * in this excerpt — each `rc` is presumably checked in the full source. */
134 libevdev_set_fd(struct libevdev* dev, int fd)
/* Top-level bitmask: which EV_* types the device supports at all. */
143 rc = ioctl(fd, EVIOCGBIT(0, sizeof(dev->bits)), dev->bits);
/* Identity strings; buf is zeroed and the ioctl is given sizeof-1 so the
 * result is always NUL-terminated before strdup. */
147 memset(buf, 0, sizeof(buf));
148 rc = ioctl(fd, EVIOCGNAME(sizeof(buf) - 1), buf);
152 dev->name = strdup(buf);
158 memset(buf, 0, sizeof(buf));
159 rc = ioctl(fd, EVIOCGPHYS(sizeof(buf) - 1), buf);
161 /* uinput has no phys */
165 dev->phys = strdup(buf);
172 memset(buf, 0, sizeof(buf));
173 rc = ioctl(fd, EVIOCGUNIQ(sizeof(buf) - 1), buf);
178 dev->uniq = strdup(buf);
/* Device id (bus/vendor/product/version), driver version, input props. */
185 rc = ioctl(fd, EVIOCGID, &dev->ids);
189 rc = ioctl(fd, EVIOCGVERSION, &dev->driver_version);
193 rc = ioctl(fd, EVIOCGPROP(sizeof(dev->props)), dev->props);
/* Per-type code bitmasks: which codes within each event type exist. */
197 rc = ioctl(fd, EVIOCGBIT(EV_REL, sizeof(dev->rel_bits)), dev->rel_bits);
201 rc = ioctl(fd, EVIOCGBIT(EV_ABS, sizeof(dev->abs_bits)), dev->abs_bits);
205 rc = ioctl(fd, EVIOCGBIT(EV_LED, sizeof(dev->led_bits)), dev->led_bits);
209 rc = ioctl(fd, EVIOCGBIT(EV_KEY, sizeof(dev->key_bits)), dev->key_bits);
213 rc = ioctl(fd, EVIOCGBIT(EV_SW, sizeof(dev->sw_bits)), dev->sw_bits);
217 rc = ioctl(fd, EVIOCGBIT(EV_MSC, sizeof(dev->msc_bits)), dev->msc_bits);
221 rc = ioctl(fd, EVIOCGBIT(EV_FF, sizeof(dev->ff_bits)), dev->ff_bits);
225 rc = ioctl(fd, EVIOCGBIT(EV_SND, sizeof(dev->snd_bits)), dev->snd_bits);
229 /* rep is a special case, always set it to 1 for both values if EV_REP is set */
230 if (bit_is_set(dev->bits, EV_REP)) {
/* NOTE(review): `i < REP_MAX` sets only bit 0 (REP_DELAY) and skips
 * REP_MAX itself (REP_PERIOD), contradicting the comment above —
 * looks like an off-by-one; should presumably be `i <= REP_MAX`. */
231 for (i = 0; i < REP_MAX; i++)
232 set_bit(dev->rep_bits, i);
233 rc = ioctl(fd, EVIOCGREP, dev->rep_values);
/* Snapshot absinfo for every supported absolute axis; ABS_MT_SLOT's
 * maximum defines the slot count and its value the currently active slot. */
238 for (i = ABS_X; i <= ABS_MAX; i++) {
239 if (bit_is_set(dev->abs_bits, i)) {
240 struct input_absinfo abs_info;
241 rc = ioctl(fd, EVIOCGABS(i), &abs_info);
245 dev->abs_info[i] = abs_info;
246 if (i == ABS_MT_SLOT) {
247 dev->num_slots = abs_info.maximum + 1;
248 dev->current_slot = abs_info.value;
/* Populate the MT slot cache without generating events (create_events=0). */
255 sync_mt_state(dev, 0);
257 rc = init_event_queue(dev);
263 /* not copying key state because we won't know when we'll start to
264 * use this fd and key's are likely to change state by then.
265 * Same with the valuators, really, but they may not change.
269 return rc ? -errno : 0;
/* Accessor for the bound fd (body elided in this excerpt). */
273 libevdev_get_fd(const struct libevdev* dev)
/* Fill *ev with the given type/code/value, stamping it with the time of
 * the last real kernel event so synthesized sync events sort coherently.
 * (The type/code/value assignment lines are elided in this excerpt.) */
279 init_event(struct libevdev *dev, struct input_event *ev, int type, int code, int value)
281 ev->time = dev->last_event_time;
/* Resync key state after a SYN_DROPPED: fetch the kernel's current key
 * bitmap via EVIOCGKEY, push an EV_KEY event into the queue for every key
 * whose state differs from our cached copy, and update the cache.
 * NOTE(review): `i < KEY_MAX` skips code KEY_MAX itself, and keystate is
 * sized NLONGS(KEY_MAX) rather than NLONGS(KEY_MAX + 1) — confirm whether
 * the last key code is intentionally excluded. */
288 sync_key_state(struct libevdev *dev)
292 unsigned long keystate[NLONGS(KEY_MAX)];
294 rc = ioctl(dev->fd, EVIOCGKEY(sizeof(keystate)), keystate);
298 for (i = 0; i < KEY_MAX; i++) {
300 old = bit_is_set(dev->key_values, i);
301 new = bit_is_set(keystate, i);
/* The old != new guard is elided in this excerpt; presumably events are
 * only pushed on a state change. */
303 struct input_event *ev = queue_push(dev);
304 init_event(dev, ev, EV_KEY, i, new ? 1 : 0);
306 set_bit_state(dev->key_values, i, new);
311 return rc ? -errno : 0;
/* Resync non-MT absolute axes after a SYN_DROPPED: re-read each supported
 * axis with EVIOCGABS, queue an EV_ABS event for every axis whose value
 * changed, and update the cached absinfo. MT axes are skipped here —
 * they are handled per-slot by sync_mt_state. */
315 sync_abs_state(struct libevdev *dev)
320 for (i = ABS_X; i <= ABS_MAX; i++) {
321 struct input_absinfo abs_info;
/* Skip multitouch axes; the `continue` statements are elided in this
 * excerpt. */
323 if (i >= ABS_MT_MIN && i <= ABS_MT_MAX)
326 if (!bit_is_set(dev->abs_bits, i))
329 rc = ioctl(dev->fd, EVIOCGABS(i), &abs_info);
333 if (dev->abs_info[i].value != abs_info.value) {
334 struct input_event *ev = queue_push(dev);
336 init_event(dev, ev, EV_ABS, i, abs_info.value);
337 dev->abs_info[i].value = abs_info.value;
343 return rc ? -errno : 0;
/* Resync multitouch slot state: fetch every MT axis's per-slot values via
 * EVIOCGMTSLOTS, then (if create_events) emit an ABS_MT_SLOT event per
 * slot followed by EV_ABS events for each value that differs from the
 * cache, updating dev->mt_slot_vals as it goes. Called with
 * create_events=0 from libevdev_set_fd to seed the cache silently.
 * NOTE(review): both loops use `< ABS_MT_MAX`, excluding code ABS_MT_MAX
 * itself — confirm whether that last MT axis is intentionally skipped. */
347 sync_mt_state(struct libevdev *dev, int create_events)
354 } mt_state[ABS_MT_CNT];
356 for (i = ABS_MT_MIN; i < ABS_MT_MAX; i++) {
/* ABS_MT_SLOT is the slot selector, not a per-slot value; skipped. */
358 if (i == ABS_MT_SLOT)
361 if (!libevdev_has_event_code(dev, EV_ABS, i))
364 idx = i - ABS_MT_MIN;
365 mt_state[idx].code = i;
366 rc = ioctl(dev->fd, EVIOCGMTSLOTS(sizeof(struct mt_state)), &mt_state[idx]);
/* Event-creation pass (the `if (create_events)` guards are elided in
 * this excerpt): one slot-select event, then one event per changed axis. */
371 for (i = 0; i < dev->num_slots; i++) {
373 struct input_event *ev;
376 ev = queue_push(dev);
377 init_event(dev, ev, EV_ABS, ABS_MT_SLOT, i);
380 for (j = ABS_MT_MIN; j < ABS_MT_MAX; j++) {
381 int jdx = j - ABS_MT_MIN;
383 if (j == ABS_MT_SLOT)
386 if (!libevdev_has_event_code(dev, EV_ABS, j))
/* Unchanged values produce no event (continue elided). */
389 if (dev->mt_slot_vals[i][jdx] == mt_state[jdx].val[i])
393 ev = queue_push(dev);
394 init_event(dev, ev, EV_ABS, j, mt_state[jdx].val[i]);
396 dev->mt_slot_vals[i][jdx] = mt_state[jdx].val[i];
402 return rc ? -errno : 0;
/* Rebuild a consistent event stream after SYN_DROPPED: drop everything in
 * the queue up to and including the last EV_SYN (the kernel will re-tell
 * us the true state), then regenerate key/abs/MT state events and count
 * them in queue_nsync, terminating with a synthesized SYN_REPORT. */
406 sync_state(struct libevdev *dev)
410 struct input_event *ev;
412 /* FIXME: if we have events in the queue after the SYN_DROPPED (which was
413 queue[0]) we need to shift this backwards. Except that chances are that the
414 queue may be either full or too full to prepend all the events needed for
417 so we search for the last sync event in the queue and drop everything before
418 including that event and rely on the kernel to tell us the right value for that
419 bitfield during the sync process.
/* Walk backwards to find the most recent EV_SYN (loop break elided). */
422 for (i = queue_num_elements(dev) - 1; i >= 0; i--) {
423 struct input_event e;
424 queue_peek(dev, i, &e);
425 if (e.type == EV_SYN)
/* Discard queue[0..i] — the stale pre-drop events. */
430 queue_shift_multiple(dev, i + 1, NULL);
432 if (libevdev_has_event_type(dev, EV_KEY))
433 rc = sync_key_state(dev);
434 if (rc == 0 && libevdev_has_event_type(dev, EV_ABS))
435 rc = sync_abs_state(dev);
436 if (rc == 0 && libevdev_has_event_code(dev, EV_ABS, ABS_MT_SLOT))
437 rc = sync_mt_state(dev, 1);
/* Remember how many sync events we queued so next_event() can report
 * them with LIBEVDEV_READ_STATUS_SYNC semantics. */
439 dev->queue_nsync = queue_num_elements(dev);
441 if (dev->queue_nsync > 0) {
442 ev = queue_push(dev);
443 init_event(dev, ev, EV_SYN, SYN_REPORT, 0);
/* Mirror an incoming EV_KEY event into the cached key bitmap (the value
 * test between lines 456 and 460 is elided in this excerpt; presumably
 * value==0 clears, nonzero sets). Out-of-range codes are rejected. */
451 update_key_state(struct libevdev *dev, const struct input_event *e)
453 if (!libevdev_has_event_type(dev, EV_KEY))
456 if (e->code > KEY_MAX)
460 clear_bit(dev->key_values, e->code);
462 set_bit(dev->key_values, e->code);
/* Mirror an incoming MT event: ABS_MT_SLOT switches the active slot; any
 * other MT axis updates the cache for the current slot. Events arriving
 * before any slot was selected (current_slot == -1) are dropped
 * (the return statements are elided in this excerpt). */
468 update_mt_state(struct libevdev *dev, const struct input_event *e)
470 if (e->code == ABS_MT_SLOT) {
471 dev->current_slot = e->value;
473 } else if (dev->current_slot == -1)
476 dev->mt_slot_vals[dev->current_slot][e->code - ABS_MT_MIN] = e->value;
/* Mirror an incoming EV_ABS event into the absinfo cache, delegating MT
 * codes to update_mt_state. */
482 update_abs_state(struct libevdev *dev, const struct input_event *e)
484 if (!libevdev_has_event_type(dev, EV_ABS))
487 if (e->code > ABS_MAX)
490 if (e->code >= ABS_MT_MIN && e->code <= ABS_MT_MAX)
491 return update_mt_state(dev, e);
493 dev->abs_info[e->code].value = e->value;
/* Dispatch an incoming event to the per-type state mirror (the switch on
 * e->type is elided in this excerpt) and record its timestamp as the
 * last-event time used when synthesizing sync events. */
499 update_state(struct libevdev *dev, const struct input_event *e)
508 rc = update_key_state(dev, e);
511 rc = update_abs_state(dev, e);
515 dev->last_event_time = e->time;
/* Pull as many events from the fd as fit in the queue's free space,
 * reading directly into the queue's next free element and bumping the
 * element count. A short read that is not a whole multiple of
 * sizeof(struct input_event) is treated as an error (the error returns
 * between these lines are elided in this excerpt). */
521 read_more_events(struct libevdev *dev)
525 struct input_event *next;
527 free_elem = queue_num_free_elements(dev);
531 next = queue_next_element(dev);
532 len = read(dev->fd, next, free_elem * sizeof(struct input_event));
535 } else if (len > 0 && len % sizeof(struct input_event) != 0)
538 int nev = len/sizeof(struct input_event);
539 queue_set_num_elements(dev, queue_num_elements(dev) + nev);
/* Core event pump. Depending on flags: drive the SYN_DROPPED resync state
 * machine (READ_SYNC), force one (FORCE_SYNC), or deliver the next normal
 * event, keeping the library's cached state in step via update_state().
 * Returns sync/normal status codes (return values elided in this excerpt). */
545 int libevdev_next_event(struct libevdev *dev, unsigned int flags, struct input_event *ev)
552 if (!(flags & (LIBEVDEV_READ_NORMAL|LIBEVDEV_READ_SYNC|LIBEVDEV_FORCE_SYNC)))
555 if (flags & LIBEVDEV_READ_SYNC) {
/* First sync call after a drop: build the sync-event queue. */
556 if (dev->sync_state == SYNC_NEEDED) {
557 rc = sync_state(dev);
560 dev->sync_state = SYNC_IN_PROGRESS;
563 if (dev->queue_nsync == 0) {
564 dev->sync_state = SYNC_NONE;
/* Caller abandoned a sync in progress: consume the remaining sync
 * events internally so the cached state stays correct. */
568 } else if (dev->sync_state != SYNC_NONE) {
569 struct input_event e;
571 /* call update_state for all events here, otherwise the library has the wrong view
573 while (queue_shift(dev, &e) == 0) {
575 update_state(dev, &e);
578 dev->sync_state = SYNC_NONE;
581 /* FIXME: if the first event after SYNC_IN_PROGRESS is a SYN_DROPPED, log this */
583 /* Always read in some more events. Best case this smoothes over a potential SYN_DROPPED,
584 worst case we don't read fast enough and end up with SYN_DROPPED anyway.
586 Except if the fd is in blocking mode and we still have events from the last read, don't
590 if (!(flags & LIBEVDEV_READ_BLOCKING) ||
591 queue_num_elements(dev) == 0) {
592 rc = read_more_events(dev);
593 if (rc < 0 && rc != -EAGAIN)
597 if (flags & LIBEVDEV_FORCE_SYNC) {
598 dev->sync_state = SYNC_NEEDED;
/* Pop events, skipping any whose code the client has disabled. */
604 if (queue_shift(dev, ev) != 0)
607 update_state(dev, ev);
609 /* if we disabled a code, get the next event instead */
610 } while(!libevdev_has_event_code(dev, ev->type, ev->code));
/* A SYN_DROPPED from the kernel means we lost events: flag a resync. */
613 if (ev->type == EV_SYN && ev->code == SYN_DROPPED) {
614 dev->sync_state = SYNC_NEEDED;
/* While delivering sync events, count them down; sync ends at zero. */
618 if (flags & LIBEVDEV_READ_SYNC && dev->queue_nsync > 0) {
621 if (dev->queue_nsync == 0)
622 dev->sync_state = SYNC_NONE;
/* ---- Read-only accessors over the state cached by libevdev_set_fd ---- */

/* Device name; never NULL — an unset name reads as "". */
630 libevdev_get_name(const struct libevdev *dev)
632 return dev->name ? dev->name : "";
/* phys/uniq accessors (bodies elided in this excerpt; note phys/uniq may
 * legitimately be NULL, e.g. uinput devices have no phys — see set_fd). */
636 libevdev_get_phys(const struct libevdev *dev)
642 libevdev_get_uniq(const struct libevdev *dev)
/* input_id fields captured via EVIOCGID. */
647 int libevdev_get_product_id(const struct libevdev *dev)
649 return dev->ids.product;
652 int libevdev_get_vendor_id(const struct libevdev *dev)
654 return dev->ids.vendor;
657 int libevdev_get_bustype(const struct libevdev *dev)
659 return dev->ids.bustype;
662 int libevdev_get_version(const struct libevdev *dev)
664 return dev->ids.version;
667 int libevdev_get_driver_version(const struct libevdev *dev)
669 return dev->driver_version;
/* Capability queries against the cached bitmasks; all range-check their
 * argument and return 0 (false) when out of range or unsupported. */
673 libevdev_has_property(const struct libevdev *dev, unsigned int prop)
675 return (prop <= INPUT_PROP_MAX) && bit_is_set(dev->props, prop);
679 libevdev_has_event_type(const struct libevdev *dev, unsigned int type)
681 return (type <= EV_MAX) && bit_is_set(dev->bits, type);
685 libevdev_has_event_code(const struct libevdev *dev, unsigned int type, unsigned int code)
687 const unsigned long *mask;
690 if (!libevdev_has_event_type(dev, type))
/* type_to_mask_const maps type -> per-type code bitmask + max code;
 * the EV_SYN special case between these lines is elided in this excerpt. */
696 max = type_to_mask_const(dev, type, &mask);
698 if (max == -1 || code > max)
701 return bit_is_set(mask, code);
/* Current cached value of a code; 0 when type/code unsupported (the
 * switch's remaining cases and default are elided in this excerpt). */
705 libevdev_get_event_value(const struct libevdev *dev, unsigned int type, unsigned int code)
709 if (!libevdev_has_event_type(dev, type) || !libevdev_has_event_code(dev, type, code))
713 case EV_ABS: value = dev->abs_info[code].value; break;
714 case EV_KEY: value = bit_is_set(dev->key_values, code); break;
/* fetch variant: writes through *value and returns whether the code is
 * valid, so callers can distinguish "value 0" from "unsupported". */
724 libevdev_fetch_event_value(const struct libevdev *dev, unsigned int type, unsigned int code, int *value)
726 if (libevdev_has_event_type(dev, type) &&
727 libevdev_has_event_code(dev, type, code)) {
728 *value = libevdev_get_event_value(dev, type, code);
/* Per-slot MT value from the cache; rejects non-MT codes and slots
 * outside [0, num_slots) or beyond the compile-time MAX_SLOTS. */
735 libevdev_get_slot_value(const struct libevdev *dev, unsigned int slot, unsigned int code)
737 if (!libevdev_has_event_type(dev, EV_ABS) || !libevdev_has_event_code(dev, EV_ABS, code))
740 if (slot >= dev->num_slots || slot >= MAX_SLOTS)
743 if (code > ABS_MT_MAX || code < ABS_MT_MIN)
746 return dev->mt_slot_vals[slot][code - ABS_MT_MIN];
750 libevdev_fetch_slot_value(const struct libevdev *dev, unsigned int slot, unsigned int code, int *value)
752 if (libevdev_has_event_type(dev, EV_ABS) &&
753 libevdev_has_event_code(dev, EV_ABS, code) &&
754 slot < dev->num_slots && slot < MAX_SLOTS) {
755 *value = libevdev_get_slot_value(dev, slot, code);
762 libevdev_get_num_slots(const struct libevdev *dev)
764 return dev->num_slots;
768 libevdev_get_current_slot(const struct libevdev *dev)
770 return dev->current_slot;
/* Pointer into the cached absinfo array, or NULL for invalid codes; the
 * pointer stays owned by dev. */
773 const struct input_absinfo*
774 libevdev_get_abs_info(const struct libevdev *dev, unsigned int code)
776 if (!libevdev_has_event_type(dev, EV_ABS) ||
777 !libevdev_has_event_code(dev, EV_ABS, code))
780 return &dev->abs_info[code];
/* Field-wise absinfo accessors: all return 0 when the axis is invalid,
 * which is indistinguishable from a genuine 0 — use get_abs_info to tell
 * the two apart. */
784 libevdev_get_abs_min(const struct libevdev *dev, unsigned int code)
786 const struct input_absinfo *absinfo = libevdev_get_abs_info(dev, code);
788 return absinfo ? absinfo->minimum : 0;
792 libevdev_get_abs_max(const struct libevdev *dev, unsigned int code)
794 const struct input_absinfo *absinfo = libevdev_get_abs_info(dev, code);
796 return absinfo ? absinfo->maximum : 0;
800 libevdev_get_abs_fuzz(const struct libevdev *dev, unsigned int code)
802 const struct input_absinfo *absinfo = libevdev_get_abs_info(dev, code);
804 return absinfo ? absinfo->fuzz : 0;
808 libevdev_get_abs_flat(const struct libevdev *dev, unsigned int code)
810 const struct input_absinfo *absinfo = libevdev_get_abs_info(dev, code);
812 return absinfo ? absinfo->flat : 0;
816 libevdev_get_abs_resolution(const struct libevdev *dev, unsigned int code)
818 const struct input_absinfo *absinfo = libevdev_get_abs_info(dev, code);
820 return absinfo ? absinfo->resolution : 0;
/* Client-side capability editing: these flip bits in the *library's*
 * cached masks only — they do not change the kernel device (contrast
 * libevdev_kernel_set_abs_value below). */
824 libevdev_enable_event_type(struct libevdev *dev, unsigned int type)
829 set_bit(dev->bits, type);
/* EV_SYN can never be disabled — it frames every event batch. */
835 libevdev_disable_event_type(struct libevdev *dev, unsigned int type)
837 if (type > EV_MAX || type == EV_SYN)
840 clear_bit(dev->bits, type);
/* Enable a single code; EV_ABS additionally requires an input_absinfo via
 * `data` (the range checks and set_bit on the mask between these lines
 * are elided in this excerpt). data must be NULL for all other types. */
846 libevdev_enable_event_code(struct libevdev *dev, unsigned int type,
847 unsigned int code, const void *data)
852 if (libevdev_enable_event_type(dev, type))
855 if (type != EV_ABS && data != NULL)
861 max = type_to_mask(dev, type, &mask);
868 if (type == EV_ABS) {
869 const struct input_absinfo *abs = data;
870 dev->abs_info[code] = *abs;
/* Clear a single code from the per-type mask (validation elided). */
877 libevdev_disable_event_code(struct libevdev *dev, unsigned int type, unsigned int code)
885 max = type_to_mask(dev, type, &mask);
890 clear_bit(mask, code);
/* Push new absinfo into the kernel with EVIOCSABS, then mirror it into
 * the library cache via enable_event_code so both views agree. */
896 libevdev_kernel_set_abs_value(struct libevdev *dev, unsigned int code, const struct input_absinfo *abs)
903 rc = ioctl(dev->fd, EVIOCSABS(code), abs);
907 rc = libevdev_enable_event_code(dev, EV_ABS, code, abs);
/* Grab or release exclusive access to the device via EVIOCGRAB. Grabbing
 * an already-grabbed device (or ungrabbing an ungrabbed one) is a no-op
 * success. The dev->grabbed bookkeeping after the ioctl is elided in this
 * excerpt. Returns 0 on success or -errno. */
913 libevdev_grab(struct libevdev *dev, int grab)
917 if (grab != LIBEVDEV_GRAB && grab != LIBEVDEV_UNGRAB)
920 if (grab == dev->grabbed)
/* EVIOCGRAB takes 1 to grab, 0 to release, passed as the ioctl arg. */
923 if (grab == LIBEVDEV_GRAB)
924 rc = ioctl(dev->fd, EVIOCGRAB, (void *)1);
925 else if (grab == LIBEVDEV_UNGRAB)
926 rc = ioctl(dev->fd, EVIOCGRAB, (void *)0);
931 return rc < 0 ? -errno : 0;
/* Stateless event-matching helpers (no device needed).
 * NOTE(review): both use `type < EV_MAX`, which rejects EV_MAX itself,
 * whereas the device-based checks above use `type <= EV_MAX` — confirm
 * whether the asymmetry is intentional. */
935 libevdev_is_event_type(const struct input_event *ev, unsigned int type)
937 return type < EV_MAX && ev->type == type;
941 libevdev_is_event_code(const struct input_event *ev, unsigned int type, unsigned int code)
943 return type < EV_MAX &&
/* EV_SYN has no meaningful per-code max here, hence the special case. */
945 (type == EV_SYN || code <= libevdev_get_event_type_max(type)) &&
/* Name lookups backed by the tables in event-names.h; the range checks on
 * `type` between these lines are elided in this excerpt. NULL for
 * out-of-range input. */
950 libevdev_get_event_type_name(unsigned int type)
959 libevdev_get_event_code_name(unsigned int type, unsigned int code)
964 if (code > ev_max[type])
967 return event_type_map[type][code];
971 libevdev_get_input_prop_name(unsigned int prop)
973 if (prop > INPUT_PROP_MAX)
976 return input_prop_map[prop];
/* Highest valid code for an event type, from the ev_max table (body
 * elided in this excerpt; presumably -1 for invalid types, as checked by
 * libevdev_has_event_code above). */
980 libevdev_get_event_type_max(unsigned int type)
/* Report the cached key-repeat settings captured by EVIOCGREP in
 * libevdev_set_fd; out-parameters are presumably NULL-checked on the
 * elided lines between these. Fails when the device lacks EV_REP. */
989 libevdev_get_repeat(struct libevdev *dev, int *delay, int *period)
991 if (!libevdev_has_event_type(dev, EV_REP))
995 *delay = dev->rep_values[REP_DELAY];
997 *period = dev->rep_values[REP_PERIOD];