2 * Copyright © 2013 Red Hat, Inc.
4 * Permission to use, copy, modify, distribute, and sell this software and its
5 * documentation for any purpose is hereby granted without fee, provided that
6 * the above copyright notice appear in all copies and that both that copyright
7 * notice and this permission notice appear in supporting documentation, and
8 * that the name of the copyright holders not be used in advertising or
9 * publicity pertaining to distribution of the software without specific,
10 * written prior permission. The copyright holders make no representations
11 * about the suitability of this software for any purpose. It is provided "as
12 * is" without express or implied warranty.
14 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
15 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
16 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
17 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
18 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
19 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31 #include "libevdev-int.h"
32 #include "event-names.h"
37 bit_is_set(const unsigned long *array, int bit)
39 return !!(array[bit / LONG_BITS] & (1LL << (bit % LONG_BITS)));
43 set_bit(unsigned long *array, int bit)
45 array[bit / LONG_BITS] |= (1LL << (bit % LONG_BITS));
49 clear_bit(unsigned long *array, int bit)
51 array[bit / LONG_BITS] &= ~(1LL << (bit % LONG_BITS));
/* Set or clear the given bit depending on whether state is non-zero. */
static void
set_bit_state(unsigned long *array, int bit, int state)
{
	if (state)
		set_bit(array, bit);
	else
		clear_bit(array, bit);
}
64 type_to_mask_const(const struct libevdev *dev, unsigned int type, const unsigned long **mask)
70 *mask = dev->abs_bits;
74 *mask = dev->rel_bits;
78 *mask = dev->key_bits;
82 *mask = dev->led_bits;
93 type_to_mask(struct libevdev *dev, unsigned int type, unsigned long **mask)
99 *mask = dev->abs_bits;
103 *mask = dev->rel_bits;
107 *mask = dev->key_bits;
111 *mask = dev->led_bits;
/*
 * Allocate the device's event queue.
 * FIXME: count the number of axes, keys, etc. to get a better idea at
 * how many events per EV_SYN we could possibly get. Then multiply that
 * by the actual buffer size we care about.
 */
static int
init_event_queue(struct libevdev *dev)
{
	const int QUEUE_SIZE = 256;

	return queue_alloc(dev, QUEUE_SIZE);
}
133 _libevdev_log(struct libevdev *dev, const char *format, ...)
137 va_start(args, format);
138 dev->log(format, args);
/* Default log handler: silently discard all messages. */
static void
libevdev_noop_log_func(const char *format, va_list args)
{
}
/* Fragment of the device constructor (view is incomplete: the function
 * signature and some initialisation lines are not visible here).
 * Allocates a zeroed struct libevdev and sets non-zero defaults. */
150 struct libevdev *dev;

152 dev = calloc(1, sizeof(*dev));
/* calloc zero-initialises every field; only non-zero defaults follow.
 * current_slot starts at -1 = "no MT slot seen yet". */
157 dev->current_slot = -1;
/* install a no-op logger so dev->log is always callable */
158 dev->log = libevdev_noop_log_func;
/* Convenience constructor: allocate a device and bind it to an already
 * open evdev fd. NOTE(review): allocation and error paths are not
 * visible in this view. */
164 libevdev_new_from_fd(int fd, struct libevdev **dev)
173 rc = libevdev_set_fd(d, fd);
/* Free a device and its owned resources (name/phys/uniq strings, event
 * queue). NOTE(review): body not visible in this view. */
182 libevdev_free(struct libevdev *dev)
/* Install a custom log handler; passing NULL restores the no-op
 * handler so dev->log is never a NULL pointer. */
195 libevdev_set_log_handler(struct libevdev *dev, libevdev_log_func_t logfunc)
200 dev->log = logfunc ? logfunc : libevdev_noop_log_func;
/* Swap the underlying fd without re-reading device state.
 * NOTE(review): body not visible in this view. */
204 libevdev_change_fd(struct libevdev *dev, int fd)
/* Bind the device to an evdev fd and snapshot its static state:
 * identity strings, id/version, property and event-code bitmasks, and
 * per-axis absinfo. NOTE(review): error-handling lines between the
 * visible ioctls are not shown in this view. */
213 libevdev_set_fd(struct libevdev* dev, int fd)
/* EVIOCGBIT(0, ...) fetches the supported event *types* bitmask */
222 rc = ioctl(fd, EVIOCGBIT(0, sizeof(dev->bits)), dev->bits);
/* device name; buf zeroed and sized-1 so the string is NUL-terminated */
226 memset(buf, 0, sizeof(buf));
227 rc = ioctl(fd, EVIOCGNAME(sizeof(buf) - 1), buf);
/* NOTE(review): calloc+strcpy here is equivalent to strdup(buf) */
231 dev->name = calloc(strlen(buf) + 1, sizeof(char));
236 strcpy(dev->name, buf);
238 memset(buf, 0, sizeof(buf));
239 rc = ioctl(fd, EVIOCGPHYS(sizeof(buf) - 1), buf);
/* uinput has no phys */
245 dev->phys = calloc(strlen(buf) + 1, sizeof(char));
250 strcpy(dev->phys, buf);
253 memset(buf, 0, sizeof(buf));
254 rc = ioctl(fd, EVIOCGUNIQ(sizeof(buf) - 1), buf);
259 dev->uniq = calloc(strlen(buf) + 1, sizeof(char));
264 strcpy(dev->uniq, buf);
/* bus/vendor/product ids, driver version, input properties */
267 rc = ioctl(fd, EVIOCGID, &dev->ids);
271 rc = ioctl(fd, EVIOCGVERSION, &dev->driver_version);
275 rc = ioctl(fd, EVIOCGPROP(sizeof(dev->props)), dev->props);
/* per-type event-code bitmasks */
279 rc = ioctl(fd, EVIOCGBIT(EV_REL, sizeof(dev->rel_bits)), dev->rel_bits);
283 rc = ioctl(fd, EVIOCGBIT(EV_ABS, sizeof(dev->abs_bits)), dev->abs_bits);
287 rc = ioctl(fd, EVIOCGBIT(EV_LED, sizeof(dev->led_bits)), dev->led_bits);
291 rc = ioctl(fd, EVIOCGBIT(EV_KEY, sizeof(dev->key_bits)), dev->key_bits);
/* snapshot absinfo for each advertised EV_ABS axis */
295 for (i = ABS_X; i <= ABS_MAX; i++) {
296 if (bit_is_set(dev->abs_bits, i)) {
297 struct input_absinfo abs_info;
298 rc = ioctl(fd, EVIOCGABS(i), &abs_info);
302 dev->abs_info[i] = abs_info;
303 if (i == ABS_MT_SLOT) {
304 dev->num_slots = abs_info.maximum + 1; /* FIXME: non-zero min? */
305 dev->current_slot = abs_info.value;
311 rc = init_event_queue(dev);
/* not copying key state because we won't know when we'll start to
315 /* not copying key state because we won't know when we'll start to
316 * use this fd and key's are likely to change state by then.
317 * Same with the valuators, really, but they may not change.
/* NOTE(review): -errno is only meaningful if rc was set by a failing
 * syscall immediately before this return — verify errno isn't stale
 * when rc comes from init_event_queue(). */
323 return rc ? -errno : 0;
/* Return the fd currently bound to the device.
 * NOTE(review): body not visible in this view. */
327 libevdev_get_fd(const struct libevdev* dev)
/* Fill in a synthesised input_event, stamping it with the timestamp of
 * the last real event seen on this device. */
333 init_event(struct libevdev *dev, struct input_event *ev, int type, int code, int value)
335 ev->time = dev->last_event_time;
/* Resync key state after a SYN_DROPPED: fetch the kernel's current key
 * bitmask and queue a synthetic EV_KEY event for every key whose state
 * differs from our cached copy. */
342 sync_key_state(struct libevdev *dev)
/* NOTE(review): NLONGS(KEY_MAX) looks one long short when the bit
 * count needs KEY_MAX+1 bits — should probably be NLONGS(KEY_MAX + 1);
 * confirm against the NLONGS definition. */
346 unsigned long keystate[NLONGS(KEY_MAX)];
348 rc = ioctl(dev->fd, EVIOCGKEY(sizeof(keystate)), keystate);
/* NOTE(review): `i < KEY_MAX` skips code KEY_MAX itself — verify this
 * is intentional. */
352 for (i = 0; i < KEY_MAX; i++) {
354 old = bit_is_set(dev->key_values, i);
355 new = bit_is_set(keystate, i);
/* state changed: queue a synthetic event and update the cache */
357 struct input_event *ev = queue_push(dev);
358 init_event(dev, ev, EV_KEY, i, new ? 1 : 0);
360 set_bit_state(dev->key_values, i, new);
365 return rc ? -errno : 0;
/* Resync absolute-axis state after a SYN_DROPPED: re-read every
 * non-multitouch EV_ABS axis and queue a synthetic event for each axis
 * whose value changed. MT axes are handled by sync_mt_state(). */
369 sync_abs_state(struct libevdev *dev)
374 for (i = ABS_X; i <= ABS_MAX; i++) {
375 struct input_absinfo abs_info;
/* skip MT axes; they are slot-based and synced separately */
377 if (i >= ABS_MT_MIN && i <= ABS_MT_MAX)
380 if (!bit_is_set(dev->abs_bits, i))
383 rc = ioctl(dev->fd, EVIOCGABS(i), &abs_info);
387 if (dev->abs_info[i].value != abs_info.value) {
388 struct input_event *ev = queue_push(dev);
390 init_event(dev, ev, EV_ABS, i, abs_info.value);
391 dev->abs_info[i].value = abs_info.value;
397 return rc ? -errno : 0;
/* Resync multitouch slot state after a SYN_DROPPED: fetch per-slot
 * values for every MT axis via EVIOCGMTSLOTS, then queue synthetic
 * ABS_MT_SLOT + axis events for each slot whose cached value differs. */
401 sync_mt_state(struct libevdev *dev)
408 } mt_state[ABS_MT_CNT];

/* NOTE(review): `i < ABS_MT_MAX` excludes the last MT axis
 * (ABS_MT_MAX itself) — likely should be `<=`; confirm upstream. */
410 for (i = ABS_MT_MIN; i < ABS_MT_MAX; i++) {
412 if (i == ABS_MT_SLOT)
415 idx = i - ABS_MT_MIN;
416 mt_state[idx].code = i;
/* NOTE(review): size arg references `struct mt_state` — confirm the
 * local struct declaration (not visible here) carries that tag. */
417 rc = ioctl(dev->fd, EVIOCGMTSLOTS(sizeof(struct mt_state)), &mt_state[idx]);
/* emit one slot-selection event, then any changed axis values */
422 for (i = 0; i < dev->num_slots; i++) {
424 struct input_event *ev;

426 ev = queue_push(dev);
427 init_event(dev, ev, EV_ABS, ABS_MT_SLOT, i);
428 for (j = ABS_MT_MIN; j < ABS_MT_MAX; j++) {
429 int jdx = j - ABS_MT_MIN;

431 if (j == ABS_MT_SLOT)
434 if (dev->mt_slot_vals[i][jdx] == mt_state[jdx].val[i])
437 ev = queue_push(dev);
438 init_event(dev, ev, EV_ABS, j, mt_state[jdx].val[i]);
439 dev->mt_slot_vals[i][jdx] = mt_state[jdx].val[i];
445 return rc ? -errno : 0;
/* Rebuild device state after a SYN_DROPPED: drop the stale part of the
 * queue, re-read key/abs/MT state from the kernel (queuing synthetic
 * events for anything that changed), and terminate with SYN_REPORT. */
449 sync_state(struct libevdev *dev)
453 struct input_event *ev;

/* FIXME: if we have events in the queue after the SYN_DROPPED (which was
455 /* FIXME: if we have events in the queue after the SYN_DROPPED (which was
456 queue[0]) we need to shift this backwards. Except that chances are that the
457 queue may be either full or too full to prepend all the events needed for
460 so we search for the last sync event in the queue and drop everything before
461 including that event and rely on the kernel to tell us the right value for that
462 bitfield during the sync process.
465 for (i = queue_num_elements(dev) - 1; i >= 0; i--) {
466 struct input_event e;
467 queue_peek(dev, i, &e);
468 if (e.type == EV_SYN)
/* drop everything up to and including the last EV_SYN found */
473 queue_shift_multiple(dev, i + 1, NULL);

/* NOTE(review): if the device has no EV_KEY, `rc` may be read
 * uninitialized at the next check unless it is initialised at its
 * declaration (not visible here) — confirm. */
475 if (libevdev_has_event_type(dev, EV_KEY))
476 rc = sync_key_state(dev);
477 if (rc == 0 && libevdev_has_event_type(dev, EV_ABS))
478 rc = sync_abs_state(dev);
479 if (rc == 0 && libevdev_has_event_code(dev, EV_ABS, ABS_MT_SLOT))
480 rc = sync_mt_state(dev);

482 ev = queue_push(dev);
483 init_event(dev, ev, EV_SYN, SYN_REPORT, 0);
/* remember how many queued events belong to the sync sequence */
485 dev->queue_nsync = queue_num_elements(dev);
/* Mirror an EV_KEY event into the cached key bitmask, ignoring devices
 * without EV_KEY and out-of-range codes. */
492 update_key_state(struct libevdev *dev, const struct input_event *e)
494 if (!libevdev_has_event_type(dev, EV_KEY))
497 if (e->code > KEY_MAX)
/* release clears the bit, press/repeat sets it */
501 clear_bit(dev->key_values, e->code);
503 set_bit(dev->key_values, e->code);
/* Mirror a multitouch EV_ABS event into the per-slot value cache.
 * ABS_MT_SLOT switches the active slot; other MT codes update the
 * current slot's value. */
509 update_mt_state(struct libevdev *dev, const struct input_event *e)
511 if (e->code == ABS_MT_SLOT) {
512 dev->current_slot = e->value;
/* no slot selected yet: nothing to record */
514 } else if (dev->current_slot == -1)
/* NOTE(review): current_slot is not bounds-checked against the
 * mt_slot_vals array size before indexing — confirm a check exists in
 * the lines not visible here. */
517 dev->mt_slot_vals[dev->current_slot][e->code - ABS_MT_MIN] = e->value;
/* Mirror an EV_ABS event into the cached axis values, routing
 * multitouch codes to update_mt_state(). */
523 update_abs_state(struct libevdev *dev, const struct input_event *e)
525 if (!libevdev_has_event_type(dev, EV_ABS))
528 if (e->code > ABS_MAX)
531 if (e->code >= ABS_MT_MIN && e->code <= ABS_MT_MAX)
532 return update_mt_state(dev, e);
534 dev->abs_info[e->code].value = e->value;
/* Route an incoming event to the matching per-type state updater and
 * remember its timestamp for synthesised events. */
540 update_state(struct libevdev *dev, const struct input_event *e)
549 rc = update_key_state(dev, e);
552 rc = update_abs_state(dev, e);
556 dev->last_event_time = e->time;
/* Read as many events as fit into the free tail of the queue and bump
 * the queue's element count accordingly. */
562 read_more_events(struct libevdev *dev)
566 struct input_event *next;

568 free_elem = queue_num_free_elements(dev);
572 next = queue_next_element(dev);
573 len = read(dev->fd, next, free_elem * sizeof(struct input_event));
/* evdev reads are whole events; a partial struct indicates a problem */
576 } else if (len > 0 && len % sizeof(struct input_event) != 0)
579 int nev = len/sizeof(struct input_event);
580 queue_set_num_elements(dev, queue_num_elements(dev) + nev);
/* Fetch the next event for the caller. In SYNC mode, first drain the
 * synthetic events generated by sync_state(); in normal mode, read
 * fresh events from the fd and pop the queue head. Detecting
 * SYN_DROPPED flips the device into needs-sync state. */
586 int libevdev_next_event(struct libevdev *dev, unsigned int flags, struct input_event *ev)
593 if (flags & LIBEVDEV_READ_SYNC) {
/* sync requested but nothing to sync: nothing to deliver */
594 if (!dev->need_sync && dev->queue_nsync == 0)
596 else if (dev->need_sync) {
597 rc = sync_state(dev);
/* normal read while a sync was pending: discard the sync events */
601 } else if (dev->need_sync) {
/* FIXME: still need to call update_state for all events
602 /* FIXME: still need to call update_state for all events
603 * here, otherwise the library has the wrong view of the
605 queue_shift_multiple(dev, dev->queue_nsync, NULL);

/* FIXME: check for O_NONBLOCK and if not set, skip if we have an
608 /* FIXME: check for O_NONBLOCK and if not set, skip if we have an
609 * event in the queue from the previous read.
/* Always read in some more events. Best case this smoothes over a potential SYN_DROPPED,
612 /* Always read in some more events. Best case this smoothes over a potential SYN_DROPPED,
613 worst case we don't read fast enough and end up with SYN_DROPPED anyway */
614 rc = read_more_events(dev);
/* -EAGAIN just means no data right now; anything else is fatal */
615 if (rc < 0 && rc != -EAGAIN)
618 if (queue_shift(dev, ev) != 0)
/* keep the library's cached state in step with delivered events */
621 update_state(dev, ev);
624 if (ev->type == EV_SYN && ev->code == SYN_DROPPED) {
/* count down the remaining sync events being handed out */
629 if (flags & LIBEVDEV_READ_SYNC && dev->queue_nsync > 0) {
/* Accessors for the identity strings captured in libevdev_set_fd().
 * NOTE(review): bodies not visible in this view. */
639 libevdev_get_name(const struct libevdev *dev)
645 libevdev_get_phys(const struct libevdev *dev)
651 libevdev_get_uniq(const struct libevdev *dev)
/* Trivial accessors for the input_id fields and driver version fetched
 * via EVIOCGID / EVIOCGVERSION in libevdev_set_fd(). */
656 int libevdev_get_product_id(const struct libevdev *dev)
658 return dev->ids.product;
661 int libevdev_get_vendor_id(const struct libevdev *dev)
663 return dev->ids.vendor;
666 int libevdev_get_bustype(const struct libevdev *dev)
668 return dev->ids.bustype;
671 int libevdev_get_version(const struct libevdev *dev)
673 return dev->ids.version;
676 int libevdev_get_driver_version(const struct libevdev *dev)
678 return dev->driver_version;
682 libevdev_has_property(const struct libevdev *dev, unsigned int prop)
684 return (prop <= INPUT_PROP_MAX) && bit_is_set(dev->props, prop);
688 libevdev_has_event_type(const struct libevdev *dev, unsigned int type)
690 return (type <= EV_MAX) && bit_is_set(dev->bits, type);
/* Return non-zero if the device advertises the given event code within
 * the given type. */
694 libevdev_has_event_code(const struct libevdev *dev, unsigned int type, unsigned int code)
696 const unsigned long *mask;

699 if (!libevdev_has_event_type(dev, type))
/* max is the highest valid code for this type, or -1 if unsupported */
705 max = type_to_mask_const(dev, type, &mask);
/* NOTE(review): a `code > max` / `max == -1` guard is presumably in
 * the lines not visible here — confirm before relying on this. */
710 return bit_is_set(mask, code);
/* Return the cached current value for the given type/code, or a
 * default when the device does not have that code. */
714 libevdev_get_event_value(const struct libevdev *dev, unsigned int type, unsigned int code)
718 if (!libevdev_has_event_type(dev, type) || !libevdev_has_event_code(dev, type, code))
/* EV_ABS reads the cached absinfo value; EV_KEY reads the key bitmask */
722 case EV_ABS: value = dev->abs_info[code].value; break;
723 case EV_KEY: value = bit_is_set(dev->key_values, code); break;
/* Write the cached value for type/code into *value if the device has
 * that code; otherwise *value is left untouched. */
733 libevdev_fetch_event_value(const struct libevdev *dev, unsigned int type, unsigned int code, int *value)
735 if (libevdev_has_event_type(dev, type) &&
736 libevdev_has_event_code(dev, type, code)) {
737 *value = libevdev_get_event_value(dev, type, code);
/* Return the cached value of a multitouch code for the given slot. */
744 libevdev_get_slot_value(const struct libevdev *dev, unsigned int slot, unsigned int code)
746 if (!libevdev_has_event_type(dev, EV_ABS) || !libevdev_has_event_code(dev, EV_ABS, code))
/* NOTE(review): num_slots can be -1 (no MT device); comparing the
 * unsigned `slot` against it converts -1 to a huge unsigned value, so
 * only the MAX_SLOTS check catches that case — confirm intent. */
749 if (slot >= dev->num_slots || slot >= MAX_SLOTS)
750 if (code > ABS_MT_MAX || code < ABS_MT_MIN)
752 if (code > ABS_MT_MAX || code < ABS_MT_MIN)
755 return dev->mt_slot_vals[slot][code - ABS_MT_MIN];
/* Write the cached per-slot value for an MT code into *value if the
 * device has that code and the slot is in range; otherwise *value is
 * left untouched. */
759 libevdev_fetch_slot_value(const struct libevdev *dev, unsigned int slot, unsigned int code, int *value)
761 if (libevdev_has_event_type(dev, EV_ABS) &&
762 libevdev_has_event_code(dev, EV_ABS, code) &&
763 slot < dev->num_slots && slot < MAX_SLOTS) {
764 *value = libevdev_get_slot_value(dev, slot, code);
/* Number of MT slots (from ABS_MT_SLOT maximum + 1) and the currently
 * active slot; both are -1 when the device has no MT state. */
771 libevdev_get_num_slots(const struct libevdev *dev)
773 return dev->num_slots;
777 libevdev_get_current_slot(const struct libevdev *dev)
779 return dev->current_slot;
782 const struct input_absinfo*
783 libevdev_get_abs_info(const struct libevdev *dev, unsigned int code)
785 if (!libevdev_has_event_type(dev, EV_ABS) ||
786 !libevdev_has_event_code(dev, EV_ABS, code))
789 return &dev->abs_info[code];
/*
 * Convenience accessors for the fields of an axis's struct
 * input_absinfo. Each returns 0 when the device has no such EV_ABS
 * code (matching libevdev_get_abs_info() returning NULL).
 */
int
libevdev_get_abs_min(const struct libevdev *dev, unsigned int code)
{
	const struct input_absinfo *info = libevdev_get_abs_info(dev, code);

	if (info == NULL)
		return 0;
	return info->minimum;
}

int
libevdev_get_abs_max(const struct libevdev *dev, unsigned int code)
{
	const struct input_absinfo *info = libevdev_get_abs_info(dev, code);

	if (info == NULL)
		return 0;
	return info->maximum;
}

int
libevdev_get_abs_fuzz(const struct libevdev *dev, unsigned int code)
{
	const struct input_absinfo *info = libevdev_get_abs_info(dev, code);

	if (info == NULL)
		return 0;
	return info->fuzz;
}

int
libevdev_get_abs_flat(const struct libevdev *dev, unsigned int code)
{
	const struct input_absinfo *info = libevdev_get_abs_info(dev, code);

	if (info == NULL)
		return 0;
	return info->flat;
}

int
libevdev_get_abs_resolution(const struct libevdev *dev, unsigned int code)
{
	const struct input_absinfo *info = libevdev_get_abs_info(dev, code);

	if (info == NULL)
		return 0;
	return info->resolution;
}
/* Mark an event type as supported in the library's local view of the
 * device. Local only — see the FIXME below. */
833 libevdev_enable_event_type(struct libevdev *dev, unsigned int type)
838 set_bit(dev->bits, type);
840 /* FIXME: pass through to kernel */
/* Remove an event type from the library's local view of the device.
 * Local only — see the FIXME below. */
846 libevdev_disable_event_type(struct libevdev *dev, unsigned int type)
851 clear_bit(dev->bits, type);
853 /* FIXME: pass through to kernel */
/* Mark a single event code as supported in the library's local view,
 * enabling its type first. For EV_ABS, `data` supplies the axis's
 * struct input_absinfo. */
859 libevdev_enable_event_code(struct libevdev *dev, unsigned int type,
860 unsigned int code, const void *data)
865 if (libevdev_enable_event_type(dev, type))
868 max = type_to_mask(dev, type, &mask);
/* NOTE(review): no NULL check on `data` visible before the dereference
 * below — confirm one exists in the lines not shown here. */
875 if (type == EV_ABS) {
876 const struct input_absinfo *abs = data;
877 dev->abs_info[code] = *abs;
880 /* FIXME: pass through to kernel */
/* Remove a single event code from the library's local view of the
 * device. Local only — see the FIXME below. */
886 libevdev_disable_event_code(struct libevdev *dev, unsigned int type, unsigned int code)
894 max = type_to_mask(dev, type, &mask);
899 clear_bit(mask, code);
901 /* FIXME: pass through to kernel */
/* Push new absinfo for an axis to the kernel (EVIOCSABS) and mirror it
 * into the library's local state. */
907 libevdev_kernel_set_abs_value(struct libevdev *dev, unsigned int code, const struct input_absinfo *abs)
/* NOTE(review): passing `*abs` hands the struct to ioctl by value;
 * EVIOCSABS is a write ioctl that expects a pointer — this looks like
 * it should be `abs`. Confirm against the kernel ioctl definition. */
914 rc = ioctl(dev->fd, EVIOCSABS(code), *abs);
918 rc = libevdev_enable_event_code(dev, EV_ABS, code, abs);
/* Grab or ungrab the device via EVIOCGRAB. Grabbing makes this fd the
 * exclusive receiver of the device's events. Idempotent: re-requesting
 * the current grab state is a no-op. */
924 libevdev_grab(struct libevdev *dev, int grab)
928 if (grab != LIBEVDEV_GRAB && grab != LIBEVDEV_UNGRAB)
931 if (grab == dev->grabbed)
/* EVIOCGRAB takes 1 to grab, 0 (NULL) to release */
934 if (grab == LIBEVDEV_GRAB)
935 rc = ioctl(dev->fd, EVIOCGRAB, (void *)1);
936 else if (grab == LIBEVDEV_UNGRAB)
937 rc = ioctl(dev->fd, EVIOCGRAB, (void *)0);
942 return rc < 0 ? -errno : 0;
/* Map an EV_* type number to its printable name.
 * NOTE(review): body not visible in this view. */
946 libevdev_get_event_type_name(unsigned int type)
/* Map a type/code pair to its printable name via the generated lookup
 * tables in event-names.h. */
955 libevdev_get_event_code_name(unsigned int type, unsigned int code)
/* NOTE(review): indexing ev_max[type] requires `type` to have been
 * range-checked first — presumably done in the lines not visible here;
 * confirm. */
960 if (code > ev_max[type])
963 return event_type_map[type][code];
967 libevdev_get_input_prop_name(unsigned int prop)
969 if (prop > INPUT_PROP_MAX)
972 return input_prop_map[prop];