2 * Copyright © 2013 Red Hat, Inc.
4 * Permission to use, copy, modify, distribute, and sell this software and its
5 * documentation for any purpose is hereby granted without fee, provided that
6 * the above copyright notice appear in all copies and that both that copyright
7 * notice and this permission notice appear in supporting documentation, and
8 * that the name of the copyright holders not be used in advertising or
9 * publicity pertaining to distribution of the software without specific,
10 * written prior permission. The copyright holders make no representations
11 * about the suitability of this software for any purpose. It is provided "as
12 * is" without express or implied warranty.
14 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
15 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
16 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
17 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
18 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
19 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
29 #include <linux/uinput.h>
32 #include "libevdev-int.h"
33 #include "libevdev-util.h"
34 #include "event-names.h"
/* Allocate the device's internal event queue at a fixed 256 entries
 * (see the FIXME below about sizing from actual device capabilities).
 * NOTE(review): this excerpt elides lines; braces/return type not shown. */
39 init_event_queue(struct libevdev *dev)
41 /* FIXME: count the number of axes, keys, etc. to get a better idea at how many events per
42 EV_SYN we could possibly get. Then multiply that by the actual buffer size we care about */
44 const int QUEUE_SIZE = 256;
46 return queue_alloc(dev, QUEUE_SIZE);
/* Internal logging: forward a printf-style message to the device's
 * installed log handler (dev->log).
 * NOTE(review): va_end() is not visible in this excerpt — presumably in
 * an elided line; confirm against the full file. */
50 _libevdev_log(struct libevdev *dev, const char *format, ...)
54 va_start(args, format);
55 dev->log(format, args);
/* Default log handler: discards all messages (installed in libevdev_new
 * and restored by libevdev_set_log_handler(dev, NULL)). */
60 libevdev_noop_log_func(const char *format, va_list args)
/* Constructor body (enclosing signature elided from this excerpt —
 * presumably libevdev_new; confirm against the full file).
 * calloc() zero-fills, so only non-zero defaults are set explicitly. */
69 dev = calloc(1, sizeof(*dev));
74 dev->current_slot = -1;	/* -1 = no ABS_MT_SLOT event seen yet */
75 dev->log = libevdev_noop_log_func;	/* silent until a handler is installed */
76 dev->grabbed = LIBEVDEV_UNGRAB;
77 dev->sync_state = SYNC_NONE;
/* Convenience constructor: allocate a device (as variable d, allocation
 * elided from this excerpt) and bind it to an already-open fd. */
83 libevdev_new_from_fd(int fd, struct libevdev **dev)
92 rc = libevdev_set_fd(d, fd);
/* Release a device and its owned resources (bodies elided from this excerpt). */
101 libevdev_free(struct libevdev *dev)
/* Install a log handler; NULL restores the silent default. */
114 libevdev_set_log_handler(struct libevdev *dev, libevdev_log_func_t logfunc)
119 dev->log = logfunc ? logfunc : libevdev_noop_log_func;
/* Swap the underlying fd without re-querying capabilities
 * (contrast with libevdev_set_fd below, which does the full sync). */
123 libevdev_change_fd(struct libevdev *dev, int fd)
/* Bind dev to fd and mirror the kernel's view of the device: identity
 * strings, id/version, properties, per-type capability bitmasks, and
 * per-axis absinfo. Returns 0 on success or -errno on ioctl failure.
 * NOTE(review): the per-ioctl error-check branches are elided from this
 * excerpt (embedded line numbers jump after each ioctl). */
132 libevdev_set_fd(struct libevdev* dev, int fd)
/* EVIOCGBIT(0, ...) yields the EV_* type bits supported by the device. */
141 rc = ioctl(fd, EVIOCGBIT(0, sizeof(dev->bits)), dev->bits);
145 memset(buf, 0, sizeof(buf));
146 rc = ioctl(fd, EVIOCGNAME(sizeof(buf) - 1), buf);
150 dev->name = strdup(buf);
156 memset(buf, 0, sizeof(buf));
157 rc = ioctl(fd, EVIOCGPHYS(sizeof(buf) - 1), buf);
159 /* uinput has no phys */
163 dev->phys = strdup(buf);
170 memset(buf, 0, sizeof(buf));
171 rc = ioctl(fd, EVIOCGUNIQ(sizeof(buf) - 1), buf);
176 dev->uniq = strdup(buf);
183 rc = ioctl(fd, EVIOCGID, &dev->ids);
187 rc = ioctl(fd, EVIOCGVERSION, &dev->driver_version);
191 rc = ioctl(fd, EVIOCGPROP(sizeof(dev->props)), dev->props);
/* Per-type code bitmasks, one EVIOCGBIT query per event type. */
195 rc = ioctl(fd, EVIOCGBIT(EV_REL, sizeof(dev->rel_bits)), dev->rel_bits);
199 rc = ioctl(fd, EVIOCGBIT(EV_ABS, sizeof(dev->abs_bits)), dev->abs_bits);
203 rc = ioctl(fd, EVIOCGBIT(EV_LED, sizeof(dev->led_bits)), dev->led_bits);
207 rc = ioctl(fd, EVIOCGBIT(EV_KEY, sizeof(dev->key_bits)), dev->key_bits);
211 rc = ioctl(fd, EVIOCGBIT(EV_SW, sizeof(dev->sw_bits)), dev->sw_bits);
215 rc = ioctl(fd, EVIOCGBIT(EV_MSC, sizeof(dev->msc_bits)), dev->msc_bits);
219 rc = ioctl(fd, EVIOCGBIT(EV_FF, sizeof(dev->ff_bits)), dev->ff_bits);
223 rc = ioctl(fd, EVIOCGBIT(EV_SND, sizeof(dev->snd_bits)), dev->snd_bits);
227 /* rep is a special case, always set it to 1 for both values if EV_REP is set */
228 if (bit_is_set(dev->bits, EV_REP)) {
/* NOTE(review): "i < REP_MAX" excludes code REP_MAX itself (REP_PERIOD);
 * likely should be "<=" to set both REP_DELAY and REP_PERIOD — confirm. */
229 for (i = 0; i < REP_MAX; i++)
230 set_bit(dev->rep_bits, i);
231 rc = ioctl(fd, EVIOCGREP, dev->rep_values);
/* Fetch absinfo for every advertised absolute axis. */
236 for (i = ABS_X; i <= ABS_MAX; i++) {
237 if (bit_is_set(dev->abs_bits, i)) {
238 struct input_absinfo abs_info;
239 rc = ioctl(fd, EVIOCGABS(i), &abs_info);
243 dev->abs_info[i] = abs_info;
/* Slot count is derived from the ABS_MT_SLOT axis range (max + 1). */
244 if (i == ABS_MT_SLOT) {
245 dev->num_slots = abs_info.maximum + 1;
246 dev->current_slot = abs_info.value;
252 rc = init_event_queue(dev);
256 /* not copying key state because we won't know when we'll start to
257 * use this fd and key's are likely to change state by then.
258 * Same with the valuators, really, but they may not change.
264 return rc ? -errno : 0;
/* Accessor for the bound fd (body elided from this excerpt). */
268 libevdev_get_fd(const struct libevdev* dev)
/* Fill *ev with the given type/code/value, timestamped with the device's
 * last delivered event time so synthesized sync events sort correctly. */
274 init_event(struct libevdev *dev, struct input_event *ev, int type, int code, int value)
276 ev->time = dev->last_event_time;
/* Resync after SYN_DROPPED: fetch the kernel's current key bitmap via
 * EVIOCGKEY, push an EV_KEY event for every key whose state differs from
 * our shadow copy, and update the shadow. Returns 0 or -errno. */
283 sync_key_state(struct libevdev *dev)
287 unsigned long keystate[NLONGS(KEY_MAX)];
289 rc = ioctl(dev->fd, EVIOCGKEY(sizeof(keystate)), keystate);
/* NOTE(review): "i < KEY_MAX" never compares code KEY_MAX itself;
 * likely should be "<=" (and keystate sized NLONGS(KEY_CNT)) — confirm. */
293 for (i = 0; i < KEY_MAX; i++) {
295 old = bit_is_set(dev->key_values, i);
296 new = bit_is_set(keystate, i);
/* The old != new comparison guarding this push is elided in this excerpt. */
298 struct input_event *ev = queue_push(dev);
299 init_event(dev, ev, EV_KEY, i, new ? 1 : 0);
301 set_bit_state(dev->key_values, i, new);
306 return rc ? -errno : 0;
/* Resync non-MT absolute axes after SYN_DROPPED: re-read each axis via
 * EVIOCGABS and queue an EV_ABS event where the value changed.
 * MT axes are skipped here; sync_mt_state handles them per-slot. */
310 sync_abs_state(struct libevdev *dev)
315 for (i = ABS_X; i <= ABS_MAX; i++) {
316 struct input_absinfo abs_info;
318 if (i >= ABS_MT_MIN && i <= ABS_MT_MAX)
321 if (!bit_is_set(dev->abs_bits, i))
324 rc = ioctl(dev->fd, EVIOCGABS(i), &abs_info);
328 if (dev->abs_info[i].value != abs_info.value) {
329 struct input_event *ev = queue_push(dev);
331 init_event(dev, ev, EV_ABS, i, abs_info.value);
332 dev->abs_info[i].value = abs_info.value;
338 return rc ? -errno : 0;
/* Resync multitouch state after SYN_DROPPED: fetch all slot values for
 * each MT axis via EVIOCGMTSLOTS, then for every slot emit an
 * ABS_MT_SLOT event followed by EV_ABS events for each changed axis. */
342 sync_mt_state(struct libevdev *dev)
349 } mt_state[ABS_MT_CNT];
/* NOTE(review): "i < ABS_MT_MAX" excludes the last MT code (ABS_MT_MAX
 * itself); likely should be "<=" — confirm against the full file. */
351 for (i = ABS_MT_MIN; i < ABS_MT_MAX; i++) {
353 if (i == ABS_MT_SLOT)
356 if (!libevdev_has_event_code(dev, EV_ABS, i))
359 idx = i - ABS_MT_MIN;
360 mt_state[idx].code = i;
361 rc = ioctl(dev->fd, EVIOCGMTSLOTS(sizeof(struct mt_state)), &mt_state[idx]);
/* Replay: one ABS_MT_SLOT per slot, then only the axes that differ
 * from our cached mt_slot_vals. */
366 for (i = 0; i < dev->num_slots; i++) {
368 struct input_event *ev;
370 ev = queue_push(dev);
371 init_event(dev, ev, EV_ABS, ABS_MT_SLOT, i);
372 for (j = ABS_MT_MIN; j < ABS_MT_MAX; j++) {
373 int jdx = j - ABS_MT_MIN;
375 if (j == ABS_MT_SLOT)
378 if (!libevdev_has_event_code(dev, EV_ABS, j))
381 if (dev->mt_slot_vals[i][jdx] == mt_state[jdx].val[i])
384 ev = queue_push(dev);
385 init_event(dev, ev, EV_ABS, j, mt_state[jdx].val[i]);
386 dev->mt_slot_vals[i][jdx] = mt_state[jdx].val[i];
392 return rc ? -errno : 0;
/* Driver for SYN_DROPPED recovery: drop everything in the queue up to
 * and including the last EV_SYN (the kernel state queried below is
 * authoritative for those), then queue key/abs/MT delta events and a
 * terminating SYN_REPORT. queue_nsync counts the sync events so
 * libevdev_next_event can hand them out in SYNC mode. */
396 sync_state(struct libevdev *dev)
400 struct input_event *ev;
402 /* FIXME: if we have events in the queue after the SYN_DROPPED (which was
403 queue[0]) we need to shift this backwards. Except that chances are that the
404 queue may be either full or too full to prepend all the events needed for
407 so we search for the last sync event in the queue and drop everything before
408 including that event and rely on the kernel to tell us the right value for that
409 bitfield during the sync process.
/* Walk backwards to find the most recent EV_SYN in the queue. */
412 for (i = queue_num_elements(dev) - 1; i >= 0; i--) {
413 struct input_event e;
414 queue_peek(dev, i, &e);
415 if (e.type == EV_SYN)
420 queue_shift_multiple(dev, i + 1, NULL);
/* Each sync step runs only if the previous one succeeded (rc == 0). */
422 if (libevdev_has_event_type(dev, EV_KEY))
423 rc = sync_key_state(dev);
424 if (rc == 0 && libevdev_has_event_type(dev, EV_ABS))
425 rc = sync_abs_state(dev);
426 if (rc == 0 && libevdev_has_event_code(dev, EV_ABS, ABS_MT_SLOT))
427 rc = sync_mt_state(dev);
429 ev = queue_push(dev);
430 init_event(dev, ev, EV_SYN, SYN_REPORT, 0);
432 dev->queue_nsync = queue_num_elements(dev);
/* Mirror an EV_KEY event into the shadow key bitmap.
 * The value test selecting clear vs. set is elided in this excerpt
 * (presumably value == 0 clears, non-zero sets — confirm). */
438 update_key_state(struct libevdev *dev, const struct input_event *e)
440 if (!libevdev_has_event_type(dev, EV_KEY))
443 if (e->code > KEY_MAX)
447 clear_bit(dev->key_values, e->code);
449 set_bit(dev->key_values, e->code);
/* Mirror an MT-range EV_ABS event: ABS_MT_SLOT switches the active slot;
 * other codes update the cached value for the current slot. Events
 * arriving before any slot is known (current_slot == -1) are dropped. */
455 update_mt_state(struct libevdev *dev, const struct input_event *e)
457 if (e->code == ABS_MT_SLOT) {
458 dev->current_slot = e->value;
460 } else if (dev->current_slot == -1)
463 dev->mt_slot_vals[dev->current_slot][e->code - ABS_MT_MIN] = e->value;
/* Mirror an EV_ABS event into cached absinfo; MT codes are delegated
 * to update_mt_state. */
469 update_abs_state(struct libevdev *dev, const struct input_event *e)
471 if (!libevdev_has_event_type(dev, EV_ABS))
474 if (e->code > ABS_MAX)
477 if (e->code >= ABS_MT_MIN && e->code <= ABS_MT_MAX)
478 return update_mt_state(dev, e);
480 dev->abs_info[e->code].value = e->value;
/* Dispatch an incoming event to the per-type state mirror and record
 * its timestamp for synthesized events (the switch on e->type is
 * elided in this excerpt). */
486 update_state(struct libevdev *dev, const struct input_event *e)
495 rc = update_key_state(dev, e);
498 rc = update_abs_state(dev, e);
502 dev->last_event_time = e->time;
/* Fill the free tail of the queue with a single read() from the fd.
 * A short read that is not a multiple of sizeof(struct input_event)
 * is treated as an error (check at embedded line 522). */
508 read_more_events(struct libevdev *dev)
512 struct input_event *next;
514 free_elem = queue_num_free_elements(dev);
518 next = queue_next_element(dev);
519 len = read(dev->fd, next, free_elem * sizeof(struct input_event));
522 } else if (len > 0 && len % sizeof(struct input_event) != 0)
525 int nev = len/sizeof(struct input_event);
526 queue_set_num_elements(dev, queue_num_elements(dev) + nev);
/* Public event pump. flags select the mode:
 *  - LIBEVDEV_READ_SYNC: deliver (and on first call, generate) the
 *    SYN_DROPPED recovery events counted by queue_nsync;
 *  - otherwise: drain any leftover sync events into state, read from
 *    the fd, and pop the next real event into *ev.
 * Detecting SYN_DROPPED flips sync_state to SYNC_NEEDED so the caller
 * can switch to SYNC reads. */
532 int libevdev_next_event(struct libevdev *dev, unsigned int flags, struct input_event *ev)
539 if (!(flags & (LIBEVDEV_READ_NORMAL|LIBEVDEV_READ_SYNC|LIBEVDEV_FORCE_SYNC)))
542 if (flags & LIBEVDEV_READ_SYNC) {
543 if (dev->sync_state == SYNC_NEEDED) {
544 rc = sync_state(dev);
547 dev->sync_state = SYNC_IN_PROGRESS;
550 if (dev->queue_nsync == 0) {
551 dev->sync_state = SYNC_NONE;
/* Caller left SYNC mode early: apply remaining sync events to our
 * state mirror without delivering them. */
555 } else if (dev->sync_state != SYNC_NONE) {
556 struct input_event e;
558 /* call update_state for all events here, otherwise the library has the wrong view
560 while (queue_shift(dev, &e) == 0) {
562 update_state(dev, &e);
565 dev->sync_state = SYNC_NONE;
568 /* FIXME: if the first event after SYNC_IN_PROGRESS is a SYN_DROPPED, log this */
570 /* Always read in some more events. Best case this smoothes over a potential SYN_DROPPED,
571 worst case we don't read fast enough and end up with SYN_DROPPED anyway.
573 Except if the fd is in blocking mode and we still have events from the last read, don't
577 if (!(flags & LIBEVDEV_READ_BLOCKING) ||
578 queue_num_elements(dev) == 0) {
579 rc = read_more_events(dev);
580 if (rc < 0 && rc != -EAGAIN)
584 if (flags & LIBEVDEV_FORCE_SYNC) {
585 dev->sync_state = SYNC_NEEDED;
/* Pop events, skipping codes the caller has disabled on this device. */
591 if (queue_shift(dev, ev) != 0)
594 update_state(dev, ev);
596 /* if we disabled a code, get the next event instead */
597 } while(!libevdev_has_event_code(dev, ev->type, ev->code));
600 if (ev->type == EV_SYN && ev->code == SYN_DROPPED) {
601 dev->sync_state = SYNC_NEEDED;
/* In SYNC mode, count down the outstanding sync events. */
605 if (flags & LIBEVDEV_READ_SYNC && dev->queue_nsync > 0) {
608 if (dev->queue_nsync == 0)
609 dev->sync_state = SYNC_NONE;
/* Identity accessors, populated by libevdev_set_fd. get_name never
 * returns NULL; phys/uniq bodies are elided in this excerpt (may be
 * NULL for devices without them — confirm against the full file). */
617 libevdev_get_name(const struct libevdev *dev)
619 return dev->name ? dev->name : "";
623 libevdev_get_phys(const struct libevdev *dev)
629 libevdev_get_uniq(const struct libevdev *dev)
634 int libevdev_get_product_id(const struct libevdev *dev)
636 return dev->ids.product;
639 int libevdev_get_vendor_id(const struct libevdev *dev)
641 return dev->ids.vendor;
644 int libevdev_get_bustype(const struct libevdev *dev)
646 return dev->ids.bustype;
649 int libevdev_get_version(const struct libevdev *dev)
651 return dev->ids.version;
654 int libevdev_get_driver_version(const struct libevdev *dev)
656 return dev->driver_version;
/* Capability queries against the bitmasks cached by libevdev_set_fd.
 * All bounds-check first so out-of-range codes return false, not UB. */
660 libevdev_has_property(const struct libevdev *dev, unsigned int prop)
662 return (prop <= INPUT_PROP_MAX) && bit_is_set(dev->props, prop);
666 libevdev_has_event_type(const struct libevdev *dev, unsigned int type)
668 return (type <= EV_MAX) && bit_is_set(dev->bits, type);
672 libevdev_has_event_code(const struct libevdev *dev, unsigned int type, unsigned int code)
674 const unsigned long *mask;
677 if (!libevdev_has_event_type(dev, type))
/* type_to_mask_const maps a type to its per-code bitmask and max code;
 * -1 means the type has no per-code mask. */
683 max = type_to_mask_const(dev, type, &mask);
685 if (max == -1 || code > max)
688 return bit_is_set(mask, code);
/* Current cached value for (type, code); EV_ABS reads absinfo.value,
 * EV_KEY reads the key bitmap. Behavior for unsupported codes (return
 * value on the failed-check path) is elided in this excerpt. */
692 libevdev_get_event_value(const struct libevdev *dev, unsigned int type, unsigned int code)
696 if (!libevdev_has_event_type(dev, type) || !libevdev_has_event_code(dev, type, code))
700 case EV_ABS: value = dev->abs_info[code].value; break;
701 case EV_KEY: value = bit_is_set(dev->key_values, code); break;
/* Checked variant: writes *value only when the code is supported, so
 * callers can distinguish "value is 0" from "not supported". */
711 libevdev_fetch_event_value(const struct libevdev *dev, unsigned int type, unsigned int code, int *value)
713 if (libevdev_has_event_type(dev, type) &&
714 libevdev_has_event_code(dev, type, code)) {
715 *value = libevdev_get_event_value(dev, type, code);
/* Cached MT value for (slot, code); code must be in the MT axis range
 * and slot within both the device's slot count and the compile-time
 * MAX_SLOTS cap. */
722 libevdev_get_slot_value(const struct libevdev *dev, unsigned int slot, unsigned int code)
724 if (!libevdev_has_event_type(dev, EV_ABS) || !libevdev_has_event_code(dev, EV_ABS, code))
727 if (slot >= dev->num_slots || slot >= MAX_SLOTS)
730 if (code > ABS_MT_MAX || code < ABS_MT_MIN)
733 return dev->mt_slot_vals[slot][code - ABS_MT_MIN];
/* Checked variant mirroring libevdev_fetch_event_value. */
737 libevdev_fetch_slot_value(const struct libevdev *dev, unsigned int slot, unsigned int code, int *value)
739 if (libevdev_has_event_type(dev, EV_ABS) &&
740 libevdev_has_event_code(dev, EV_ABS, code) &&
741 slot < dev->num_slots && slot < MAX_SLOTS) {
742 *value = libevdev_get_slot_value(dev, slot, code);
749 libevdev_get_num_slots(const struct libevdev *dev)
751 return dev->num_slots;
755 libevdev_get_current_slot(const struct libevdev *dev)
757 return dev->current_slot;
/* Pointer into the cached absinfo table, or (presumably) NULL for
 * unsupported codes — the failed-check return is elided here. The
 * field accessors below all return 0 when the axis is unsupported. */
760 const struct input_absinfo*
761 libevdev_get_abs_info(const struct libevdev *dev, unsigned int code)
763 if (!libevdev_has_event_type(dev, EV_ABS) ||
764 !libevdev_has_event_code(dev, EV_ABS, code))
767 return &dev->abs_info[code];
771 libevdev_get_abs_min(const struct libevdev *dev, unsigned int code)
773 const struct input_absinfo *absinfo = libevdev_get_abs_info(dev, code);
775 return absinfo ? absinfo->minimum : 0;
779 libevdev_get_abs_max(const struct libevdev *dev, unsigned int code)
781 const struct input_absinfo *absinfo = libevdev_get_abs_info(dev, code);
783 return absinfo ? absinfo->maximum : 0;
787 libevdev_get_abs_fuzz(const struct libevdev *dev, unsigned int code)
789 const struct input_absinfo *absinfo = libevdev_get_abs_info(dev, code);
791 return absinfo ? absinfo->fuzz : 0;
795 libevdev_get_abs_flat(const struct libevdev *dev, unsigned int code)
797 const struct input_absinfo *absinfo = libevdev_get_abs_info(dev, code);
799 return absinfo ? absinfo->flat : 0;
803 libevdev_get_abs_resolution(const struct libevdev *dev, unsigned int code)
805 const struct input_absinfo *absinfo = libevdev_get_abs_info(dev, code);
807 return absinfo ? absinfo->resolution : 0;
/* Locally enable an event type (sets the bit in our cached mask only;
 * no kernel ioctl here). */
811 libevdev_enable_event_type(struct libevdev *dev, unsigned int type)
816 set_bit(dev->bits, type);
/* Locally disable a type; EV_SYN can never be disabled. */
822 libevdev_disable_event_type(struct libevdev *dev, unsigned int type)
824 if (type > EV_MAX || type == EV_SYN)
827 clear_bit(dev->bits, type);
/* Enable a code, auto-enabling its type. data carries the absinfo for
 * EV_ABS codes and must be NULL for every other type. The set_bit on
 * the code mask is elided in this excerpt. */
833 libevdev_enable_event_code(struct libevdev *dev, unsigned int type,
834 unsigned int code, const void *data)
839 if (libevdev_enable_event_type(dev, type))
842 if (type != EV_ABS && data != NULL)
845 max = type_to_mask(dev, type, &mask);
852 if (type == EV_ABS) {
853 const struct input_absinfo *abs = data;
854 dev->abs_info[code] = *abs;
/* Locally disable a single code. */
861 libevdev_disable_event_code(struct libevdev *dev, unsigned int type, unsigned int code)
869 max = type_to_mask(dev, type, &mask);
874 clear_bit(mask, code);
/* Push new absinfo into the kernel (EVIOCSABS) and, on success, mirror
 * it into our cache via libevdev_enable_event_code. */
880 libevdev_kernel_set_abs_value(struct libevdev *dev, unsigned int code, const struct input_absinfo *abs)
887 rc = ioctl(dev->fd, EVIOCSABS(code), abs);
891 rc = libevdev_enable_event_code(dev, EV_ABS, code, abs);
/* Grab or release exclusive access via EVIOCGRAB. Idempotent: a repeat
 * of the current grab state short-circuits (return on the == path is
 * elided in this excerpt). Returns 0 or -errno. */
897 libevdev_grab(struct libevdev *dev, int grab)
901 if (grab != LIBEVDEV_GRAB && grab != LIBEVDEV_UNGRAB)
904 if (grab == dev->grabbed)
/* EVIOCGRAB takes 1 to grab, 0 to release, passed as the pointer arg. */
907 if (grab == LIBEVDEV_GRAB)
908 rc = ioctl(dev->fd, EVIOCGRAB, (void *)1);
909 else if (grab == LIBEVDEV_UNGRAB)
910 rc = ioctl(dev->fd, EVIOCGRAB, (void *)0);
915 return rc < 0 ? -errno : 0;
/* Static name/limit lookups backed by the tables in event-names.h. */
919 libevdev_get_event_type_name(unsigned int type)
/* NOTE(review): a bounds check on `type` before indexing ev_max /
 * event_type_map is not visible in this excerpt — confirm it exists
 * in the elided lines. */
928 libevdev_get_event_code_name(unsigned int type, unsigned int code)
933 if (code > ev_max[type])
936 return event_type_map[type][code];
940 libevdev_get_input_prop_name(unsigned int prop)
942 if (prop > INPUT_PROP_MAX)
945 return input_prop_map[prop];
949 libevdev_get_event_type_max(unsigned int type)
/* Copy the device's cached repeat settings; either out-pointer may
 * presumably be NULL (the NULL guards are elided here — confirm). */
958 libevdev_get_repeat(struct libevdev *dev, int *delay, int *period)
960 if (!libevdev_has_event_type(dev, EV_REP))
964 *delay = dev->rep_values[REP_DELAY];
966 *period = dev->rep_values[REP_PERIOD];