goto error_iio_sw_rb_free;
}
- indio_dev->modes |= INDIO_RING_TRIGGERED;
+ indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
return 0;
error_iio_sw_rb_free:
iio_sw_rb_free(indio_dev->ring);
goto error_iio_sw_rb_free;
}
- indio_dev->modes |= INDIO_RING_TRIGGERED;
+ indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
return 0;
error_iio_sw_rb_free:
goto error_iio_sw_rb_free;
}
- indio_dev->modes |= INDIO_RING_TRIGGERED;
+ indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
return 0;
error_iio_sw_rb_free:
goto error_iio_sw_rb_free;
}
- indio_dev->modes |= INDIO_RING_TRIGGERED;
+ indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
return 0;
error_iio_sw_rb_free:
goto error_iio_sw_rb_free;
}
- indio_dev->modes |= INDIO_RING_TRIGGERED;
+ indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
return 0;
error_iio_sw_rb_free:
case 0:
/* Take the iio_dev status lock */
mutex_lock(&indio_dev->mlock);
- if (indio_dev->currentmode == INDIO_RING_TRIGGERED)
+ if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED)
ret = lis3l02dq_read_accel_from_ring(indio_dev->ring,
chan->scan_index,
val);
return 0;
error_remove_trigger:
- if (indio_dev->modes & INDIO_RING_TRIGGERED)
+ if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
lis3l02dq_remove_trigger(indio_dev);
error_free_interrupt:
if (spi->irq && gpio_is_valid(irq_to_gpio(spi->irq)) > 0)
goto error_iio_sw_rb_free;
}
- indio_dev->modes |= INDIO_RING_TRIGGERED;
+ indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
return 0;
error_iio_sw_rb_free:
indio_dev->ring = sca3000_rb_allocate(indio_dev);
if (indio_dev->ring == NULL)
return -ENOMEM;
- indio_dev->modes |= INDIO_RING_HARDWARE_BUFFER;
+ indio_dev->modes |= INDIO_BUFFER_HARDWARE;
indio_dev->ring->access = &sca3000_ring_access_funcs;
indio_dev->ring->setup_ops = &ad7192_ring_setup_ops;
/* Flag that polled ring buffering is possible */
- indio_dev->modes |= INDIO_RING_TRIGGERED;
+ indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
return 0;
error_deallocate_sw_rb:
indio_dev->ring->scan_timestamp = true;
/* Flag that polled ring buffering is possible */
- indio_dev->modes |= INDIO_RING_TRIGGERED;
+ indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
return 0;
error_deallocate_sw_rb:
indio_dev->ring->scan_timestamp = true;
/* Flag that polled ring buffering is possible */
- indio_dev->modes |= INDIO_RING_TRIGGERED;
+ indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
return 0;
error_deallocate_sw_rb:
INIT_WORK(&st->poll_work, &ad7606_poll_bh_to_ring);
/* Flag that polled ring buffering is possible */
- indio_dev->modes |= INDIO_RING_TRIGGERED;
+ indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
return 0;
error_deallocate_sw_rb:
indio_dev->ring->setup_ops = &ad7793_ring_setup_ops;
/* Flag that polled ring buffering is possible */
- indio_dev->modes |= INDIO_RING_TRIGGERED;
+ indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
return 0;
error_deallocate_sw_rb:
indio_dev->ring->setup_ops = &ad7887_ring_setup_ops;
/* Flag that polled ring buffering is possible */
- indio_dev->modes |= INDIO_RING_TRIGGERED;
+ indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
return 0;
error_deallocate_sw_rb:
indio_dev->ring->scan_timestamp = true;
/* Flag that polled ring buffering is possible */
- indio_dev->modes |= INDIO_RING_TRIGGERED;
+ indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
return 0;
error_deallocate_sw_rb:
indio_dev->ring->setup_ops = &max1363_ring_setup_ops;
/* Flag that polled ring buffering is possible */
- indio_dev->modes |= INDIO_RING_TRIGGERED;
+ indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
return 0;
goto error_iio_sw_rb_free;
}
- indio_dev->modes |= INDIO_RING_TRIGGERED;
+ indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
return 0;
error_iio_sw_rb_free:
/* Device operating modes */
#define INDIO_DIRECT_MODE 0x01
-#define INDIO_RING_TRIGGERED 0x02
-#define INDIO_RING_HARDWARE_BUFFER 0x08
+#define INDIO_BUFFER_TRIGGERED 0x02
+#define INDIO_BUFFER_HARDWARE 0x08
-#define INDIO_ALL_RING_MODES (INDIO_RING_TRIGGERED | INDIO_RING_HARDWARE_BUFFER)
+#define INDIO_ALL_BUFFER_MODES \
+ (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE)
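/*
 * Editor's summary of the rename carried by the hunks in this patch
 * (all names below appear in the diff itself; nothing new is added):
 *   INDIO_RING_TRIGGERED        ->  INDIO_BUFFER_TRIGGERED
 *   INDIO_RING_HARDWARE_BUFFER  ->  INDIO_BUFFER_HARDWARE
 *   INDIO_ALL_RING_MODES        ->  INDIO_ALL_BUFFER_MODES
 */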
/* Vast majority of this is set by the industrialio subsystem on a
* call to iio_device_register. */
static inline bool iio_ring_enabled(struct iio_dev *dev_info)
{
return dev_info->currentmode
- & (INDIO_RING_TRIGGERED
- | INDIO_RING_HARDWARE_BUFFER);
+ & (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE);
};
#endif /* _INDUSTRIAL_IO_H_ */
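/*
 * Editor's sketch, not part of the patch: how a driver typically
 * advertises the renamed modes after this change. The name
 * foo_configure_buffer() is hypothetical and used only for illustration.
 */
static int foo_configure_buffer(struct iio_dev *indio_dev)
{
	/* Direct (non-buffered) register access */
	indio_dev->modes = INDIO_DIRECT_MODE;
	/* Software ring filled from a trigger */
	indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
	/* A device with an on-chip FIFO would set INDIO_BUFFER_HARDWARE instead */
	return 0;
}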
/* Ring buffer functions - here trigger setup related */
indio_dev->ring->setup_ops = &ad5933_ring_setup_ops;
- indio_dev->modes |= INDIO_RING_HARDWARE_BUFFER;
+ indio_dev->modes |= INDIO_BUFFER_HARDWARE;
return 0;
}
return 0;
error_remove_trigger:
- if (indio_dev->modes & INDIO_RING_TRIGGERED)
+ if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
adis16400_remove_trigger(indio_dev);
error_uninitialize_ring:
iio_ring_buffer_unregister(indio_dev);
goto error_iio_sw_rb_free;
}
- indio_dev->modes |= INDIO_RING_TRIGGERED;
+ indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
return 0;
error_iio_sw_rb_free:
iio_sw_rb_free(indio_dev->ring);
{
struct iio_dev *dev_info = container_of(device, struct iio_dev, dev);
cdev_del(&dev_info->chrdev);
- if (dev_info->modes & INDIO_RING_TRIGGERED)
+ if (dev_info->modes & INDIO_BUFFER_TRIGGERED)
iio_device_unregister_trigger_consumer(dev_info);
iio_device_unregister_eventset(dev_info);
iio_device_unregister_sysfs(dev_info);
"Failed to register event set\n");
goto error_free_sysfs;
}
- if (dev_info->modes & INDIO_RING_TRIGGERED)
+ if (dev_info->modes & INDIO_BUFFER_TRIGGERED)
iio_device_register_trigger_consumer(dev_info);
ret = device_add(&dev_info->dev);
state = !(buf[0] == '0');
mutex_lock(&indio_dev->mlock);
- if (indio_dev->currentmode == INDIO_RING_TRIGGERED) {
+ if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
ret = -EBUSY;
goto error_ret;
}
state = !(buf[0] == '0');
mutex_lock(&indio_dev->mlock);
- if (indio_dev->currentmode == INDIO_RING_TRIGGERED) {
+ if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
ret = -EBUSY;
goto error_ret;
}
mutex_lock(&dev_info->mlock);
previous_mode = dev_info->currentmode;
requested_state = !(buf[0] == '0');
- current_state = !!(previous_mode & INDIO_ALL_RING_MODES);
+ current_state = !!(previous_mode & INDIO_ALL_BUFFER_MODES);
if (current_state == requested_state) {
printk(KERN_INFO "iio-ring, current state requested again\n");
goto done;
if (ring->access->mark_in_use)
ring->access->mark_in_use(ring);
/* Definitely possible for devices to support both of these.*/
- if (dev_info->modes & INDIO_RING_TRIGGERED) {
+ if (dev_info->modes & INDIO_BUFFER_TRIGGERED) {
if (!dev_info->trig) {
printk(KERN_INFO
"Buffer not started: no trigger\n");
ring->access->unmark_in_use(ring);
goto error_ret;
}
- dev_info->currentmode = INDIO_RING_TRIGGERED;
- } else if (dev_info->modes & INDIO_RING_HARDWARE_BUFFER)
- dev_info->currentmode = INDIO_RING_HARDWARE_BUFFER;
+ dev_info->currentmode = INDIO_BUFFER_TRIGGERED;
+ } else if (dev_info->modes & INDIO_BUFFER_HARDWARE)
+ dev_info->currentmode = INDIO_BUFFER_HARDWARE;
else { /* should never be reached */
ret = -EINVAL;
goto error_ret;
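/*
 * Editor's note: condensed restatement of the selection logic above with
 * the renamed flags; the printk and ring->access->unmark_in_use() call on
 * the missing-trigger path are omitted for brevity.
 */
if (dev_info->modes & INDIO_BUFFER_TRIGGERED) {
	if (!dev_info->trig)
		goto error_ret;		/* triggered mode needs a trigger */
	dev_info->currentmode = INDIO_BUFFER_TRIGGERED;
} else if (dev_info->modes & INDIO_BUFFER_HARDWARE) {
	dev_info->currentmode = INDIO_BUFFER_HARDWARE;
} else {
	ret = -EINVAL;			/* should never be reached */
	goto error_ret;
}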
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", !!(dev_info->currentmode
- & INDIO_ALL_RING_MODES));
+ & INDIO_ALL_BUFFER_MODES));
}
EXPORT_SYMBOL(iio_show_ring_enable);
int ret;
mutex_lock(&dev_info->mlock);
- if (dev_info->currentmode == INDIO_RING_TRIGGERED) {
+ if (dev_info->currentmode == INDIO_BUFFER_TRIGGERED) {
mutex_unlock(&dev_info->mlock);
return -EBUSY;
}
return 0;
error_remove_trigger:
- if (indio_dev->modes & INDIO_RING_TRIGGERED)
+ if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
ade7758_remove_trigger(indio_dev);
error_uninitialize_ring:
ade7758_uninitialize_ring(indio_dev);
goto error_iio_sw_rb_free;
}
- indio_dev->modes |= INDIO_RING_TRIGGERED;
+ indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
st->tx_buf[0] = ADE7758_READ_REG(ADE7758_RSTATUS);
st->tx_buf[1] = 0;