written = 0;
add_wait_queue(&rb->pollq, &wait);
do {
- if (indio_dev->info == NULL)
+ if (!indio_dev->info)
return -ENODEV;
if (!iio_buffer_space_available(rb)) {
wait_woken(&wait, TASK_INTERRUPTIBLE,
- MAX_SCHEDULE_TIMEOUT);
+ MAX_SCHEDULE_TIMEOUT);
continue;
}
struct iio_buffer *rb = ib->buffer;
struct iio_dev *indio_dev = ib->indio_dev;
- if (!indio_dev->info || rb == NULL)
+ if (!indio_dev->info || !rb)
return 0;
poll_wait(filp, &rb->pollq, wait);
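/*
 * A minimal userspace sketch (not part of this patch) of the consumer
 * side of that pollq: a blocked poll(2) on the buffer's character
 * device sleeps on this wait queue until the kernel wakes it. The
 * device path is an assumption for illustration.
 */
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

int main(void)
{
	char buf[256];
	struct pollfd pfd = { .events = POLLIN };

	pfd.fd = open("/dev/iio:device0", O_RDONLY);
	if (pfd.fd < 0)
		return 1;

	poll(&pfd, 1, -1);		/* sleeps until pollq is woken */
	read(pfd.fd, buf, sizeof(buf));	/* now returns complete scans */
	close(pfd.fd);
	return 0;
}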
/* Note: NULL is used as the error indicator, since a NULL mask can never be a valid match. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
- unsigned int masklength,
- const unsigned long *mask,
- bool strict)
+ unsigned int masklength,
+ const unsigned long *mask,
+ bool strict)
{
if (bitmap_empty(mask, masklength))
return NULL;
}
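/*
 * A minimal sketch, with hypothetical names, of the driver side this
 * matcher serves: available_scan_masks is a zero-terminated array of
 * masks the hardware can really capture, and the core picks a
 * matching superset of whatever userspace enabled.
 */
static const unsigned long my_scan_masks[] = {
	BIT(0) | BIT(1),			/* channels 0 and 1 only */
	BIT(0) | BIT(1) | BIT(2) | BIT(3),	/* or all four at once */
	0,					/* terminator */
};
/* in probe: indio_dev->available_scan_masks = my_scan_masks; */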
static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
- const unsigned long *mask)
+ const unsigned long *mask)
{
if (!indio_dev->setup_ops->validate_scan_mask)
return true;
* individual buffer's request is plausible.
*/
static int iio_scan_mask_set(struct iio_dev *indio_dev,
- struct iio_buffer *buffer, int bit)
+ struct iio_buffer *buffer, int bit)
{
const unsigned long *mask;
unsigned long *trialmask;
mutex_unlock(&iio_dev_opaque->mlock);
return ret < 0 ? ret : len;
-
}
static ssize_t iio_scan_el_ts_show(struct device *dev,
}
static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
- const unsigned long *mask, bool timestamp)
+ const unsigned long *mask, bool timestamp)
{
unsigned int bytes = 0;
int length, i, largest = 0;
}
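/*
 * A worked sketch of the layout rule, assuming one 16-bit channel, one
 * 32-bit channel and the 64-bit timestamp: each field is padded to its
 * own storage size, and the whole record to the largest field.
 *
 *	bytes = 0
 *	ALIGN(0, 2)  -> 0,  +2 = 2	(u16 channel)
 *	ALIGN(2, 4)  -> 4,  +4 = 8	(u32 channel)
 *	ALIGN(8, 8)  -> 8,  +8 = 16	(s64 timestamp)
 *	ALIGN(16, 8) -> 16		(final record alignment)
 */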
static void iio_buffer_activate(struct iio_dev *indio_dev,
- struct iio_buffer *buffer)
+ struct iio_buffer *buffer)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
struct iio_buffer *buffer, *_buffer;
list_for_each_entry_safe(buffer, _buffer,
- &iio_dev_opaque->buffer_list, buffer_list)
+ &iio_dev_opaque->buffer_list, buffer_list)
iio_buffer_deactivate(buffer);
}
static int iio_buffer_enable(struct iio_buffer *buffer,
- struct iio_dev *indio_dev)
+ struct iio_dev *indio_dev)
{
if (!buffer->access->enable)
return 0;
}
static int iio_buffer_disable(struct iio_buffer *buffer,
- struct iio_dev *indio_dev)
+ struct iio_dev *indio_dev)
{
if (!buffer->access->disable)
return 0;
}
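/*
 * A minimal sketch (hypothetical buffer implementation): both hooks in
 * struct iio_buffer_access_funcs are optional, which is exactly why
 * the two wrappers above return 0 when a callback is absent.
 */
static int my_fifo_enable(struct iio_buffer *buffer,
			  struct iio_dev *indio_dev)
{
	/* e.g. start the DMA engine feeding this buffer */
	return 0;
}

static const struct iio_buffer_access_funcs my_fifo_access = {
	.enable = my_fifo_enable,
	/* .disable left NULL: iio_buffer_disable() then just returns 0 */
};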
static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
- struct iio_buffer *buffer)
+ struct iio_buffer *buffer)
{
unsigned int bytes;
return;
bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
- buffer->scan_timestamp);
+ buffer->scan_timestamp);
buffer->access->set_bytes_per_datum(buffer, bytes);
}
static int iio_buffer_request_update(struct iio_dev *indio_dev,
- struct iio_buffer *buffer)
+ struct iio_buffer *buffer)
{
int ret;
ret = buffer->access->request_update(buffer);
if (ret) {
dev_dbg(&indio_dev->dev,
- "Buffer not started: buffer parameter update failed (%d)\n",
+ "Buffer not started: buffer parameter update failed (%d)\n",
ret);
return ret;
}
}
static void iio_free_scan_mask(struct iio_dev *indio_dev,
- const unsigned long *mask)
+ const unsigned long *mask)
{
/* If the mask is dynamically allocated, free it; otherwise do nothing. */
if (!indio_dev->available_scan_masks)
};
static int iio_verify_update(struct iio_dev *indio_dev,
- struct iio_buffer *insert_buffer, struct iio_buffer *remove_buffer,
- struct iio_device_config *config)
+ struct iio_buffer *insert_buffer,
+ struct iio_buffer *remove_buffer,
+ struct iio_device_config *config)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
unsigned long *compound_mask;
if (insert_buffer) {
modes &= insert_buffer->access->modes;
config->watermark = min(config->watermark,
- insert_buffer->watermark);
+ insert_buffer->watermark);
}
/* Definitely possible for devices to support both of these. */
/* What scan mask do we actually have? */
compound_mask = bitmap_zalloc(indio_dev->masklength, GFP_KERNEL);
- if (compound_mask == NULL)
+ if (!compound_mask)
return -ENOMEM;
scan_timestamp = false;
if (indio_dev->available_scan_masks) {
scan_mask = iio_scan_mask_match(indio_dev->available_scan_masks,
- indio_dev->masklength,
- compound_mask,
- strict_scanmask);
+ indio_dev->masklength,
+ compound_mask,
+ strict_scanmask);
bitmap_free(compound_mask);
- if (scan_mask == NULL)
+ if (!scan_mask)
return -EINVAL;
} else {
scan_mask = compound_mask;
}
config->scan_bytes = iio_compute_scan_bytes(indio_dev,
- scan_mask, scan_timestamp);
+ scan_mask, scan_timestamp);
config->scan_mask = scan_mask;
config->scan_timestamp = scan_timestamp;
}
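/*
 * A worked sketch of the compound mask, assuming two active buffers:
 * every buffer's scan_mask is ORed into one request before it is
 * matched against available_scan_masks above.
 *
 *	buffer A: 0b0011,  buffer B: 0b0110  ->  compound: 0b0111
 */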
static int iio_buffer_add_demux(struct iio_buffer *buffer,
- struct iio_demux_table **p, unsigned int in_loc, unsigned int out_loc,
- unsigned int length)
+ struct iio_demux_table **p, unsigned int in_loc,
+ unsigned int out_loc,
+ unsigned int length)
{
-
if (*p && (*p)->from + (*p)->length == in_loc &&
- (*p)->to + (*p)->length == out_loc) {
+ (*p)->to + (*p)->length == out_loc) {
(*p)->length += length;
} else {
*p = kmalloc(sizeof(**p), GFP_KERNEL);
- if (*p == NULL)
+ if (!(*p))
return -ENOMEM;
(*p)->from = in_loc;
(*p)->to = out_loc;
out_loc += length;
}
buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
- if (buffer->demux_bounce == NULL) {
+ if (!buffer->demux_bounce) {
ret = -ENOMEM;
goto error_clear_mux_table;
}
}
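/*
 * A standalone sketch, with invented names, of the coalescing rule
 * above: a copy region starting exactly where the previous one ended,
 * on both the input and the output side, is folded into it, so runs of
 * adjacent enabled channels cost one memcpy instead of several.
 */
struct demux_sketch { unsigned int from, to, length; };

static bool demux_sketch_merge(struct demux_sketch *prev, unsigned int in,
			       unsigned int out, unsigned int length)
{
	if (prev->from + prev->length != in || prev->to + prev->length != out)
		return false;
	prev->length += length;	/* e.g. {0, 0, 2} + (2, 2, 2) -> {0, 0, 4} */
	return true;
}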
static int iio_enable_buffers(struct iio_dev *indio_dev,
- struct iio_device_config *config)
+ struct iio_device_config *config)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
struct iio_buffer *buffer, *tmp = NULL;
ret = indio_dev->setup_ops->preenable(indio_dev);
if (ret) {
dev_dbg(&indio_dev->dev,
- "Buffer not started: buffer preenable failed (%d)\n", ret);
+ "Buffer not started: buffer preenable failed (%d)\n", ret);
goto err_undo_config;
}
}
ret = indio_dev->setup_ops->postenable(indio_dev);
if (ret) {
dev_dbg(&indio_dev->dev,
- "Buffer not started: postenable failed (%d)\n", ret);
+ "Buffer not started: postenable failed (%d)\n", ret);
goto err_detach_pollfunc;
}
}
}
static int __iio_update_buffers(struct iio_dev *indio_dev,
- struct iio_buffer *insert_buffer,
- struct iio_buffer *remove_buffer)
+ struct iio_buffer *insert_buffer,
+ struct iio_buffer *remove_buffer)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
struct iio_device_config new_config;
int ret;
ret = iio_verify_update(indio_dev, insert_buffer, remove_buffer,
- &new_config);
+ &new_config);
if (ret)
return ret;
return 0;
if (insert_buffer &&
- (insert_buffer->direction == IIO_BUFFER_DIRECTION_OUT))
+ insert_buffer->direction == IIO_BUFFER_DIRECTION_OUT)
return -EINVAL;
mutex_lock(&iio_dev_opaque->info_exist_lock);
goto out_unlock;
}
- if (indio_dev->info == NULL) {
+ if (!indio_dev->info) {
ret = -ENODEV;
goto out_unlock;
}
buffer_attrcount = 0;
if (buffer->attrs) {
- while (buffer->attrs[buffer_attrcount] != NULL)
+ while (buffer->attrs[buffer_attrcount])
buffer_attrcount++;
}
buffer_attrcount += ARRAY_SIZE(iio_buffer_attrs);
}
ret = iio_buffer_add_channel_sysfs(indio_dev, buffer,
- &channels[i]);
+ &channels[i]);
if (ret < 0)
goto error_cleanup_dynamic;
scan_el_attrcount += ret;
iio_dev_opaque->scan_index_timestamp =
channels[i].scan_index;
}
- if (indio_dev->masklength && buffer->scan_mask == NULL) {
+ if (indio_dev->masklength && !buffer->scan_mask) {
buffer->scan_mask = bitmap_zalloc(indio_dev->masklength,
GFP_KERNEL);
- if (buffer->scan_mask == NULL) {
+ if (!buffer->scan_mask) {
ret = -ENOMEM;
goto error_cleanup_dynamic;
}
goto error_unwind_sysfs_and_mask;
}
- sz = sizeof(*(iio_dev_opaque->buffer_ioctl_handler));
+ sz = sizeof(*iio_dev_opaque->buffer_ioctl_handler);
iio_dev_opaque->buffer_ioctl_handler = kzalloc(sz, GFP_KERNEL);
if (!iio_dev_opaque->buffer_ioctl_handler) {
ret = -ENOMEM;
* a time.
*/
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
- const unsigned long *mask)
+ const unsigned long *mask)
{
return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
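/*
 * A minimal sketch of wiring this helper up, with hypothetical names:
 * a driver whose hardware can stream only one channel at a time simply
 * hands the helper to the core through its buffer setup ops.
 */
static const struct iio_buffer_setup_ops my_setup_ops = {
	.validate_scan_mask = iio_validate_scan_mask_onehot,
};
/* in probe: indio_dev->setup_ops = &my_setup_ops; */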
static const void *iio_demux(struct iio_buffer *buffer,
- const void *datain)
+ const void *datain)
{
struct iio_demux_table *t;