/* The industrial I/O core in kernel channel mapping
 *
 * Copyright (c) 2011 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/math64.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/machine.h>
#include <linux/iio/driver.h>
#include <linux/iio/consumer.h>

struct iio_map_internal {
	struct iio_dev *indio_dev;
	struct iio_map *map;
	struct list_head l;
};

static LIST_HEAD(iio_map_list);
static DEFINE_MUTEX(iio_map_list_lock);

int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
{
	int i = 0, ret = 0;
	struct iio_map_internal *mapi;

	if (maps == NULL)
		return 0;

	mutex_lock(&iio_map_list_lock);
	while (maps[i].consumer_dev_name != NULL) {
		mapi = kzalloc(sizeof(*mapi), GFP_KERNEL);
		if (mapi == NULL) {
			ret = -ENOMEM;
			goto error_ret;
		}
		mapi->map = &maps[i];
		mapi->indio_dev = indio_dev;
		list_add(&mapi->l, &iio_map_list);
		i++;
	}
error_ret:
	mutex_unlock(&iio_map_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_register);

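/*
 * Usage sketch (illustrative, not part of the original file): an ADC
 * driver can publish its channels to named consumers by registering a
 * NULL-terminated map table at probe time and unregistering the same
 * array at remove time. The device and channel names here are
 * hypothetical.
 *
 *	static struct iio_map adc_default_maps[] = {
 *		{
 *			.adc_channel_label = "channel_0",
 *			.consumer_dev_name = "foo-battery",
 *			.consumer_channel = "voltage",
 *		},
 *		{ },
 *	};
 *
 *	ret = iio_map_array_register(indio_dev, adc_default_maps);
 *	...
 *	iio_map_array_unregister(indio_dev, adc_default_maps);
 */
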
/* Assumes the exact same array (e.g. memory locations)
 * used at unregistration as used at registration rather than
 * more complex checking of contents.
 */
int iio_map_array_unregister(struct iio_dev *indio_dev,
			     struct iio_map *maps)
{
	int i = 0, ret = 0;
	bool found_it;
	struct iio_map_internal *mapi;

	if (maps == NULL)
		return 0;

	mutex_lock(&iio_map_list_lock);
	while (maps[i].consumer_dev_name != NULL) {
		found_it = false;
		list_for_each_entry(mapi, &iio_map_list, l)
			if (&maps[i] == mapi->map) {
				list_del(&mapi->l);
				kfree(mapi);
				found_it = true;
				break;
			}
		if (!found_it) {
			ret = -ENODEV;
			goto error_ret;
		}
		i++;
	}
error_ret:
	mutex_unlock(&iio_map_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_unregister);

static const struct iio_chan_spec
*iio_chan_spec_from_name(const struct iio_dev *indio_dev, const char *name)
{
	int i;
	const struct iio_chan_spec *chan = NULL;

	for (i = 0; i < indio_dev->num_channels; i++)
		if (indio_dev->channels[i].datasheet_name &&
		    strcmp(name, indio_dev->channels[i].datasheet_name) == 0) {
			chan = &indio_dev->channels[i];
			break;
		}
	return chan;
}

struct iio_channel *iio_channel_get(const char *name, const char *channel_name)
{
	struct iio_map_internal *c_i = NULL, *c = NULL;
	struct iio_channel *channel;
	int err;

	if (name == NULL && channel_name == NULL)
		return ERR_PTR(-ENODEV);

	/* first find matching entry in the channel map */
	mutex_lock(&iio_map_list_lock);
	list_for_each_entry(c_i, &iio_map_list, l) {
		if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
		    (channel_name &&
		     strcmp(channel_name, c_i->map->consumer_channel) != 0))
			continue;
		c = c_i;
		iio_device_get(c->indio_dev);
		break;
	}
	mutex_unlock(&iio_map_list_lock);
	if (c == NULL)
		return ERR_PTR(-ENODEV);

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (channel == NULL) {
		err = -ENOMEM;
		goto error_no_mem;
	}

	channel->indio_dev = c->indio_dev;

	if (c->map->adc_channel_label) {
		channel->channel =
			iio_chan_spec_from_name(channel->indio_dev,
						c->map->adc_channel_label);

		if (channel->channel == NULL) {
			err = -EINVAL;
			goto error_no_chan;
		}
	}

	return channel;

error_no_chan:
	kfree(channel);
error_no_mem:
	iio_device_put(c->indio_dev);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(iio_channel_get);

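/*
 * Usage sketch (illustrative, not part of the original file): a consumer
 * driver looks its channel up by the names it was mapped under and must
 * release it when done. "foo-battery" and "voltage" are hypothetical
 * names matching the map table example above.
 *
 *	struct iio_channel *chan;
 *	int val, ret;
 *
 *	chan = iio_channel_get("foo-battery", "voltage");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *	ret = iio_read_channel_processed(chan, &val);
 *	iio_channel_release(chan);
 */
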
void iio_channel_release(struct iio_channel *channel)
{
	iio_device_put(channel->indio_dev);
	kfree(channel);
}
EXPORT_SYMBOL_GPL(iio_channel_release);

struct iio_channel *iio_channel_get_all(const char *name)
{
	struct iio_channel *chans;
	struct iio_map_internal *c = NULL;
	int nummaps = 0;
	int mapind = 0;
	int i, ret;

	if (name == NULL)
		return ERR_PTR(-EINVAL);

	mutex_lock(&iio_map_list_lock);
	/* first count the matching maps */
	list_for_each_entry(c, &iio_map_list, l)
		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		else
			nummaps++;

	if (nummaps == 0) {
		ret = -ENODEV;
		goto error_ret;
	}

	/* NULL terminated array to save passing size */
	chans = kzalloc(sizeof(*chans) * (nummaps + 1), GFP_KERNEL);
	if (chans == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	/* for each map fill in the chans element */
	list_for_each_entry(c, &iio_map_list, l) {
		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		chans[mapind].indio_dev = c->indio_dev;
		chans[mapind].channel =
			iio_chan_spec_from_name(chans[mapind].indio_dev,
						c->map->adc_channel_label);
		if (chans[mapind].channel == NULL) {
			ret = -EINVAL;
			goto error_free_chans;
		}
		iio_device_get(chans[mapind].indio_dev);
		mapind++;
	}
	if (mapind == 0) {
		ret = -ENODEV;
		goto error_free_chans;
	}
	mutex_unlock(&iio_map_list_lock);

	return chans;

error_free_chans:
	/* only drop references actually taken above (mapind, not nummaps) */
	for (i = 0; i < mapind; i++)
		iio_device_put(chans[i].indio_dev);
	kfree(chans);
error_ret:
	mutex_unlock(&iio_map_list_lock);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iio_channel_get_all);

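/*
 * Usage sketch (illustrative, not part of the original file): because the
 * returned array is NULL-terminated, a consumer can walk it without
 * knowing its length. "foo-battery" and process_channel() are
 * hypothetical.
 *
 *	struct iio_channel *chans = iio_channel_get_all("foo-battery");
 *	int i;
 *
 *	if (IS_ERR(chans))
 *		return PTR_ERR(chans);
 *	for (i = 0; chans[i].indio_dev; i++)
 *		process_channel(&chans[i]);
 *	iio_channel_release_all(chans);
 */
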
void iio_channel_release_all(struct iio_channel *channels)
{
	struct iio_channel *chan = &channels[0];

	while (chan->indio_dev) {
		iio_device_put(chan->indio_dev);
		chan++;
	}
	kfree(channels);
}
EXPORT_SYMBOL_GPL(iio_channel_release_all);

static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
	enum iio_chan_info_enum info)
{
	int unused;

	/* drivers may dereference val2 unconditionally, so never pass NULL */
	if (val2 == NULL)
		val2 = &unused;

	return chan->indio_dev->info->read_raw(chan->indio_dev, chan->channel,
						val, val2, info);
}

int iio_read_channel_raw(struct iio_channel *chan, int *val)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_raw);

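/*
 * Usage sketch (illustrative, not part of the original file): a raw read
 * is typically combined with the channel's scale to obtain a physical
 * value, here assuming for simplicity that the driver reports a plain
 * IIO_VAL_INT scale:
 *
 *	int raw, scale_int, scale_frac, ret;
 *
 *	ret = iio_read_channel_raw(chan, &raw);
 *	if (ret < 0)
 *		return ret;
 *	ret = iio_read_channel_scale(chan, &scale_int, &scale_frac);
 *	if (ret == IIO_VAL_INT)
 *		value = raw * scale_int;
 */
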
static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
	int raw, int *processed, unsigned int scale)
{
	int scale_type, scale_val, scale_val2, offset;
	s64 raw64 = raw;
	int ret;

	/* apply the channel's offset first, if the driver provides one */
	ret = iio_channel_read(chan, &offset, NULL, IIO_CHAN_INFO_OFFSET);
	if (ret >= 0)
		raw64 += offset;

	scale_type = iio_channel_read(chan, &scale_val, &scale_val2,
					IIO_CHAN_INFO_SCALE);
	if (scale_type < 0)
		return scale_type;

	switch (scale_type) {
	case IIO_VAL_INT:
		*processed = raw64 * scale_val * scale;
		break;
	case IIO_VAL_INT_PLUS_MICRO:
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val * scale;
		else
			*processed = raw64 * scale_val * scale;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000LL);
		break;
	case IIO_VAL_INT_PLUS_NANO:
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val * scale;
		else
			*processed = raw64 * scale_val * scale;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000000LL);
		break;
	case IIO_VAL_FRACTIONAL:
		*processed = div_s64(raw64 * (s64)scale_val * scale,
				     scale_val2);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
	int *processed, unsigned int scale)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_convert_raw_to_processed_unlocked(chan, raw, processed,
						    scale);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);

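/*
 * Worked example (illustrative, not part of the original file): for a
 * channel whose scale is reported as IIO_VAL_INT_PLUS_MICRO with
 * scale_val = 2 and scale_val2 = 500000 (i.e. 2.5 units per LSB),
 * converting raw = 100 with scale = 1000 (milli-units) gives:
 *
 *	integer part:  100 * 2 * 1000                  = 200000
 *	micro part:    100 * 500000 * 1000 / 1000000   =  50000
 *	processed:     200000 + 50000                  = 250000
 *
 * i.e. 250 units expressed in milli-units, matching 100 * 2.5 = 250.
 */
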
int iio_read_channel_processed(struct iio_channel *chan, int *val)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	if (iio_channel_has_info(chan->channel, IIO_CHAN_INFO_PROCESSED)) {
		ret = iio_channel_read(chan, val, NULL,
				       IIO_CHAN_INFO_PROCESSED);
	} else {
		ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
		if (ret < 0)
			goto err_unlock;
		ret = iio_convert_raw_to_processed_unlocked(chan, *val, val, 1);
	}

err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_processed);

int iio_read_channel_scale(struct iio_channel *chan, int *val, int *val2)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, val2, IIO_CHAN_INFO_SCALE);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_scale);

int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type)
{
	int ret = 0;
	/* Need to verify underlying driver has not gone away */

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	*type = chan->channel->type;
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_get_channel_type);
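
/*
 * Usage sketch (illustrative, not part of the original file): a consumer
 * that only handles certain channel kinds can check the type first:
 *
 *	enum iio_chan_type type;
 *	int ret;
 *
 *	ret = iio_get_channel_type(chan, &type);
 *	if (ret < 0)
 *		return ret;
 *	if (type != IIO_VOLTAGE)
 *		return -EINVAL;
 */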