// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for the Diolan DLN-2 USB-ADC adapter
 *
 * Copyright (c) 2017 Jack Andersen
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/mfd/dln2.h>

#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/trigger.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/iio/buffer.h>
#include <linux/iio/kfifo_buf.h>

#define DLN2_ADC_MOD_NAME		"dln2-adc"

#define DLN2_ADC_ID			0x06

#define DLN2_ADC_GET_CHANNEL_COUNT	DLN2_CMD(0x01, DLN2_ADC_ID)
#define DLN2_ADC_ENABLE			DLN2_CMD(0x02, DLN2_ADC_ID)
#define DLN2_ADC_DISABLE		DLN2_CMD(0x03, DLN2_ADC_ID)
#define DLN2_ADC_CHANNEL_ENABLE		DLN2_CMD(0x05, DLN2_ADC_ID)
#define DLN2_ADC_CHANNEL_DISABLE	DLN2_CMD(0x06, DLN2_ADC_ID)
#define DLN2_ADC_SET_RESOLUTION		DLN2_CMD(0x08, DLN2_ADC_ID)
#define DLN2_ADC_CHANNEL_GET_VAL	DLN2_CMD(0x0A, DLN2_ADC_ID)
#define DLN2_ADC_CHANNEL_GET_ALL_VAL	DLN2_CMD(0x0B, DLN2_ADC_ID)
#define DLN2_ADC_CHANNEL_SET_CFG	DLN2_CMD(0x0C, DLN2_ADC_ID)
#define DLN2_ADC_CHANNEL_GET_CFG	DLN2_CMD(0x0D, DLN2_ADC_ID)
#define DLN2_ADC_CONDITION_MET_EV	DLN2_CMD(0x10, DLN2_ADC_ID)

#define DLN2_ADC_EVENT_NONE		0
#define DLN2_ADC_EVENT_BELOW		1
#define DLN2_ADC_EVENT_LEVEL_ABOVE	2
#define DLN2_ADC_EVENT_OUTSIDE		3
#define DLN2_ADC_EVENT_INSIDE		4
#define DLN2_ADC_EVENT_ALWAYS		5

#define DLN2_ADC_MAX_CHANNELS 8
#define DLN2_ADC_DATA_BITS 10

/*
 * Plays similar role to iio_demux_table in subsystem core; except allocated
 * in a fixed 8-element array.
 */
struct dln2_adc_demux_table {
	unsigned int from;
	unsigned int to;
	unsigned int length;
};

struct dln2_adc {
	struct platform_device *pdev;
	struct iio_chan_spec iio_channels[DLN2_ADC_MAX_CHANNELS + 1];
	int port, trigger_chan;
	struct iio_trigger *trig;
	struct mutex mutex;
	/* Cached sample period in milliseconds */
	unsigned int sample_period;
	/* Demux table */
	unsigned int demux_count;
	struct dln2_adc_demux_table demux[DLN2_ADC_MAX_CHANNELS];
};

struct dln2_adc_port_chan {
	u8 port;
	u8 chan;
};

struct dln2_adc_get_all_vals {
	__le16 channel_mask;
	__le16 values[DLN2_ADC_MAX_CHANNELS];
};

static void dln2_adc_add_demux(struct dln2_adc *dln2,
	unsigned int in_loc, unsigned int out_loc,
	unsigned int length)
{
	struct dln2_adc_demux_table *p = dln2->demux_count ?
		&dln2->demux[dln2->demux_count - 1] : NULL;

	if (p && p->from + p->length == in_loc &&
	    p->to + p->length == out_loc) {
		p->length += length;
	} else if (dln2->demux_count < DLN2_ADC_MAX_CHANNELS) {
		p = &dln2->demux[dln2->demux_count++];
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
	}
}

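/*
 * Rebuild the demux table for the current active_scan_mask. For example,
 * with channels 0, 2 and 3 active this produces two entries,
 * { .from = 0, .to = 0, .length = 2 } and { .from = 4, .to = 2, .length = 4 },
 * so the 16-bit samples of the selected channels are packed contiguously
 * into the scan buffer.
 */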
static void dln2_adc_update_demux(struct dln2_adc *dln2)
{
	int in_ind = -1, out_ind;
	unsigned int in_loc = 0, out_loc = 0;
	struct iio_dev *indio_dev = platform_get_drvdata(dln2->pdev);

	/* Clear out any old demux */
	dln2->demux_count = 0;

	/* Optimize all 8-channels case */
	if (iio_get_masklength(indio_dev) &&
	    (*indio_dev->active_scan_mask & 0xff) == 0xff) {
		dln2_adc_add_demux(dln2, 0, 0, 16);
		return;
	}

	/* Build demux table from fixed 8-channels to active_scan_mask */
	iio_for_each_active_channel(indio_dev, out_ind) {
		/* Handle timestamp separately */
		if (out_ind == DLN2_ADC_MAX_CHANNELS)
			break;
		for (++in_ind; in_ind != out_ind; ++in_ind)
			in_loc += 2;
		dln2_adc_add_demux(dln2, in_loc, out_loc, 2);
		out_loc += 2;
		in_loc += 2;
	}
}

static int dln2_adc_get_chan_count(struct dln2_adc *dln2)
{
	int ret;
	u8 port = dln2->port;
	u8 count;
	int olen = sizeof(count);

	ret = dln2_transfer(dln2->pdev, DLN2_ADC_GET_CHANNEL_COUNT,
			    &port, sizeof(port), &count, &olen);
	if (ret < 0) {
		dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);
		return ret;
	}
	if (olen < sizeof(count))
		return -EPROTO;

	return count;
}

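/*
 * The SET_RESOLUTION command reuses struct dln2_adc_port_chan, with the
 * chan byte carrying the requested resolution in bits (10 here).
 */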
static int dln2_adc_set_port_resolution(struct dln2_adc *dln2)
{
	int ret;
	struct dln2_adc_port_chan port_chan = {
		.port = dln2->port,
		.chan = DLN2_ADC_DATA_BITS,
	};

	ret = dln2_transfer_tx(dln2->pdev, DLN2_ADC_SET_RESOLUTION,
			       &port_chan, sizeof(port_chan));
	if (ret < 0)
		dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);

	return ret;
}

static int dln2_adc_set_chan_enabled(struct dln2_adc *dln2,
				     int channel, bool enable)
{
	int ret;
	struct dln2_adc_port_chan port_chan = {
		.port = dln2->port,
		.chan = channel,
	};
	u16 cmd = enable ? DLN2_ADC_CHANNEL_ENABLE : DLN2_ADC_CHANNEL_DISABLE;

	ret = dln2_transfer_tx(dln2->pdev, cmd, &port_chan, sizeof(port_chan));
	if (ret < 0)
		dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);

	return ret;
}

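/*
 * Enable or disable the whole ADC port. On a failed enable the device may
 * report a mask of pins already claimed by another DLN2 function; it is
 * passed back through @conflict_out so callers can report -EBUSY.
 */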
static int dln2_adc_set_port_enabled(struct dln2_adc *dln2, bool enable,
				     u16 *conflict_out)
{
	int ret;
	u8 port = dln2->port;
	__le16 conflict;
	int olen = sizeof(conflict);
	u16 cmd = enable ? DLN2_ADC_ENABLE : DLN2_ADC_DISABLE;

	if (conflict_out)
		*conflict_out = 0;

	ret = dln2_transfer(dln2->pdev, cmd, &port, sizeof(port),
			    &conflict, &olen);
	if (ret < 0) {
		dev_dbg(&dln2->pdev->dev, "Problem in %s(%d)\n",
			__func__, (int)enable);
		if (conflict_out && enable && olen >= sizeof(conflict))
			*conflict_out = le16_to_cpu(conflict);
		return ret;
	}
	if (enable && olen < sizeof(conflict))
		return -EPROTO;

	return ret;
}

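/*
 * Configure the periodic event for a channel: a non-zero @period (in ms)
 * selects DLN2_ADC_EVENT_ALWAYS so the device emits CONDITION_MET events
 * at that rate, while a zero @period disables events for the channel.
 */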
static int dln2_adc_set_chan_period(struct dln2_adc *dln2,
				    unsigned int channel, unsigned int period)
{
	int ret;
	struct {
		struct dln2_adc_port_chan port_chan;
		__u8 type;
		__le16 period;
		__le16 low;
		__le16 high;
	} __packed set_cfg = {
		.port_chan.port = dln2->port,
		.port_chan.chan = channel,
		.type = period ? DLN2_ADC_EVENT_ALWAYS : DLN2_ADC_EVENT_NONE,
		.period = cpu_to_le16(period)
	};

	ret = dln2_transfer_tx(dln2->pdev, DLN2_ADC_CHANNEL_SET_CFG,
			       &set_cfg, sizeof(set_cfg));
	if (ret < 0)
		dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);

	return ret;
}

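/*
 * Single-shot read of one channel: enable the channel and the port, issue
 * CHANNEL_GET_VAL, then disable both again. Returns the raw sample on
 * success or a negative error code.
 */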
static int dln2_adc_read(struct dln2_adc *dln2, unsigned int channel)
{
	int ret, i;
	u16 conflict;
	__le16 value;
	int olen = sizeof(value);
	struct dln2_adc_port_chan port_chan = {
		.port = dln2->port,
		.chan = channel,
	};

	ret = dln2_adc_set_chan_enabled(dln2, channel, true);
	if (ret < 0)
		return ret;

	ret = dln2_adc_set_port_enabled(dln2, true, &conflict);
	if (ret < 0) {
		if (conflict) {
			dev_err(&dln2->pdev->dev,
				"ADC pins conflict with mask %04X\n",
				(int)conflict);
			ret = -EBUSY;
		}
		goto disable_chan;
	}

	/*
	 * Call GET_VAL twice due to initial zero-return immediately after
	 * enabling channel.
	 */
	for (i = 0; i < 2; ++i) {
		ret = dln2_transfer(dln2->pdev, DLN2_ADC_CHANNEL_GET_VAL,
				    &port_chan, sizeof(port_chan),
				    &value, &olen);
		if (ret < 0) {
			dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);
			goto disable_port;
		}
		if (olen < sizeof(value)) {
			ret = -EPROTO;
			goto disable_port;
		}
	}

	ret = le16_to_cpu(value);

disable_port:
	dln2_adc_set_port_enabled(dln2, false, NULL);
disable_chan:
	dln2_adc_set_chan_enabled(dln2, channel, false);

	return ret;
}

static int dln2_adc_read_all(struct dln2_adc *dln2,
			     struct dln2_adc_get_all_vals *get_all_vals)
{
	int ret;
	__u8 port = dln2->port;
	int olen = sizeof(*get_all_vals);

	ret = dln2_transfer(dln2->pdev, DLN2_ADC_CHANNEL_GET_ALL_VAL,
			    &port, sizeof(port), get_all_vals, &olen);
	if (ret < 0) {
		dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);
		return ret;
	}
	if (olen < sizeof(*get_all_vals))
		return -EPROTO;

	return ret;
}

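/*
 * IIO_CHAN_INFO_RAW performs a direct single-shot read under the device
 * mutex. IIO_CHAN_INFO_SCALE reports the fixed 3.3 V reference divided by
 * the 10-bit range (3.3 / 1024 = 0.003222656..., hence val2 = 3222656 with
 * IIO_VAL_INT_PLUS_NANO). IIO_CHAN_INFO_SAMP_FREQ derives the frequency
 * from the cached sample period in milliseconds: 10^9 / period gives the
 * frequency in microhertz, which is then split into Hz and uHz parts.
 */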
static int dln2_adc_read_raw(struct iio_dev *indio_dev,
			     struct iio_chan_spec const *chan,
			     int *val,
			     int *val2,
			     long mask)
{
	int ret;
	unsigned int microhertz;
	struct dln2_adc *dln2 = iio_priv(indio_dev);

	switch (mask) {
	case IIO_CHAN_INFO_RAW:
		ret = iio_device_claim_direct_mode(indio_dev);
		if (ret < 0)
			return ret;

		mutex_lock(&dln2->mutex);
		ret = dln2_adc_read(dln2, chan->channel);
		mutex_unlock(&dln2->mutex);

		iio_device_release_direct_mode(indio_dev);

		if (ret < 0)
			return ret;

		*val = ret;
		return IIO_VAL_INT;

	case IIO_CHAN_INFO_SCALE:
		/*
		 * Voltage reference is fixed at 3.3v
		 *  3.3 / (1 << 10) * 1000000000
		 */
		*val = 0;
		*val2 = 3222656;
		return IIO_VAL_INT_PLUS_NANO;

	case IIO_CHAN_INFO_SAMP_FREQ:
		if (dln2->sample_period) {
			microhertz = 1000000000 / dln2->sample_period;
			*val = microhertz / 1000000;
			*val2 = microhertz % 1000000;
		} else {
			*val = 0;
			*val2 = 0;
		}

		return IIO_VAL_INT_PLUS_MICRO;

	default:
		return -EINVAL;
	}
}

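/*
 * Writing the sampling frequency converts the requested value (Hz + uHz)
 * into a period in milliseconds, clamps it to the device's 16-bit period
 * field, and reprograms the trigger channel if buffered capture is active.
 */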
static int dln2_adc_write_raw(struct iio_dev *indio_dev,
			      struct iio_chan_spec const *chan,
			      int val,
			      int val2,
			      long mask)
{
	int ret;
	unsigned int microhertz;
	struct dln2_adc *dln2 = iio_priv(indio_dev);

	switch (mask) {
	case IIO_CHAN_INFO_SAMP_FREQ:
		microhertz = 1000000 * val + val2;

		mutex_lock(&dln2->mutex);

		dln2->sample_period =
			microhertz ? 1000000000 / microhertz : UINT_MAX;
		if (dln2->sample_period > 65535) {
			dln2->sample_period = 65535;
			dev_warn(&dln2->pdev->dev,
				 "clamping period to 65535ms\n");
		}

		/*
		 * The first requested channel is arbitrated as a shared
		 * trigger source, so only one event is registered with the
		 * DLN. The event handler will then read all enabled channel
		 * values using DLN2_ADC_CHANNEL_GET_ALL_VAL to maintain
		 * synchronization between ADC readings.
		 */
		if (dln2->trigger_chan != -1)
			ret = dln2_adc_set_chan_period(dln2,
				dln2->trigger_chan, dln2->sample_period);
		else
			ret = 0;

		mutex_unlock(&dln2->mutex);

		return ret;

	default:
		return -EINVAL;
	}
}

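/*
 * Enable or disable each hardware channel to match the requested scan mask
 * and rebuild the demux table. On failure, roll back any channels already
 * enabled and report -EBUSY.
 */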
static int dln2_update_scan_mode(struct iio_dev *indio_dev,
				 const unsigned long *scan_mask)
{
	struct dln2_adc *dln2 = iio_priv(indio_dev);
	int chan_count = indio_dev->num_channels - 1;
	int ret, i, j;

	mutex_lock(&dln2->mutex);

	for (i = 0; i < chan_count; ++i) {
		ret = dln2_adc_set_chan_enabled(dln2, i,
						test_bit(i, scan_mask));
		if (ret < 0) {
			for (j = 0; j < i; ++j)
				dln2_adc_set_chan_enabled(dln2, j, false);
			mutex_unlock(&dln2->mutex);
			dev_err(&dln2->pdev->dev,
				"Unable to enable ADC channel %d\n", i);
			return -EBUSY;
		}
	}

	dln2_adc_update_demux(dln2);

	mutex_unlock(&dln2->mutex);

	return 0;
}

#define DLN2_ADC_CHAN(lval, idx) {					\
	lval.type = IIO_VOLTAGE;					\
	lval.channel = idx;						\
	lval.indexed = 1;						\
	lval.info_mask_separate = BIT(IIO_CHAN_INFO_RAW);		\
	lval.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SCALE) |	\
				       BIT(IIO_CHAN_INFO_SAMP_FREQ);	\
	lval.scan_index = idx;						\
	lval.scan_type.sign = 'u';					\
	lval.scan_type.realbits = DLN2_ADC_DATA_BITS;			\
	lval.scan_type.storagebits = 16;				\
	lval.scan_type.endianness = IIO_LE;				\
}

/* Assignment version of IIO_CHAN_SOFT_TIMESTAMP */
#define IIO_CHAN_SOFT_TIMESTAMP_ASSIGN(lval, _si) {			\
	lval.type = IIO_TIMESTAMP;					\
	lval.channel = -1;						\
	lval.scan_index = _si;						\
	lval.scan_type.sign = 's';					\
	lval.scan_type.realbits = 64;					\
	lval.scan_type.storagebits = 64;				\
}

static const struct iio_info dln2_adc_info = {
	.read_raw = dln2_adc_read_raw,
	.write_raw = dln2_adc_write_raw,
	.update_scan_mode = dln2_update_scan_mode,
};

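/*
 * Pollfunc bottom half: fetch all channel values in one GET_ALL_VAL
 * transfer, demux them into the scan buffer layout described by the
 * demux table, and push the scan with a timestamp.
 */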
static irqreturn_t dln2_adc_trigger_h(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct {
		__le16 values[DLN2_ADC_MAX_CHANNELS];
		int64_t timestamp_space;
	} data;
	struct dln2_adc_get_all_vals dev_data;
	struct dln2_adc *dln2 = iio_priv(indio_dev);
	const struct dln2_adc_demux_table *t;
	int ret, i;

	mutex_lock(&dln2->mutex);
	ret = dln2_adc_read_all(dln2, &dev_data);
	mutex_unlock(&dln2->mutex);
	if (ret < 0)
		goto done;

	memset(&data, 0, sizeof(data));

	/* Demux operation */
	for (i = 0; i < dln2->demux_count; ++i) {
		t = &dln2->demux[i];
		memcpy((void *)data.values + t->to,
		       (void *)dev_data.values + t->from, t->length);
	}

	iio_push_to_buffers_with_timestamp(indio_dev, &data,
					   iio_get_time_ns(indio_dev));

done:
	iio_trigger_notify_done(indio_dev->trig);
	return IRQ_HANDLED;
}

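/*
 * Buffer postenable: enable the ADC port and program the first active
 * channel as the shared periodic event source (see dln2_adc_write_raw()).
 * If only the timestamp channel is active, no event is registered.
 */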
static int dln2_adc_triggered_buffer_postenable(struct iio_dev *indio_dev)
{
	int ret;
	struct dln2_adc *dln2 = iio_priv(indio_dev);
	u16 conflict;
	unsigned int trigger_chan;

	mutex_lock(&dln2->mutex);

	/* Enable ADC */
	ret = dln2_adc_set_port_enabled(dln2, true, &conflict);
	if (ret < 0) {
		mutex_unlock(&dln2->mutex);
		dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);
		if (conflict) {
			dev_err(&dln2->pdev->dev,
				"ADC pins conflict with mask %04X\n",
				(int)conflict);
			ret = -EBUSY;
		}
		return ret;
	}

	/* Assign trigger channel based on first enabled channel */
	trigger_chan = find_first_bit(indio_dev->active_scan_mask,
				      iio_get_masklength(indio_dev));
	if (trigger_chan < DLN2_ADC_MAX_CHANNELS) {
		dln2->trigger_chan = trigger_chan;
		ret = dln2_adc_set_chan_period(dln2, dln2->trigger_chan,
					       dln2->sample_period);
		mutex_unlock(&dln2->mutex);
		if (ret < 0) {
			dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);
			return ret;
		}
	} else {
		dln2->trigger_chan = -1;
		mutex_unlock(&dln2->mutex);
	}

	return 0;
}

static int dln2_adc_triggered_buffer_predisable(struct iio_dev *indio_dev)
{
	int ret;
	struct dln2_adc *dln2 = iio_priv(indio_dev);

	mutex_lock(&dln2->mutex);

	/* Disable trigger channel */
	if (dln2->trigger_chan != -1) {
		dln2_adc_set_chan_period(dln2, dln2->trigger_chan, 0);
		dln2->trigger_chan = -1;
	}

	/* Disable ADC */
	ret = dln2_adc_set_port_enabled(dln2, false, NULL);

	mutex_unlock(&dln2->mutex);
	if (ret < 0)
		dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);

	return ret;
}

static const struct iio_buffer_setup_ops dln2_adc_buffer_setup_ops = {
	.postenable = dln2_adc_triggered_buffer_postenable,
	.predisable = dln2_adc_triggered_buffer_predisable,
};

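/*
 * DLN2_ADC_CONDITION_MET_EV callback: fires the IIO trigger so the
 * pollfunc above runs and drains the latest samples into the buffer.
 */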
static void dln2_adc_event(struct platform_device *pdev, u16 echo,
			   const void *data, int len)
{
	struct iio_dev *indio_dev = platform_get_drvdata(pdev);
	struct dln2_adc *dln2 = iio_priv(indio_dev);

	/* Called via URB completion handler */
	iio_trigger_poll(dln2->trig);
}

static int dln2_adc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dln2_adc *dln2;
	struct dln2_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct iio_dev *indio_dev;
	int i, ret, chans;

	indio_dev = devm_iio_device_alloc(dev, sizeof(*dln2));
	if (!indio_dev) {
		dev_err(dev, "failed allocating iio device\n");
		return -ENOMEM;
	}

	dln2 = iio_priv(indio_dev);
	dln2->pdev = pdev;
	dln2->port = pdata->port;
	dln2->trigger_chan = -1;
	mutex_init(&dln2->mutex);

	platform_set_drvdata(pdev, indio_dev);

	ret = dln2_adc_set_port_resolution(dln2);
	if (ret < 0) {
		dev_err(dev, "failed to set ADC resolution to 10 bits\n");
		return ret;
	}

	chans = dln2_adc_get_chan_count(dln2);
	if (chans < 0) {
		dev_err(dev, "failed to get channel count: %d\n", chans);
		return chans;
	}
	if (chans > DLN2_ADC_MAX_CHANNELS) {
		chans = DLN2_ADC_MAX_CHANNELS;
		dev_warn(dev, "clamping channels to %d\n",
			 DLN2_ADC_MAX_CHANNELS);
	}

	for (i = 0; i < chans; ++i)
		DLN2_ADC_CHAN(dln2->iio_channels[i], i)
	IIO_CHAN_SOFT_TIMESTAMP_ASSIGN(dln2->iio_channels[i], i);

	indio_dev->name = DLN2_ADC_MOD_NAME;
	indio_dev->info = &dln2_adc_info;
	indio_dev->modes = INDIO_DIRECT_MODE;
	indio_dev->channels = dln2->iio_channels;
	indio_dev->num_channels = chans + 1;
	indio_dev->setup_ops = &dln2_adc_buffer_setup_ops;

	dln2->trig = devm_iio_trigger_alloc(dev, "%s-dev%d",
					    indio_dev->name,
					    iio_device_id(indio_dev));
	if (!dln2->trig) {
		dev_err(dev, "failed to allocate trigger\n");
		return -ENOMEM;
	}
	iio_trigger_set_drvdata(dln2->trig, dln2);
	ret = devm_iio_trigger_register(dev, dln2->trig);
	if (ret) {
		dev_err(dev, "failed to register trigger: %d\n", ret);
		return ret;
	}
	iio_trigger_set_immutable(indio_dev, dln2->trig);

	ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL,
					      dln2_adc_trigger_h,
					      &dln2_adc_buffer_setup_ops);
	if (ret) {
		dev_err(dev, "failed to allocate triggered buffer: %d\n", ret);
		return ret;
	}

	ret = dln2_register_event_cb(pdev, DLN2_ADC_CONDITION_MET_EV,
				     dln2_adc_event);
	if (ret) {
		dev_err(dev, "failed to setup DLN2 periodic event: %d\n", ret);
		return ret;
	}

	ret = iio_device_register(indio_dev);
	if (ret) {
		dev_err(dev, "failed to register iio device: %d\n", ret);
		goto unregister_event;
	}

	return ret;

unregister_event:
	dln2_unregister_event_cb(pdev, DLN2_ADC_CONDITION_MET_EV);

	return ret;
}

static void dln2_adc_remove(struct platform_device *pdev)
{
	struct iio_dev *indio_dev = platform_get_drvdata(pdev);

	iio_device_unregister(indio_dev);
	dln2_unregister_event_cb(pdev, DLN2_ADC_CONDITION_MET_EV);
}

static struct platform_driver dln2_adc_driver = {
	.driver.name	= DLN2_ADC_MOD_NAME,
	.probe		= dln2_adc_probe,
	.remove		= dln2_adc_remove,
};

module_platform_driver(dln2_adc_driver);

MODULE_AUTHOR("Jack Andersen <[email protected]>");
MODULE_DESCRIPTION("Driver for the Diolan DLN2 ADC interface");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:dln2-adc");