1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * RP1 CSI-2 Driver
4 *
5 * Copyright (c) 2021-2024 Raspberry Pi Ltd.
6 * Copyright (c) 2023-2024 Ideas on Board Oy
7 */
8
9 #include <linux/delay.h>
10 #include <linux/moduleparam.h>
11 #include <linux/pm_runtime.h>
12 #include <linux/seq_file.h>
13
14 #include <media/videobuf2-dma-contig.h>
15
16 #include "cfe.h"
17 #include "csi2.h"
18
19 #include "cfe-trace.h"
20
/* When set, error interrupts are unmasked and counted (see debugfs). */
static bool csi2_track_errors;
module_param_named(track_csi2_errors, csi2_track_errors, bool, 0);
MODULE_PARM_DESC(track_csi2_errors, "track csi-2 errors");

#define csi2_dbg(csi2, fmt, arg...) dev_dbg((csi2)->v4l2_dev->dev, fmt, ##arg)
#define csi2_err(csi2, fmt, arg...) dev_err((csi2)->v4l2_dev->dev, fmt, ##arg)

/* CSI2-DMA registers */
#define CSI2_STATUS 0x000
#define CSI2_QOS 0x004
#define CSI2_DISCARDS_OVERFLOW 0x008
#define CSI2_DISCARDS_INACTIVE 0x00c
#define CSI2_DISCARDS_UNMATCHED 0x010
#define CSI2_DISCARDS_LEN_LIMIT 0x014

/* Layout of the four CSI2_DISCARDS_* counter registers. */
#define CSI2_DISCARDS_AMOUNT_SHIFT 0
#define CSI2_DISCARDS_AMOUNT_MASK GENMASK(23, 0)
#define CSI2_DISCARDS_DT_SHIFT 24
#define CSI2_DISCARDS_DT_MASK GENMASK(29, 24)
#define CSI2_DISCARDS_VC_SHIFT 30
#define CSI2_DISCARDS_VC_MASK GENMASK(31, 30)

#define CSI2_LLEV_PANICS 0x018
#define CSI2_ULEV_PANICS 0x01c
#define CSI2_IRQ_MASK 0x020
#define CSI2_IRQ_MASK_IRQ_OVERFLOW BIT(0)
#define CSI2_IRQ_MASK_IRQ_DISCARD_OVERFLOW BIT(1)
#define CSI2_IRQ_MASK_IRQ_DISCARD_LENGTH_LIMIT BIT(2)
#define CSI2_IRQ_MASK_IRQ_DISCARD_UNMATCHED BIT(3)
#define CSI2_IRQ_MASK_IRQ_DISCARD_INACTIVE BIT(4)
#define CSI2_IRQ_MASK_IRQ_ALL \
	(CSI2_IRQ_MASK_IRQ_OVERFLOW | CSI2_IRQ_MASK_IRQ_DISCARD_OVERFLOW | \
	 CSI2_IRQ_MASK_IRQ_DISCARD_LENGTH_LIMIT | \
	 CSI2_IRQ_MASK_IRQ_DISCARD_UNMATCHED | \
	 CSI2_IRQ_MASK_IRQ_DISCARD_INACTIVE)

#define CSI2_CTRL 0x024
/* Per-channel registers: each channel occupies a 0x40-byte stride. */
#define CSI2_CH_CTRL(x) ((x) * 0x40 + 0x28)
#define CSI2_CH_ADDR0(x) ((x) * 0x40 + 0x2c)
#define CSI2_CH_ADDR1(x) ((x) * 0x40 + 0x3c)
#define CSI2_CH_STRIDE(x) ((x) * 0x40 + 0x30)
#define CSI2_CH_LENGTH(x) ((x) * 0x40 + 0x34)
#define CSI2_CH_DEBUG(x) ((x) * 0x40 + 0x38)
#define CSI2_CH_FRAME_SIZE(x) ((x) * 0x40 + 0x40)
#define CSI2_CH_COMP_CTRL(x) ((x) * 0x40 + 0x44)
#define CSI2_CH_FE_FRAME_ID(x) ((x) * 0x40 + 0x48)

/* CSI2_STATUS */
#define CSI2_STATUS_IRQ_FS(x) (BIT(0) << (x))
#define CSI2_STATUS_IRQ_FE(x) (BIT(4) << (x))
#define CSI2_STATUS_IRQ_FE_ACK(x) (BIT(8) << (x))
#define CSI2_STATUS_IRQ_LE(x) (BIT(12) << (x))
#define CSI2_STATUS_IRQ_LE_ACK(x) (BIT(16) << (x))
#define CSI2_STATUS_IRQ_CH_MASK(x) \
	(CSI2_STATUS_IRQ_FS(x) | CSI2_STATUS_IRQ_FE(x) | \
	 CSI2_STATUS_IRQ_FE_ACK(x) | CSI2_STATUS_IRQ_LE(x) | \
	 CSI2_STATUS_IRQ_LE_ACK(x))
#define CSI2_STATUS_IRQ_OVERFLOW BIT(20)
#define CSI2_STATUS_IRQ_DISCARD_OVERFLOW BIT(21)
#define CSI2_STATUS_IRQ_DISCARD_LEN_LIMIT BIT(22)
#define CSI2_STATUS_IRQ_DISCARD_UNMATCHED BIT(23)
#define CSI2_STATUS_IRQ_DISCARD_INACTIVE BIT(24)

/* CSI2_CTRL */
#define CSI2_CTRL_EOP_IS_EOL BIT(0)

/* CSI2_CH_CTRL */
#define CSI2_CH_CTRL_DMA_EN BIT(0)
#define CSI2_CH_CTRL_FORCE BIT(3)
#define CSI2_CH_CTRL_AUTO_ARM BIT(4)
#define CSI2_CH_CTRL_IRQ_EN_FS BIT(13)
#define CSI2_CH_CTRL_IRQ_EN_FE BIT(14)
#define CSI2_CH_CTRL_IRQ_EN_FE_ACK BIT(15)
#define CSI2_CH_CTRL_IRQ_EN_LE BIT(16)
#define CSI2_CH_CTRL_IRQ_EN_LE_ACK BIT(17)
#define CSI2_CH_CTRL_FLUSH_FE BIT(28)
#define CSI2_CH_CTRL_PACK_LINE BIT(29)
#define CSI2_CH_CTRL_PACK_BYTES BIT(30)
#define CSI2_CH_CTRL_CH_MODE_MASK GENMASK(2, 1)
#define CSI2_CH_CTRL_VC_MASK GENMASK(6, 5)
#define CSI2_CH_CTRL_DT_MASK GENMASK(12, 7)
#define CSI2_CH_CTRL_LC_MASK GENMASK(27, 18)

/* CHx_COMPRESSION_CONTROL */
#define CSI2_CH_COMP_CTRL_OFFSET_MASK GENMASK(15, 0)
#define CSI2_CH_COMP_CTRL_SHIFT_MASK GENMASK(19, 16)
#define CSI2_CH_COMP_CTRL_MODE_MASK GENMASK(25, 24)
108
/* Read a 32-bit CSI2-DMA register at byte offset @offset. */
static inline u32 csi2_reg_read(struct csi2_device *csi2, u32 offset)
{
	u32 val;

	val = readl(csi2->base + offset);

	return val;
}
113
/* Write @val to the 32-bit CSI2-DMA register at byte offset @offset. */
static inline void csi2_reg_write(struct csi2_device *csi2, u32 offset, u32 val)
{
	writel(val, csi2->base + offset);
}
118
/*
 * Replace the bits selected by @mask in *valp with @field.
 * @field is unshifted; it is moved into position with __ffs(mask).
 */
static inline void set_field(u32 *valp, u32 field, u32 mask)
{
	*valp = (*valp & ~mask) | ((field << __ffs(mask)) & mask);
}
127
/*
 * debugfs "csi2_regs" show handler: dump all global and per-channel
 * CSI2-DMA registers.
 */
static int csi2_regs_show(struct seq_file *s, void *data)
{
	struct csi2_device *csi2 = s->private;
	int ret;

	/* The registers are only accessible while the device is powered. */
	ret = pm_runtime_resume_and_get(csi2->v4l2_dev->dev);
	if (ret)
		return ret;

#define DUMP(reg) seq_printf(s, #reg " \t0x%08x\n", csi2_reg_read(csi2, reg))
#define DUMP_CH(idx, reg) seq_printf(s, #reg "(%u) \t0x%08x\n", idx, \
				     csi2_reg_read(csi2, reg(idx)))

	DUMP(CSI2_STATUS);
	DUMP(CSI2_DISCARDS_OVERFLOW);
	DUMP(CSI2_DISCARDS_INACTIVE);
	DUMP(CSI2_DISCARDS_UNMATCHED);
	DUMP(CSI2_DISCARDS_LEN_LIMIT);
	DUMP(CSI2_LLEV_PANICS);
	DUMP(CSI2_ULEV_PANICS);
	DUMP(CSI2_IRQ_MASK);
	DUMP(CSI2_CTRL);

	for (unsigned int i = 0; i < CSI2_NUM_CHANNELS; ++i) {
		DUMP_CH(i, CSI2_CH_CTRL);
		DUMP_CH(i, CSI2_CH_ADDR0);
		DUMP_CH(i, CSI2_CH_ADDR1);
		DUMP_CH(i, CSI2_CH_STRIDE);
		DUMP_CH(i, CSI2_CH_LENGTH);
		DUMP_CH(i, CSI2_CH_DEBUG);
		DUMP_CH(i, CSI2_CH_FRAME_SIZE);
		DUMP_CH(i, CSI2_CH_COMP_CTRL);
		DUMP_CH(i, CSI2_CH_FE_FRAME_ID);
	}

#undef DUMP
#undef DUMP_CH

	pm_runtime_put(csi2->v4l2_dev->dev);

	return 0;
}
170
171 DEFINE_SHOW_ATTRIBUTE(csi2_regs);
172
csi2_errors_show(struct seq_file * s,void * data)173 static int csi2_errors_show(struct seq_file *s, void *data)
174 {
175 struct csi2_device *csi2 = s->private;
176 unsigned long flags;
177 u32 discards_table[DISCARDS_TABLE_NUM_VCS][DISCARDS_TABLE_NUM_ENTRIES];
178 u32 discards_dt_table[DISCARDS_TABLE_NUM_ENTRIES];
179 u32 overflows;
180
181 spin_lock_irqsave(&csi2->errors_lock, flags);
182
183 memcpy(discards_table, csi2->discards_table, sizeof(discards_table));
184 memcpy(discards_dt_table, csi2->discards_dt_table,
185 sizeof(discards_dt_table));
186 overflows = csi2->overflows;
187
188 csi2->overflows = 0;
189 memset(csi2->discards_table, 0, sizeof(discards_table));
190 memset(csi2->discards_dt_table, 0, sizeof(discards_dt_table));
191
192 spin_unlock_irqrestore(&csi2->errors_lock, flags);
193
194 seq_printf(s, "Overflows %u\n", overflows);
195 seq_puts(s, "Discards:\n");
196 seq_puts(s, "VC OVLF LEN UNMATCHED INACTIVE\n");
197
198 for (unsigned int vc = 0; vc < DISCARDS_TABLE_NUM_VCS; ++vc) {
199 seq_printf(s, "%u %10u %10u %10u %10u\n", vc,
200 discards_table[vc][DISCARDS_TABLE_OVERFLOW],
201 discards_table[vc][DISCARDS_TABLE_LENGTH_LIMIT],
202 discards_table[vc][DISCARDS_TABLE_UNMATCHED],
203 discards_table[vc][DISCARDS_TABLE_INACTIVE]);
204 }
205
206 seq_printf(s, "Last DT %10u %10u %10u %10u\n",
207 discards_dt_table[DISCARDS_TABLE_OVERFLOW],
208 discards_dt_table[DISCARDS_TABLE_LENGTH_LIMIT],
209 discards_dt_table[DISCARDS_TABLE_UNMATCHED],
210 discards_dt_table[DISCARDS_TABLE_INACTIVE]);
211
212 return 0;
213 }
214
215 DEFINE_SHOW_ATTRIBUTE(csi2_errors);
216
/*
 * Account error interrupts: count overflows and, for each asserted
 * discard interrupt, read-and-clear the matching discard counter
 * register and fold it into the per-VC discard table.
 */
static void csi2_isr_handle_errors(struct csi2_device *csi2, u32 status)
{
	/* Index order matches the DISCARDS_TABLE_* entry enumeration. */
	static const u32 discard_bits[] = {
		CSI2_STATUS_IRQ_DISCARD_OVERFLOW,
		CSI2_STATUS_IRQ_DISCARD_LEN_LIMIT,
		CSI2_STATUS_IRQ_DISCARD_UNMATCHED,
		CSI2_STATUS_IRQ_DISCARD_INACTIVE,
	};
	static const u8 discard_regs[] = {
		CSI2_DISCARDS_OVERFLOW,
		CSI2_DISCARDS_LEN_LIMIT,
		CSI2_DISCARDS_UNMATCHED,
		CSI2_DISCARDS_INACTIVE,
	};

	spin_lock(&csi2->errors_lock);

	if (status & CSI2_STATUS_IRQ_OVERFLOW)
		csi2->overflows++;

	for (unsigned int idx = 0; idx < DISCARDS_TABLE_NUM_ENTRIES; ++idx) {
		u32 reg, amount;
		u8 dt, vc;

		if (!(status & discard_bits[idx]))
			continue;

		/* Read the counter, then reset it in hardware. */
		reg = csi2_reg_read(csi2, discard_regs[idx]);
		csi2_reg_write(csi2, discard_regs[idx], 0);

		amount = (reg & CSI2_DISCARDS_AMOUNT_MASK) >>
			 CSI2_DISCARDS_AMOUNT_SHIFT;
		dt = (reg & CSI2_DISCARDS_DT_MASK) >> CSI2_DISCARDS_DT_SHIFT;
		vc = (reg & CSI2_DISCARDS_VC_MASK) >> CSI2_DISCARDS_VC_SHIFT;

		csi2->discards_table[vc][idx] += amount;
		csi2->discards_dt_table[idx] = dt;
	}

	spin_unlock(&csi2->errors_lock);
}
258
/*
 * CSI-2 interrupt handler. Reports per-channel frame-start/frame-end
 * events through the @sof / @eof arrays (one entry per channel).
 */
void csi2_isr(struct csi2_device *csi2, bool *sof, bool *eof)
{
	u32 status = csi2_reg_read(csi2, CSI2_STATUS);

	/* Write value back to clear the interrupts */
	csi2_reg_write(csi2, CSI2_STATUS, status);

	for (unsigned int ch = 0; ch < CSI2_NUM_CHANNELS; ch++) {
		u32 dbg;

		if (!(status & CSI2_STATUS_IRQ_CH_MASK(ch)))
			continue;

		dbg = csi2_reg_read(csi2, CSI2_CH_DEBUG(ch));

		trace_csi2_irq(ch, status, dbg);

		sof[ch] = !!(status & CSI2_STATUS_IRQ_FS(ch));
		eof[ch] = !!(status & CSI2_STATUS_IRQ_FE_ACK(ch));
	}

	if (csi2_track_errors)
		csi2_isr_handle_errors(csi2, status);
}
285
/*
 * Program the DMA buffer for @channel. All address/stride/length values
 * are written in units of 16 bytes (hence the >> 4 shifts).
 */
void csi2_set_buffer(struct csi2_device *csi2, unsigned int channel,
		     dma_addr_t dmaaddr, unsigned int stride, unsigned int size)
{
	u64 addr = (u64)dmaaddr >> 4;

	csi2_reg_write(csi2, CSI2_CH_LENGTH(channel), size >> 4);
	csi2_reg_write(csi2, CSI2_CH_STRIDE(channel), stride >> 4);
	csi2_reg_write(csi2, CSI2_CH_ADDR1(channel), addr >> 32);
	/*
	 * ADDRESS0 must be written last as it triggers the double buffering
	 * mechanism for all buffer registers within the hardware.
	 */
	csi2_reg_write(csi2, CSI2_CH_ADDR0(channel), addr & 0xffffffff);
}
300
csi2_set_compression(struct csi2_device * csi2,unsigned int channel,enum csi2_compression_mode mode,unsigned int shift,unsigned int offset)301 void csi2_set_compression(struct csi2_device *csi2, unsigned int channel,
302 enum csi2_compression_mode mode, unsigned int shift,
303 unsigned int offset)
304 {
305 u32 compression = 0;
306
307 set_field(&compression, CSI2_CH_COMP_CTRL_OFFSET_MASK, offset);
308 set_field(&compression, CSI2_CH_COMP_CTRL_SHIFT_MASK, shift);
309 set_field(&compression, CSI2_CH_COMP_CTRL_MODE_MASK, mode);
310 csi2_reg_write(csi2, CSI2_CH_COMP_CTRL(channel), compression);
311 }
312
/*
 * Start DMA on @channel. With a non-zero width and height the channel
 * runs in @mode with a fixed frame size; otherwise the default mode with
 * no frame-size limit is used. Records @height for later line tracking.
 */
void csi2_start_channel(struct csi2_device *csi2, unsigned int channel,
			enum csi2_mode mode, bool auto_arm, bool pack_bytes,
			unsigned int width, unsigned int height,
			u8 vc, u8 dt)
{
	u32 ch_ctrl;

	csi2_dbg(csi2, "%s [%u]\n", __func__, channel);

	/* Reset the channel and clear any pending channel interrupts. */
	csi2_reg_write(csi2, CSI2_CH_CTRL(channel), 0);
	csi2_reg_write(csi2, CSI2_CH_DEBUG(channel), 0);
	csi2_reg_write(csi2, CSI2_STATUS, CSI2_STATUS_IRQ_CH_MASK(channel));

	/* Enable channel and FS/FE interrupts. */
	ch_ctrl = CSI2_CH_CTRL_DMA_EN | CSI2_CH_CTRL_IRQ_EN_FS |
		  CSI2_CH_CTRL_IRQ_EN_FE_ACK | CSI2_CH_CTRL_PACK_LINE;

	/* PACK_BYTES ensures no striding for embedded data. */
	if (pack_bytes)
		ch_ctrl |= CSI2_CH_CTRL_PACK_BYTES;

	if (auto_arm)
		ch_ctrl |= CSI2_CH_CTRL_AUTO_ARM;

	if (width && height) {
		set_field(&ch_ctrl, mode, CSI2_CH_CTRL_CH_MODE_MASK);
		csi2_reg_write(csi2, CSI2_CH_FRAME_SIZE(channel),
			       (height << 16) | width);
	} else {
		set_field(&ch_ctrl, 0x0, CSI2_CH_CTRL_CH_MODE_MASK);
		csi2_reg_write(csi2, CSI2_CH_FRAME_SIZE(channel), 0);
	}

	set_field(&ch_ctrl, vc, CSI2_CH_CTRL_VC_MASK);
	set_field(&ch_ctrl, dt, CSI2_CH_CTRL_DT_MASK);
	csi2_reg_write(csi2, CSI2_CH_CTRL(channel), ch_ctrl);

	csi2->num_lines[channel] = height;
}
350
csi2_stop_channel(struct csi2_device * csi2,unsigned int channel)351 void csi2_stop_channel(struct csi2_device *csi2, unsigned int channel)
352 {
353 csi2_dbg(csi2, "%s [%u]\n", __func__, channel);
354
355 /* Channel disable. Use FORCE to allow stopping mid-frame. */
356 csi2_reg_write(csi2, CSI2_CH_CTRL(channel), CSI2_CH_CTRL_FORCE);
357 /* Latch the above change by writing to the ADDR0 register. */
358 csi2_reg_write(csi2, CSI2_CH_ADDR0(channel), 0);
359 /* Write this again, the HW needs it! */
360 csi2_reg_write(csi2, CSI2_CH_ADDR0(channel), 0);
361 }
362
csi2_open_rx(struct csi2_device * csi2)363 void csi2_open_rx(struct csi2_device *csi2)
364 {
365 csi2_reg_write(csi2, CSI2_IRQ_MASK,
366 csi2_track_errors ? CSI2_IRQ_MASK_IRQ_ALL : 0);
367
368 dphy_start(&csi2->dphy);
369
370 csi2_reg_write(csi2, CSI2_CTRL, CSI2_CTRL_EOP_IS_EOL);
371 }
372
/* Disable the CSI-2 receive path: stop the D-PHY and mask all IRQs. */
void csi2_close_rx(struct csi2_device *csi2)
{
	dphy_stop(&csi2->dphy);

	csi2_reg_write(csi2, CSI2_IRQ_MASK, 0);
}
379
csi2_init_state(struct v4l2_subdev * sd,struct v4l2_subdev_state * state)380 static int csi2_init_state(struct v4l2_subdev *sd,
381 struct v4l2_subdev_state *state)
382 {
383 struct v4l2_subdev_route routes[] = { {
384 .sink_pad = CSI2_PAD_SINK,
385 .sink_stream = 0,
386 .source_pad = CSI2_PAD_FIRST_SOURCE,
387 .source_stream = 0,
388 .flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE,
389 } };
390
391 struct v4l2_subdev_krouting routing = {
392 .num_routes = ARRAY_SIZE(routes),
393 .routes = routes,
394 };
395
396 int ret;
397
398 ret = v4l2_subdev_set_routing_with_fmt(sd, state, &routing,
399 &cfe_default_format);
400 if (ret)
401 return ret;
402
403 return 0;
404 }
405
/*
 * Set the format on a CSI-2 subdev pad.
 *
 * Sink pad: store the requested format (falling back to SRGGB10 if the
 * mbus code is unknown) and propagate it to the routed source stream
 * with the field forced to V4L2_FIELD_NONE.
 *
 * Source pad: only the mbus code may change, and only to the sink's
 * code or its 16-bit / compressed variants.
 */
static int csi2_pad_set_fmt(struct v4l2_subdev *sd,
			    struct v4l2_subdev_state *state,
			    struct v4l2_subdev_format *format)
{
	if (format->pad == CSI2_PAD_SINK) {
		/* Store the sink format and propagate it to the source. */

		const struct cfe_fmt *cfe_fmt;

		cfe_fmt = find_format_by_code(format->format.code);
		if (!cfe_fmt) {
			/* Unknown code: substitute a known-good default. */
			cfe_fmt = find_format_by_code(MEDIA_BUS_FMT_SRGGB10_1X10);
			format->format.code = cfe_fmt->code;
		}

		struct v4l2_mbus_framefmt *fmt;

		fmt = v4l2_subdev_state_get_format(state, format->pad,
						   format->stream);
		if (!fmt)
			return -EINVAL;

		*fmt = format->format;

		fmt = v4l2_subdev_state_get_opposite_stream_format(state,
								   format->pad,
								   format->stream);
		if (!fmt)
			return -EINVAL;

		/*
		 * Clear the field only now: the sink keeps the caller's
		 * field value, the propagated source format does not.
		 */
		format->format.field = V4L2_FIELD_NONE;

		*fmt = format->format;
	} else {
		/* Only allow changing the source pad mbus code. */

		struct v4l2_mbus_framefmt *sink_fmt, *source_fmt;
		u32 sink_code;
		u32 code;

		sink_fmt = v4l2_subdev_state_get_opposite_stream_format(state,
									format->pad,
									format->stream);
		if (!sink_fmt)
			return -EINVAL;

		source_fmt = v4l2_subdev_state_get_format(state, format->pad,
							  format->stream);
		if (!source_fmt)
			return -EINVAL;

		sink_code = sink_fmt->code;
		code = format->format.code;

		/*
		 * Only allow changing the mbus code to:
		 * - The sink's mbus code
		 * - The 16-bit version of the sink's mbus code
		 * - The compressed version of the sink's mbus code
		 */
		if (code == sink_code ||
		    code == cfe_find_16bit_code(sink_code) ||
		    code == cfe_find_compressed_code(sink_code))
			source_fmt->code = code;

		/* Report back whatever code is actually active. */
		format->format.code = source_fmt->code;
	}

	return 0;
}
476
csi2_set_routing(struct v4l2_subdev * sd,struct v4l2_subdev_state * state,enum v4l2_subdev_format_whence which,struct v4l2_subdev_krouting * routing)477 static int csi2_set_routing(struct v4l2_subdev *sd,
478 struct v4l2_subdev_state *state,
479 enum v4l2_subdev_format_whence which,
480 struct v4l2_subdev_krouting *routing)
481 {
482 int ret;
483
484 ret = v4l2_subdev_routing_validate(sd, routing,
485 V4L2_SUBDEV_ROUTING_ONLY_1_TO_1 |
486 V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING);
487 if (ret)
488 return ret;
489
490 /* Only stream ID 0 allowed on source pads */
491 for (unsigned int i = 0; i < routing->num_routes; ++i) {
492 const struct v4l2_subdev_route *route = &routing->routes[i];
493
494 if (route->source_stream != 0)
495 return -EINVAL;
496 }
497
498 ret = v4l2_subdev_set_routing_with_fmt(sd, state, routing,
499 &cfe_default_format);
500 if (ret)
501 return ret;
502
503 return 0;
504 }
505
/* Pad-level operations: per-stream formats and routing. */
static const struct v4l2_subdev_pad_ops csi2_subdev_pad_ops = {
	.get_fmt = v4l2_subdev_get_fmt,
	.set_fmt = csi2_pad_set_fmt,
	.set_routing = csi2_set_routing,
	.link_validate = v4l2_subdev_link_validate_default,
};

/* Media entity operations: standard subdev link validation. */
static const struct media_entity_operations csi2_entity_ops = {
	.link_validate = v4l2_subdev_link_validate,
	.has_pad_interdep = v4l2_subdev_has_pad_interdep,
};

static const struct v4l2_subdev_ops csi2_subdev_ops = {
	.pad = &csi2_subdev_pad_ops,
};

static const struct v4l2_subdev_internal_ops csi2_internal_ops = {
	.init_state = csi2_init_state,
};
525
csi2_init(struct csi2_device * csi2,struct dentry * debugfs)526 int csi2_init(struct csi2_device *csi2, struct dentry *debugfs)
527 {
528 unsigned int ret;
529
530 spin_lock_init(&csi2->errors_lock);
531
532 csi2->dphy.dev = csi2->v4l2_dev->dev;
533 dphy_probe(&csi2->dphy);
534
535 debugfs_create_file("csi2_regs", 0440, debugfs, csi2, &csi2_regs_fops);
536
537 if (csi2_track_errors)
538 debugfs_create_file("csi2_errors", 0440, debugfs, csi2,
539 &csi2_errors_fops);
540
541 csi2->pad[CSI2_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
542
543 for (unsigned int i = CSI2_PAD_FIRST_SOURCE;
544 i < CSI2_PAD_FIRST_SOURCE + CSI2_PAD_NUM_SOURCES; i++)
545 csi2->pad[i].flags = MEDIA_PAD_FL_SOURCE;
546
547 ret = media_entity_pads_init(&csi2->sd.entity, ARRAY_SIZE(csi2->pad),
548 csi2->pad);
549 if (ret)
550 return ret;
551
552 /* Initialize subdev */
553 v4l2_subdev_init(&csi2->sd, &csi2_subdev_ops);
554 csi2->sd.internal_ops = &csi2_internal_ops;
555 csi2->sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
556 csi2->sd.entity.ops = &csi2_entity_ops;
557 csi2->sd.flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_STREAMS;
558 csi2->sd.owner = THIS_MODULE;
559 snprintf(csi2->sd.name, sizeof(csi2->sd.name), "csi2");
560
561 ret = v4l2_subdev_init_finalize(&csi2->sd);
562 if (ret)
563 goto err_entity_cleanup;
564
565 ret = v4l2_device_register_subdev(csi2->v4l2_dev, &csi2->sd);
566 if (ret) {
567 csi2_err(csi2, "Failed register csi2 subdev (%d)\n", ret);
568 goto err_subdev_cleanup;
569 }
570
571 return 0;
572
573 err_subdev_cleanup:
574 v4l2_subdev_cleanup(&csi2->sd);
575 err_entity_cleanup:
576 media_entity_cleanup(&csi2->sd.entity);
577
578 return ret;
579 }
580
/* Unregister and clean up the CSI-2 subdevice (reverse of csi2_init()). */
void csi2_uninit(struct csi2_device *csi2)
{
	v4l2_device_unregister_subdev(&csi2->sd);
	v4l2_subdev_cleanup(&csi2->sd);
	media_entity_cleanup(&csi2->sd.entity);
}
587