1 // SPDX-License-Identifier: GPL-2.0-only
3 * RP1 Camera Front End Driver
5 * Copyright (C) 2021-2022 - Raspberry Pi Ltd.
9 #include <linux/atomic.h>
10 #include <linux/clk.h>
11 #include <linux/debugfs.h>
12 #include <linux/delay.h>
13 #include <linux/device.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/err.h>
16 #include <linux/init.h>
17 #include <linux/interrupt.h>
19 #include <linux/module.h>
20 #include <linux/of_device.h>
21 #include <linux/of_graph.h>
22 #include <linux/phy/phy.h>
23 #include <linux/pinctrl/consumer.h>
24 #include <linux/platform_device.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/seq_file.h>
27 #include <linux/slab.h>
28 #include <linux/uaccess.h>
29 #include <linux/videodev2.h>
31 #include <media/v4l2-async.h>
32 #include <media/v4l2-common.h>
33 #include <media/v4l2-ctrls.h>
34 #include <media/v4l2-dev.h>
35 #include <media/v4l2-device.h>
36 #include <media/v4l2-dv-timings.h>
37 #include <media/v4l2-event.h>
38 #include <media/v4l2-fwnode.h>
39 #include <media/v4l2-ioctl.h>
40 #include <media/videobuf2-dma-contig.h>
46 #include "pisp_fe_config.h"
47 #include "pisp_statistics.h"
/* Identity strings reported through VIDIOC_QUERYCAP. */
49 #define CFE_MODULE_NAME "rp1-cfe"
50 #define CFE_VERSION "1.0"
/* Runtime knob gating the extra-verbose debug macro below. */
52 bool cfe_debug_verbose;
/*
 * Logging helpers. Each expands to a dev_*() call against cfe->pdev->dev,
 * so a local 'cfe' (struct cfe_device *) must be in scope at the call site.
 */
54 #define cfe_dbg_verbose(fmt, arg...) \
56 if (cfe_debug_verbose) \
57 dev_dbg(&cfe->pdev->dev, fmt, ##arg); \
59 #define cfe_dbg(fmt, arg...) dev_dbg(&cfe->pdev->dev, fmt, ##arg)
60 #define cfe_info(fmt, arg...) dev_info(&cfe->pdev->dev, fmt, ##arg)
61 #define cfe_err(fmt, arg...) dev_err(&cfe->pdev->dev, fmt, ##arg)
63 /* MIPICFG registers */
64 #define MIPICFG_CFG 0x004
65 #define MIPICFG_INTR 0x028
66 #define MIPICFG_INTE 0x02c
67 #define MIPICFG_INTF 0x030
68 #define MIPICFG_INTS 0x034
70 #define MIPICFG_CFG_SEL_CSI BIT(0)
/*
 * Top-level interrupt sources: INTE is written with these in
 * cfe_start_streaming(), INTS is read and demuxed in cfe_isr().
 */
72 #define MIPICFG_INT_CSI_DMA BIT(0)
73 #define MIPICFG_INT_CSI_HOST BIT(2)
74 #define MIPICFG_INT_PISP_FE BIT(4)
/* DMA stride alignment in bytes; also used to align meta buffer sizes. */
76 #define BPL_ALIGNMENT 16
/* 32-bit limits rounded down so they remain BPL_ALIGNMENT-aligned. */
77 #define MAX_BYTESPERLINE 0xffffff00
78 #define MAX_BUFFER_SIZE 0xffffff00
80  * Max width is therefore determined by the max stride divided by the number of
83  * However, to avoid overflow issues let's use a 16k maximum. This lets us
84  * calculate 16k * 16k * 4 with 32bits. If we need higher maximums, a careful
85  * review and adjustment of the code is needed so that it will deal with
86  * overflows correctly.
88 #define MAX_WIDTH 16384
89 #define MAX_HEIGHT MAX_WIDTH
90 /* Define a nominal minimum image size */
93 /* Default size of the embedded buffer */
94 #define DEFAULT_EMBEDDED_SIZE 8192
/*
 * Default image format for the capture nodes: 10-bit Bayer, progressive,
 * RAW colorspace with full-range quantization.
 */
96 const struct v4l2_mbus_framefmt cfe_default_format = {
99 .code = MEDIA_BUS_FMT_SRGGB10_1X10,
100 .field = V4L2_FIELD_NONE,
101 .colorspace = V4L2_COLORSPACE_RAW,
102 .ycbcr_enc = V4L2_YCBCR_ENC_601,
103 .quantization = V4L2_QUANTIZATION_FULL_RANGE,
104 .xfer_func = V4L2_XFER_FUNC_NONE,
/* Default format for the metadata nodes: opaque sensor data. */
107 const struct v4l2_mbus_framefmt cfe_default_meta_format = {
110 .code = MEDIA_BUS_FMT_SENSOR_DATA,
114 /* CSI2 HW output nodes first. */
119 /* FE only nodes from here on. */
/* Static per-node properties, indexed by the node id enum. */
127 struct node_description {
130 enum v4l2_buf_type buf_type;
132 unsigned int pad_flags;
/* Pad on the CSI2 / FE subdev this video node links to. */
133 unsigned int link_pad;
136 /* Must match the ordering of enum ids */
137 static const struct node_description node_desc[NUM_NODES] = {
140 .buf_type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
141 .cap = V4L2_CAP_VIDEO_CAPTURE,
142 .pad_flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT,
143 .link_pad = CSI2_NUM_CHANNELS + 0
145 /* This node is assigned for the embedded data channel! */
146 [CSI2_CH1_EMBEDDED] = {
148 .buf_type = V4L2_BUF_TYPE_META_CAPTURE,
149 .cap = V4L2_CAP_META_CAPTURE,
150 .pad_flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT,
151 .link_pad = CSI2_NUM_CHANNELS + 1
/*
 * NOTE(review): the two entries below pair a VIDEO_CAPTURE buf_type with a
 * META_CAPTURE capability, unlike every other entry — confirm this
 * asymmetry is intentional.
 */
155 .buf_type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
156 .cap = V4L2_CAP_META_CAPTURE,
157 .pad_flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT,
158 .link_pad = CSI2_NUM_CHANNELS + 2
162 .buf_type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
163 .cap = V4L2_CAP_META_CAPTURE,
164 .pad_flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT,
165 .link_pad = CSI2_NUM_CHANNELS + 3
/* PISP front end output/stats/config nodes. */
169 .buf_type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
170 .cap = V4L2_CAP_VIDEO_CAPTURE,
171 .pad_flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT,
172 .link_pad = FE_OUTPUT0_PAD
176 .buf_type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
177 .cap = V4L2_CAP_VIDEO_CAPTURE,
178 .pad_flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT,
179 .link_pad = FE_OUTPUT1_PAD
183 .buf_type = V4L2_BUF_TYPE_META_CAPTURE,
184 .cap = V4L2_CAP_META_CAPTURE,
185 .pad_flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT,
186 .link_pad = FE_STATS_PAD
/* FE_CONFIG is the only output (userspace -> driver) node. */
190 .buf_type = V4L2_BUF_TYPE_META_OUTPUT,
191 .cap = V4L2_CAP_META_OUTPUT,
192 .pad_flags = MEDIA_PAD_FL_SOURCE | MEDIA_PAD_FL_MUST_CONNECT,
193 .link_pad = FE_CONFIG_PAD
/* Node classification helpers, driven purely by node id / node_desc[]. */
197 #define is_fe_node(node) (((node)->id) >= FE_OUT0)
198 #define is_csi2_node(node) (!is_fe_node(node))
199 #define is_image_output_node(node) \
200 (node_desc[(node)->id].buf_type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
201 #define is_meta_output_node(node) \
202 (node_desc[(node)->id].buf_type == V4L2_BUF_TYPE_META_CAPTURE)
203 #define is_meta_input_node(node) \
204 (node_desc[(node)->id].buf_type == V4L2_BUF_TYPE_META_OUTPUT)
205 #define is_meta_node(node) (is_meta_output_node(node) || is_meta_input_node(node))
207 /* To track state across all nodes. */
/* Per-node state bits packed into cfe_device::node_flags. */
209 #define NODE_REGISTERED BIT(0)
210 #define NODE_ENABLED BIT(1)
211 #define NODE_STREAMING BIT(2)
/* Frame-start / frame-end interrupt seen for the frame in flight. */
212 #define FS_INT BIT(3)
213 #define FE_INT BIT(4)
/* Driver buffer: vb2 buffer plus its DMA-queue list linkage. */
216 struct vb2_v4l2_buffer vb;
217 struct list_head list;
/* FE_CONFIG buffers carry a kernel-side copy of the userspace config. */
220 struct cfe_config_buffer {
221 struct cfe_buffer buf;
222 struct pisp_fe_config config;
/* Upcast a vb2 buffer to its containing cfe_buffer. */
225 static inline struct cfe_buffer *to_cfe_buffer(struct vb2_buffer *vb)
227 return container_of(vb, struct cfe_buffer, vb.vb2_buf);
/* Upcast a cfe_buffer to its config wrapper (valid on FE_CONFIG only). */
231 struct cfe_config_buffer *to_cfe_config_buffer(struct cfe_buffer *buf)
233 return container_of(buf, struct cfe_config_buffer, buf);
/* Per-video-node state (struct cfe_node fields). */
238 /* Pointer pointing to current v4l2_buffer */
239 struct cfe_buffer *cur_frm;
240 /* Pointer pointing to next v4l2_buffer */
241 struct cfe_buffer *next_frm;
242 /* Used to store current pixel format */
243 struct v4l2_format fmt;
244 /* Buffer queue used in video-buf */
245 struct vb2_queue buffer_queue;
246 /* Queue of filled frames */
247 struct list_head dma_queue;
248 /* lock used to access this structure */
250 /* Identifies video device for this channel */
251 struct video_device video_dev;
252 /* Pointer to the parent handle */
253 struct cfe_device *cfe;
254 struct media_pad pad;
258 struct dentry *debugfs;
/* Device-wide state (struct cfe_device fields). */
261 /* V4l2 specific parameters */
262 struct v4l2_async_connection *asd;
264 /* peripheral base address */
265 void __iomem *mipi_cfg_base;
270 struct v4l2_device v4l2_dev;
271 struct media_device mdev;
272 struct media_pipeline pipe;
274 /* IRQ lock for node state and DMA queues */
275 spinlock_t state_lock;
280 struct platform_device *pdev;
281 /* subdevice async Notifier */
282 struct v4l2_async_notifier notifier;
284 /* ptr to sub device */
285 struct v4l2_subdev *sensor;
287 struct cfe_node node[NUM_NODES];
/* NUM_STATES flag bits per node, indexed node_id * NUM_STATES + bit. */
288 DECLARE_BITMAP(node_flags, NUM_STATES * NUM_NODES);
290 struct csi2_device csi2;
291 struct pisp_fe_device fe;
293 bool sensor_embedded_data;
296 unsigned int sequence;
/* The FE path is in use iff a CSI2 channel has been routed to it. */
300 static inline bool is_fe_enabled(struct cfe_device *cfe)
302 return cfe->fe_csi2_channel != -1;
305 static inline struct cfe_device *to_cfe_device(struct v4l2_device *v4l2_dev)
307 return container_of(v4l2_dev, struct cfe_device, v4l2_dev);
/* MIPICFG register block accessors. */
310 static inline u32 cfg_reg_read(struct cfe_device *cfe, u32 offset)
312 return readl(cfe->mipi_cfg_base + offset);
315 static inline void cfg_reg_write(struct cfe_device *cfe, u32 offset, u32 val)
317 writel(val, cfe->mipi_cfg_base + offset);
/*
 * check_state(): true only when *all* bits of @state are set for @node_id
 * in cfe->node_flags.
 *
 * NOTE(review): for_each_set_bit() takes its size argument in *bits*, but
 * sizeof(state) is the byte count (8 on 64-bit). This only works because
 * the highest state flag is BIT(4) < 8; it should arguably be NUM_STATES
 * (or BITS_PER_LONG) in all three helpers below — confirm and fix.
 */
320 static bool check_state(struct cfe_device *cfe, unsigned long state,
321 unsigned int node_id)
325 for_each_set_bit(bit, &state, sizeof(state)) {
326 if (!test_bit(bit + (node_id * NUM_STATES), cfe->node_flags))
/* Set every bit of @state for @node_id. */
332 static void set_state(struct cfe_device *cfe, unsigned long state,
333 unsigned int node_id)
337 for_each_set_bit(bit, &state, sizeof(state))
338 set_bit(bit + (node_id * NUM_STATES), cfe->node_flags);
/* Clear every bit of @state for @node_id. */
341 static void clear_state(struct cfe_device *cfe, unsigned long state,
342 unsigned int node_id)
346 for_each_set_bit(bit, &state, sizeof(state))
347 clear_bit(bit + (node_id * NUM_STATES), cfe->node_flags);
/* True if any node satisfies all bits of @cond. */
350 static bool test_any_node(struct cfe_device *cfe, unsigned long cond)
354 for (i = 0; i < NUM_NODES; i++) {
355 if (check_state(cfe, cond, i))
/*
 * True if every node matching @precond also matches @cond; nodes that
 * fail @precond are ignored.
 */
362 static bool test_all_nodes(struct cfe_device *cfe, unsigned long precond,
367 for (i = 0; i < NUM_NODES; i++) {
368 if (check_state(cfe, precond, i)) {
369 if (!check_state(cfe, cond, i))
/* Clear @state on every node matching @precond. */
377 static void clear_all_nodes(struct cfe_device *cfe, unsigned long precond,
382 for (i = 0; i < NUM_NODES; i++) {
383 if (check_state(cfe, precond, i))
384 clear_state(cfe, state, i);
/* debugfs: dump the MIPICFG register block (device must be resumed). */
388 static int mipi_cfg_regs_show(struct seq_file *s, void *data)
390 struct cfe_device *cfe = s->private;
393 ret = pm_runtime_resume_and_get(&cfe->pdev->dev);
397 #define DUMP(reg) seq_printf(s, #reg " \t0x%08x\n", cfg_reg_read(cfe, reg))
405 pm_runtime_put(&cfe->pdev->dev);
/* debugfs: print each node's state bits and negotiated format. */
410 static int format_show(struct seq_file *s, void *data)
412 struct cfe_device *cfe = s->private;
415 for (i = 0; i < NUM_NODES; i++) {
416 struct cfe_node *node = &cfe->node[i];
417 unsigned long sb, state = 0;
/* Re-assemble the node's flag bits into a printable mask. */
419 for (sb = 0; sb < NUM_STATES; sb++) {
420 if (check_state(cfe, BIT(sb), i))
424 seq_printf(s, "\nNode %u (%s) state: 0x%lx\n", i,
425 node_desc[i].name, state);
427 if (is_image_output_node(node))
428 seq_printf(s, "format: " V4L2_FOURCC_CONV " 0x%x\n"
429 "resolution: %ux%u\nbpl: %u\nsize: %u\n",
430 V4L2_FOURCC_CONV_ARGS(node->fmt.fmt.pix.pixelformat),
431 node->fmt.fmt.pix.pixelformat,
432 node->fmt.fmt.pix.width,
433 node->fmt.fmt.pix.height,
434 node->fmt.fmt.pix.bytesperline,
435 node->fmt.fmt.pix.sizeimage);
437 seq_printf(s, "format: " V4L2_FOURCC_CONV " 0x%x\nsize: %u\n",
438 V4L2_FOURCC_CONV_ARGS(node->fmt.fmt.meta.dataformat),
439 node->fmt.fmt.meta.dataformat,
440 node->fmt.fmt.meta.buffersize);
446 DEFINE_SHOW_ATTRIBUTE(mipi_cfg_regs);
447 DEFINE_SHOW_ATTRIBUTE(format);
449 /* Format setup functions */
/* Look up the formats[] table entry for a media bus code. */
450 const struct cfe_fmt *find_format_by_code(u32 code)
454 for (i = 0; i < ARRAY_SIZE(formats); i++) {
455 if (formats[i].code == code)
/* Look up the formats[] table entry for a V4L2 pixel fourcc. */
462 static const struct cfe_fmt *find_format_by_pix(u32 pixelformat)
466 for (i = 0; i < ARRAY_SIZE(formats); i++) {
467 if (formats[i].fourcc == pixelformat)
/*
 * Clamp/align the requested width and height, honour a caller-supplied
 * bytesperline when it is within bounds, and derive sizeimage from the
 * final stride and height.
 */
474 static int cfe_calc_format_size_bpl(struct cfe_device *cfe,
475 const struct cfe_fmt *fmt,
476 struct v4l2_format *f)
478 unsigned int min_bytesperline;
480 v4l_bound_align_image(&f->fmt.pix.width, MIN_WIDTH, MAX_WIDTH, 2,
481 &f->fmt.pix.height, MIN_HEIGHT, MAX_HEIGHT, 0, 0);
/* Minimum stride: packed line width rounded up to the DMA alignment. */
484 ALIGN((f->fmt.pix.width * fmt->depth) >> 3, BPL_ALIGNMENT);
486 if (f->fmt.pix.bytesperline > min_bytesperline &&
487 f->fmt.pix.bytesperline <= MAX_BYTESPERLINE)
488 f->fmt.pix.bytesperline =
489 ALIGN(f->fmt.pix.bytesperline, BPL_ALIGNMENT);
491 f->fmt.pix.bytesperline = min_bytesperline;
493 f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
495 cfe_dbg("%s: " V4L2_FOURCC_CONV " size: %ux%u bpl:%u img_size:%u\n",
496 __func__, V4L2_FOURCC_CONV_ARGS(f->fmt.pix.pixelformat),
497 f->fmt.pix.width, f->fmt.pix.height,
498 f->fmt.pix.bytesperline, f->fmt.pix.sizeimage);
/*
 * Pop the next queued buffer for every streaming CSI2 node and program
 * its DMA address into the CSI2 block. Called with state_lock held
 * (via cfe_prepare_next_job()).
 */
503 static void cfe_schedule_next_csi2_job(struct cfe_device *cfe)
505 struct cfe_buffer *buf;
509 for (i = 0; i < CSI2_NUM_CHANNELS; i++) {
510 struct cfe_node *node = &cfe->node[i];
511 unsigned int stride, size;
513 if (!check_state(cfe, NODE_STREAMING, i))
516 buf = list_first_entry(&node->dma_queue, struct cfe_buffer,
518 node->next_frm = buf;
519 list_del(&buf->list);
521 cfe_dbg_verbose("%s: [%s] buffer:%p\n", __func__,
522 node_desc[node->id].name, &buf->vb.vb2_buf);
/* Metadata nodes use a flat buffer; image nodes carry a stride too. */
524 if (is_meta_node(node)) {
525 size = node->fmt.fmt.meta.buffersize;
528 size = node->fmt.fmt.pix.sizeimage;
529 stride = node->fmt.fmt.pix.bytesperline;
532 addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
533 csi2_set_buffer(&cfe->csi2, node->id, addr, stride, size);
/*
 * Collect one buffer per streaming FE node (indexed by the FE pad it
 * links to) and submit them, together with the validated config copy,
 * as one job to the PISP front end.
 */
537 static void cfe_schedule_next_pisp_job(struct cfe_device *cfe)
539 struct vb2_buffer *vb2_bufs[FE_NUM_PADS] = { 0 };
540 struct cfe_config_buffer *config_buf;
541 struct cfe_buffer *buf;
544 for (i = CSI2_NUM_CHANNELS; i < NUM_NODES; i++) {
545 struct cfe_node *node = &cfe->node[i];
547 if (!check_state(cfe, NODE_STREAMING, i))
550 buf = list_first_entry(&node->dma_queue, struct cfe_buffer,
553 cfe_dbg_verbose("%s: [%s] buffer:%p\n", __func__,
554 node_desc[node->id].name, &buf->vb.vb2_buf);
556 node->next_frm = buf;
557 vb2_bufs[node_desc[i].link_pad] = &buf->vb.vb2_buf;
558 list_del(&buf->list);
/* The FE_CONFIG buffer embeds the kernel copy of the FE config. */
561 config_buf = to_cfe_config_buffer(cfe->node[FE_CONFIG].next_frm);
562 pisp_fe_submit_job(&cfe->fe, vb2_bufs, &config_buf->config);
/*
 * A job is ready only when every *enabled* node has at least one buffer
 * waiting on its DMA queue.
 */
565 static bool cfe_check_job_ready(struct cfe_device *cfe)
569 for (i = 0; i < NUM_NODES; i++) {
570 struct cfe_node *node = &cfe->node[i];
572 if (!check_state(cfe, NODE_ENABLED, i))
575 if (list_empty(&node->dma_queue)) {
576 cfe_dbg_verbose("%s: [%s] has no buffer, unable to schedule job\n",
577 __func__, node_desc[i].name);
/* Queue the next hardware job. Called with state_lock held. */
585 static void cfe_prepare_next_job(struct cfe_device *cfe)
587 cfe->job_queued = true;
588 cfe_schedule_next_csi2_job(cfe);
589 if (is_fe_enabled(cfe))
590 cfe_schedule_next_pisp_job(cfe);
592 /* Flag if another job is ready after this. */
593 cfe->job_ready = cfe_check_job_ready(cfe);
595 cfe_dbg_verbose("%s: end with scheduled job\n", __func__);
/* Complete the node's current frame back to vb2 with DONE status. */
598 static void cfe_process_buffer_complete(struct cfe_node *node,
599 unsigned int sequence)
601 struct cfe_device *cfe = node->cfe;
603 cfe_dbg_verbose("%s: [%s] buffer:%p\n", __func__,
604 node_desc[node->id].name, &node->cur_frm->vb.vb2_buf);
606 node->cur_frm->vb.sequence = sequence;
607 vb2_buffer_done(&node->cur_frm->vb.vb2_buf, VB2_BUF_STATE_DONE);
/* Emit a V4L2 FRAME_SYNC event carrying the device-wide sequence. */
610 static void cfe_queue_event_sof(struct cfe_node *node)
612 struct v4l2_event event = {
613 .type = V4L2_EVENT_FRAME_SYNC,
614 .u.frame_sync.frame_sequence = node->cfe->sequence,
617 v4l2_event_queue(&node->video_dev, &event);
/*
 * Frame-start handling for one node: promote next_frm to cur_frm,
 * timestamp it with the shared per-frame timestamp, and emit the SOF
 * event on image nodes. Runs under state_lock from cfe_isr().
 */
620 static void cfe_sof_isr_handler(struct cfe_node *node)
622 struct cfe_device *cfe = node->cfe;
624 cfe_dbg_verbose("%s: [%s] seq %u\n", __func__, node_desc[node->id].name,
627 node->cur_frm = node->next_frm;
628 node->next_frm = NULL;
631 * If this is the first node to see a frame start, sample the
632 * timestamp to use for all frames across all channels.
634 if (!test_any_node(cfe, NODE_STREAMING | FS_INT))
635 cfe->ts = ktime_get_ns();
637 set_state(cfe, FS_INT, node->id);
639 /* If all nodes have seen a frame start, we can queue another job. */
640 if (test_all_nodes(cfe, NODE_STREAMING, FS_INT))
641 cfe->job_queued = false;
644 node->cur_frm->vb.vb2_buf.timestamp = cfe->ts;
646 if (is_image_output_node(node))
647 cfe_queue_event_sof(node);
/*
 * Frame-end handling for one node: complete the current buffer and mark
 * FE_INT; once every streaming node has finished the frame, the global
 * sequence advances and the per-frame FS/FE flags are reset.
 */
650 static void cfe_eof_isr_handler(struct cfe_node *node)
652 struct cfe_device *cfe = node->cfe;
654 cfe_dbg_verbose("%s: [%s] seq %u\n", __func__, node_desc[node->id].name,
658 cfe_process_buffer_complete(node, cfe->sequence);
660 node->cur_frm = NULL;
661 set_state(cfe, FE_INT, node->id);
664 * If all nodes have seen a frame end, we can increment
665 * the sequence counter now.
667 if (test_all_nodes(cfe, NODE_STREAMING, FE_INT)) {
669 clear_all_nodes(cfe, NODE_STREAMING, FE_INT | FS_INT);
/*
 * Top-level interrupt handler: read MIPICFG_INTS, demux into per-channel
 * SOF/EOF (and LCI) flags from the CSI2 and FE blocks, then run the
 * per-node frame start/end state machine under state_lock and schedule
 * the next job when one is ready.
 */
673 static irqreturn_t cfe_isr(int irq, void *dev)
675 struct cfe_device *cfe = dev;
677 bool sof[NUM_NODES] = {0}, eof[NUM_NODES] = {0}, lci[NUM_NODES] = {0};
680 sts = cfg_reg_read(cfe, MIPICFG_INTS);
682 if (sts & MIPICFG_INT_CSI_DMA)
683 csi2_isr(&cfe->csi2, sof, eof, lci);
/* FE flags land after the CSI2 channel slots in the arrays. */
685 if (sts & MIPICFG_INT_PISP_FE)
686 pisp_fe_isr(&cfe->fe, sof + CSI2_NUM_CHANNELS,
687 eof + CSI2_NUM_CHANNELS);
689 spin_lock(&cfe->state_lock);
691 for (i = 0; i < NUM_NODES; i++) {
692 struct cfe_node *node = &cfe->node[i];
695 * The check_state(NODE_STREAMING) is to ensure we do not loop
696 * over the CSI2_CHx nodes when the FE is active since they
697 * generate interrupts even though the node is not streaming.
699 if (!check_state(cfe, NODE_STREAMING, i) ||
700 !(sof[i] || eof[i] || lci[i]))
704 * There are 3 cases where we could get FS + FE_ACK at
706 * 1) FE of the current frame, and FS of the next frame.
707 * 2) FS + FE of the same frame.
708 * 3) FE of the current frame, and FS + FE of the next
709 * frame. To handle this, see the sof handler below.
711 * (1) is handled implicitly by the ordering of the FE and FS
716 * The condition below tests for (2). Run the FS handler
717 * first before the FE handler, both for the current
720 if (sof[i] && !check_state(cfe, FS_INT, i)) {
721 cfe_sof_isr_handler(node);
725 cfe_eof_isr_handler(node);
730 * The condition below tests for (3). In such cases, we
731 * come in here with FS flag set in the node state from
732 * the previous frame since it only gets cleared in
733 * eof_isr_handler(). Handle the FE for the previous
734 * frame first before the FS handler for the current
737 if (check_state(cfe, FS_INT, node->id)) {
738 cfe_dbg("%s: [%s] Handling missing previous FE interrupt\n",
739 __func__, node_desc[node->id].name);
740 cfe_eof_isr_handler(node);
743 cfe_sof_isr_handler(node);
746 if (!cfe->job_queued && cfe->job_ready)
747 cfe_prepare_next_job(cfe);
750 spin_unlock(&cfe->state_lock);
/*
 * Start the CSI2 channel backing @node — and, when the last enabled
 * node is starting and the FE path is routed, the FE-feeding channel
 * and the front end itself — then kick the first job if every node
 * already has buffers queued.
 */
759 static void cfe_start_channel(struct cfe_node *node)
761 struct cfe_device *cfe = node->cfe;
762 struct v4l2_subdev_state *state;
763 struct v4l2_mbus_framefmt *source_fmt;
764 const struct cfe_fmt *fmt;
766 bool start_fe = is_fe_enabled(cfe) &&
767 test_all_nodes(cfe, NODE_ENABLED, NODE_STREAMING);
769 cfe_dbg("%s: [%s]\n", __func__, node_desc[node->id].name);
771 state = v4l2_subdev_lock_and_get_active_state(&cfe->csi2.sd);
774 unsigned int width, height;
776 WARN_ON(!is_fe_enabled(cfe));
777 cfe_dbg("%s: %s using csi2 channel %d\n",
778 __func__, node_desc[FE_OUT0].name,
779 cfe->fe_csi2_channel);
781 source_fmt = v4l2_subdev_get_pad_format(&cfe->csi2.sd, state, cfe->fe_csi2_channel);
782 fmt = find_format_by_code(source_fmt->code);
784 width = source_fmt->width;
785 height = source_fmt->height;
788 * Start the associated CSI2 Channel as well.
790 * Must write to the ADDR register to latch the ctrl values
791 * even if we are connected to the front end. Once running,
792 * this is handled by the CSI2 AUTO_ARM mode.
794 csi2_start_channel(&cfe->csi2, cfe->fe_csi2_channel,
795 fmt->csi_dt, CSI2_MODE_FE_STREAMING,
796 true, false, width, height);
797 csi2_set_buffer(&cfe->csi2, cfe->fe_csi2_channel, 0, 0, -1);
798 pisp_fe_start(&cfe->fe);
801 if (is_csi2_node(node)) {
802 unsigned int width = 0, height = 0;
804 u32 mode = CSI2_MODE_NORMAL;
806 source_fmt = v4l2_subdev_get_pad_format(&cfe->csi2.sd, state,
807 node_desc[node->id].link_pad - CSI2_NUM_CHANNELS);
808 fmt = find_format_by_code(source_fmt->code);
810 if (is_image_output_node(node)) {
811 width = source_fmt->width;
812 height = source_fmt->height;
/* The chosen pixelformat selects pass-through, 16-bit remap or compressed mode. */
814 if (node->fmt.fmt.pix.pixelformat ==
815 fmt->remap[CFE_REMAP_16BIT])
816 mode = CSI2_MODE_REMAP;
817 else if (node->fmt.fmt.pix.pixelformat ==
818 fmt->remap[CFE_REMAP_COMPRESSED]) {
819 mode = CSI2_MODE_COMPRESSED;
820 csi2_set_compression(&cfe->csi2, node->id,
821 CSI2_COMPRESSION_DELTA, 0,
825 /* Unconditionally start this CSI2 channel. */
826 csi2_start_channel(&cfe->csi2, node->id, fmt->csi_dt,
/* NOTE(review): "x ? true : false" on a boolean comparison is redundant. */
831 node->id == CSI2_CH1_EMBEDDED ? true : false,
835 v4l2_subdev_unlock_state(state);
837 spin_lock_irqsave(&cfe->state_lock, flags);
838 if (cfe->job_ready && test_all_nodes(cfe, NODE_ENABLED, NODE_STREAMING))
839 cfe_prepare_next_job(cfe);
840 spin_unlock_irqrestore(&cfe->state_lock, flags);
/*
 * Stop the CSI2 channel backing @node; when @fe_stop is set, also halt
 * the FE-feeding channel and the front end.
 */
843 static void cfe_stop_channel(struct cfe_node *node, bool fe_stop)
845 struct cfe_device *cfe = node->cfe;
847 cfe_dbg("%s: [%s] fe_stop %u\n", __func__,
848 node_desc[node->id].name, fe_stop);
851 csi2_stop_channel(&cfe->csi2, cfe->fe_csi2_channel);
852 pisp_fe_stop(&cfe->fe);
855 if (is_csi2_node(node))
856 csi2_stop_channel(&cfe->csi2, node->id);
/* Return every queued and in-flight buffer on @node to vb2 with @state. */
859 static void cfe_return_buffers(struct cfe_node *node,
860 enum vb2_buffer_state state)
862 struct cfe_device *cfe = node->cfe;
863 struct cfe_buffer *buf, *tmp;
866 cfe_dbg("%s: [%s]\n", __func__, node_desc[node->id].name);
868 spin_lock_irqsave(&cfe->state_lock, flags);
869 list_for_each_entry_safe(buf, tmp, &node->dma_queue, list) {
870 list_del(&buf->list);
871 vb2_buffer_done(&buf->vb.vb2_buf, state);
/* cur_frm and next_frm may alias; never complete one buffer twice. */
875 vb2_buffer_done(&node->cur_frm->vb.vb2_buf, state);
876 if (node->next_frm && node->cur_frm != node->next_frm)
877 vb2_buffer_done(&node->next_frm->vb.vb2_buf, state);
879 node->cur_frm = NULL;
880 node->next_frm = NULL;
881 spin_unlock_irqrestore(&cfe->state_lock, flags);
/*
 * vb2 queue_setup: enforce a minimum of 3 buffers overall and validate
 * any caller-provided plane size against the node's current format.
 */
888 static int cfe_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
889 unsigned int *nplanes, unsigned int sizes[],
890 struct device *alloc_devs[])
892 struct cfe_node *node = vb2_get_drv_priv(vq);
893 struct cfe_device *cfe = node->cfe;
894 unsigned int size = is_image_output_node(node) ?
895 node->fmt.fmt.pix.sizeimage :
896 node->fmt.fmt.meta.buffersize;
898 cfe_dbg("%s: [%s]\n", __func__, node_desc[node->id].name);
900 if (vq->num_buffers + *nbuffers < 3)
901 *nbuffers = 3 - vq->num_buffers;
/* NOTE(review): sizes[0] is unsigned int — "%i" should be "%u". */
904 if (sizes[0] < size) {
905 cfe_err("sizes[0] %i < size %u\n", sizes[0], size);
/*
 * vb2 buf_prepare: check the plane holds the negotiated size, set the
 * payload, and for the FE_CONFIG node snapshot and validate the
 * userspace-provided front-end configuration.
 */
917 static int cfe_buffer_prepare(struct vb2_buffer *vb)
919 struct cfe_node *node = vb2_get_drv_priv(vb->vb2_queue);
920 struct cfe_device *cfe = node->cfe;
921 struct cfe_buffer *buf = to_cfe_buffer(vb);
924 cfe_dbg_verbose("%s: [%s] buffer:%p\n", __func__,
925 node_desc[node->id].name, vb);
927 size = is_image_output_node(node) ? node->fmt.fmt.pix.sizeimage :
928 node->fmt.fmt.meta.buffersize;
929 if (vb2_plane_size(vb, 0) < size) {
930 cfe_err("data will not fit into plane (%lu < %lu)\n",
931 vb2_plane_size(vb, 0), size);
935 vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);
937 if (node->id == FE_CONFIG) {
938 struct cfe_config_buffer *b = to_cfe_config_buffer(buf);
939 void *addr = vb2_plane_vaddr(vb, 0);
/* Copy to kernel memory so userspace cannot change it mid-frame. */
941 memcpy(&b->config, addr, sizeof(struct pisp_fe_config));
942 return pisp_fe_validate_config(&cfe->fe, &b->config,
943 &cfe->node[FE_OUT0].fmt,
944 &cfe->node[FE_OUT1].fmt);
/*
 * vb2 buf_queue: append the buffer to the node's DMA queue and, if the
 * whole pipeline is streaming and now has buffers on every enabled
 * node, schedule a job immediately.
 */
950 static void cfe_buffer_queue(struct vb2_buffer *vb)
952 struct cfe_node *node = vb2_get_drv_priv(vb->vb2_queue);
953 struct cfe_device *cfe = node->cfe;
954 struct cfe_buffer *buf = to_cfe_buffer(vb);
957 cfe_dbg_verbose("%s: [%s] buffer:%p\n", __func__,
958 node_desc[node->id].name, vb);
960 spin_lock_irqsave(&cfe->state_lock, flags);
962 list_add_tail(&buf->list, &node->dma_queue);
965 cfe->job_ready = cfe_check_job_ready(cfe);
967 if (!cfe->job_queued && cfe->job_ready &&
968 test_all_nodes(cfe, NODE_ENABLED, NODE_STREAMING)) {
969 cfe_dbg("Preparing job immediately for channel %u\n",
971 cfe_prepare_next_job(cfe);
974 spin_unlock_irqrestore(&cfe->state_lock, flags);
/*
 * vb2 start_streaming: resume the device, start the media pipeline and
 * this node's channel. Only when the last enabled node starts do we
 * enable the block interrupts, negotiate the CSI-2 lane count with the
 * sensor and start the sensor streaming.
 */
977 static int cfe_start_streaming(struct vb2_queue *vq, unsigned int count)
979 struct v4l2_mbus_config mbus_config = { 0 };
980 struct cfe_node *node = vb2_get_drv_priv(vq);
981 struct cfe_device *cfe = node->cfe;
984 cfe_dbg("%s: [%s] begin.\n", __func__, node_desc[node->id].name);
986 if (!check_state(cfe, NODE_ENABLED, node->id)) {
987 cfe_err("%s node link is not enabled.\n",
988 node_desc[node->id].name);
992 ret = pm_runtime_resume_and_get(&cfe->pdev->dev);
994 cfe_err("pm_runtime_resume_and_get failed\n");
998 ret = media_pipeline_start(&node->pad, &cfe->pipe);
1000 cfe_err("Failed to start media pipeline: %d\n", ret);
/* Fresh frame state before the first interrupt can arrive. */
1004 clear_state(cfe, FS_INT | FE_INT, node->id);
1005 set_state(cfe, NODE_STREAMING, node->id);
1006 cfe_start_channel(node);
1008 if (!test_all_nodes(cfe, NODE_ENABLED, NODE_STREAMING)) {
1009 cfe_dbg("Not all nodes are set to streaming yet!\n");
/* Last node to start: route the block to CSI and enable interrupts. */
1013 cfg_reg_write(cfe, MIPICFG_CFG, MIPICFG_CFG_SEL_CSI);
1014 cfg_reg_write(cfe, MIPICFG_INTE, MIPICFG_INT_CSI_DMA | MIPICFG_INT_PISP_FE);
/* Default to all DT-configured lanes; the sensor may request fewer. */
1016 cfe->csi2.active_data_lanes = cfe->csi2.dphy.num_lanes;
1017 cfe_dbg("Running with %u data lanes\n", cfe->csi2.active_data_lanes);
1019 ret = v4l2_subdev_call(cfe->sensor, pad, get_mbus_config, 0,
1021 if (ret < 0 && ret != -ENOIOCTLCMD) {
1022 cfe_err("g_mbus_config failed\n");
1026 cfe->csi2.active_data_lanes = mbus_config.bus.mipi_csi2.num_data_lanes;
1027 if (!cfe->csi2.active_data_lanes)
1028 cfe->csi2.active_data_lanes = cfe->csi2.dphy.num_lanes;
1029 if (cfe->csi2.active_data_lanes > cfe->csi2.dphy.num_lanes) {
1030 cfe_err("Device has requested %u data lanes, which is >%u configured in DT\n",
1031 cfe->csi2.active_data_lanes, cfe->csi2.dphy.num_lanes);
1033 goto err_disable_cfe;
1036 cfe_dbg("Starting sensor streaming\n");
1038 csi2_open_rx(&cfe->csi2);
1041 ret = v4l2_subdev_call(cfe->sensor, video, s_stream, 1);
1043 cfe_err("stream on failed in subdev\n");
1044 goto err_disable_cfe;
1047 cfe_dbg("%s: [%s] end.\n", __func__, node_desc[node->id].name);
/* Error unwind: tear down in reverse order of setup. */
1052 csi2_close_rx(&cfe->csi2);
1053 cfe_stop_channel(node, true);
1054 media_pipeline_stop(&node->pad);
1056 pm_runtime_put(&cfe->pdev->dev);
1058 cfe_return_buffers(node, VB2_BUF_STATE_QUEUED);
1059 clear_state(cfe, NODE_STREAMING, node->id);
/*
 * vb2 stop_streaming: take this node out of the streaming set; when the
 * last node stops, also stop the sensor, close the CSI-2 receiver and
 * mask the block interrupts. Outstanding buffers return as ERROR.
 */
1064 static void cfe_stop_streaming(struct vb2_queue *vq)
1066 struct cfe_node *node = vb2_get_drv_priv(vq);
1067 struct cfe_device *cfe = node->cfe;
1068 unsigned long flags;
1071 cfe_dbg("%s: [%s] begin.\n", __func__, node_desc[node->id].name);
1073 spin_lock_irqsave(&cfe->state_lock, flags);
/* Decide on FE teardown before clearing our own STREAMING bit. */
1074 fe_stop = is_fe_enabled(cfe) &&
1075 test_all_nodes(cfe, NODE_ENABLED, NODE_STREAMING);
1077 cfe->job_ready = false;
1078 clear_state(cfe, NODE_STREAMING, node->id);
1079 spin_unlock_irqrestore(&cfe->state_lock, flags);
1081 cfe_stop_channel(node, fe_stop);
1083 if (!test_any_node(cfe, NODE_STREAMING)) {
1084 /* Stop streaming the sensor and disable the peripheral. */
1085 if (v4l2_subdev_call(cfe->sensor, video, s_stream, 0) < 0)
1086 cfe_err("stream off failed in subdev\n");
1088 csi2_close_rx(&cfe->csi2);
1090 cfg_reg_write(cfe, MIPICFG_INTE, 0);
1093 media_pipeline_stop(&node->pad);
1095 /* Clear all queued buffers for the node */
1096 cfe_return_buffers(node, VB2_BUF_STATE_ERROR);
1098 pm_runtime_put(&cfe->pdev->dev);
1100 cfe_dbg("%s: [%s] end.\n", __func__, node_desc[node->id].name);
/* vb2 queue operations shared by all CFE video nodes. */
1103 static const struct vb2_ops cfe_video_qops = {
1104 .wait_prepare = vb2_ops_wait_prepare,
1105 .wait_finish = vb2_ops_wait_finish,
1106 .queue_setup = cfe_queue_setup,
1107 .buf_prepare = cfe_buffer_prepare,
1108 .buf_queue = cfe_buffer_queue,
1109 .start_streaming = cfe_start_streaming,
1110 .stop_streaming = cfe_stop_streaming,
/* VIDIOC_QUERYCAP: report driver identity and aggregate capabilities. */
1117 static int cfe_querycap(struct file *file, void *priv,
1118 struct v4l2_capability *cap)
1120 struct cfe_node *node = video_drvdata(file);
1121 struct cfe_device *cfe = node->cfe;
1123 strscpy(cap->driver, CFE_MODULE_NAME, sizeof(cap->driver));
1124 strscpy(cap->card, CFE_MODULE_NAME, sizeof(cap->card));
1126 snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
1127 dev_name(&cfe->pdev->dev));
1129 cap->capabilities |= V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_META_CAPTURE |
1130 V4L2_CAP_META_OUTPUT;
/*
 * VIDIOC_ENUM_FMT (video capture): walk formats[], skipping metadata
 * formats and, on FE nodes, formats the FE cannot output. Honours the
 * optional mbus_code filter.
 */
1135 static int cfe_enum_fmt_vid_cap(struct file *file, void *priv,
1136 struct v4l2_fmtdesc *f)
1138 struct cfe_node *node = video_drvdata(file);
1139 struct cfe_device *cfe = node->cfe;
1142 if (!is_image_output_node(node))
1145 cfe_dbg("%s: [%s]\n", __func__, node_desc[node->id].name);
1147 for (i = 0, j = 0; i < ARRAY_SIZE(formats); i++) {
1148 if (f->mbus_code && formats[i].code != f->mbus_code)
1151 if (formats[i].flags & CFE_FORMAT_FLAG_META_OUT ||
1152 formats[i].flags & CFE_FORMAT_FLAG_META_CAP)
1155 if (is_fe_node(node) &&
1156 !(formats[i].flags & CFE_FORMAT_FLAG_FE_OUT))
1159 if (j == f->index) {
1160 f->pixelformat = formats[i].fourcc;
1161 f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
/* Shared G_FMT handler for all node types; validates the buffer type. */
1170 static int cfe_g_fmt(struct file *file, void *priv,
1171 struct v4l2_format *f)
1173 struct cfe_node *node = video_drvdata(file);
1174 struct cfe_device *cfe = node->cfe;
1176 cfe_dbg("%s: [%s]\n", __func__, node_desc[node->id].name);
1178 if (f->type != node->buffer_queue.type)
/*
 * Adjust a requested video capture format to hardware constraints:
 * unknown fourccs fall back to a format both CSI2 and FE handle, FE
 * nodes are steered to the 16-bit remapped variant when one exists, and
 * the geometry is bounded/aligned via cfe_calc_format_size_bpl().
 */
1186 static int try_fmt_vid_cap(struct cfe_node *node, struct v4l2_format *f)
1188 struct cfe_device *cfe = node->cfe;
1189 const struct cfe_fmt *fmt;
1191 cfe_dbg("%s: [%s] %ux%u, V4L2 pix " V4L2_FOURCC_CONV "\n",
1192 __func__, node_desc[node->id].name,
1193 f->fmt.pix.width, f->fmt.pix.height,
1194 V4L2_FOURCC_CONV_ARGS(f->fmt.pix.pixelformat));
1196 if (!is_image_output_node(node))
1200 * Default to a format that works for both CSI2 and FE.
1202 fmt = find_format_by_pix(f->fmt.pix.pixelformat);
1204 fmt = find_format_by_code(MEDIA_BUS_FMT_SBGGR10_1X10);
1206 f->fmt.pix.pixelformat = fmt->fourcc;
1208 if (is_fe_node(node) && fmt->remap[CFE_REMAP_16BIT]) {
1209 f->fmt.pix.pixelformat = fmt->remap[CFE_REMAP_16BIT];
1210 fmt = find_format_by_pix(f->fmt.pix.pixelformat);
1213 f->fmt.pix.field = V4L2_FIELD_NONE;
1215 cfe_calc_format_size_bpl(cfe, fmt, f);
/* VIDIOC_S_FMT (video): validate via try_fmt_vid_cap(), then store. */
1220 static int cfe_s_fmt_vid_cap(struct file *file, void *priv,
1221 struct v4l2_format *f)
1223 struct cfe_node *node = video_drvdata(file);
1224 struct cfe_device *cfe = node->cfe;
1225 struct vb2_queue *q = &node->buffer_queue;
1228 cfe_dbg("%s: [%s]\n", __func__, node_desc[node->id].name);
1233 ret = try_fmt_vid_cap(node, f);
1239 cfe_dbg("%s: Set %ux%u, V4L2 pix " V4L2_FOURCC_CONV "\n", __func__,
1240 node->fmt.fmt.pix.width, node->fmt.fmt.pix.height,
1241 V4L2_FOURCC_CONV_ARGS(node->fmt.fmt.pix.pixelformat));
/* VIDIOC_TRY_FMT (video): delegate to try_fmt_vid_cap(). */
1246 static int cfe_try_fmt_vid_cap(struct file *file, void *priv,
1247 struct v4l2_format *f)
1249 struct cfe_node *node = video_drvdata(file);
1250 struct cfe_device *cfe = node->cfe;
1252 cfe_dbg("%s: [%s]\n", __func__, node_desc[node->id].name);
1254 return try_fmt_vid_cap(node, f);
/* VIDIOC_ENUM_FMT for metadata nodes: exactly one format per node. */
1257 static int cfe_enum_fmt_meta(struct file *file, void *priv,
1258 struct v4l2_fmtdesc *f)
1260 struct cfe_node *node = video_drvdata(file);
1261 struct cfe_device *cfe = node->cfe;
1263 cfe_dbg("%s: [%s]\n", __func__, node_desc[node->id].name);
1265 if (!is_meta_node(node) || f->index != 0)
1269 case CSI2_CH1_EMBEDDED:
1270 f->pixelformat = V4L2_META_FMT_SENSOR_DATA;
1273 f->pixelformat = V4L2_META_FMT_RPI_FE_STATS;
1276 f->pixelformat = V4L2_META_FMT_RPI_FE_CFG;
/*
 * Fix up a metadata format request: dataformat is dictated by the node.
 * Only the embedded-data node has a negotiable buffersize (defaulted,
 * capped and aligned); stats/config sizes are fixed by their structs.
 */
1283 static int try_fmt_meta(struct cfe_node *node, struct v4l2_format *f)
1286 case CSI2_CH1_EMBEDDED:
1287 f->fmt.meta.dataformat = V4L2_META_FMT_SENSOR_DATA;
1288 if (!f->fmt.meta.buffersize)
1289 f->fmt.meta.buffersize = DEFAULT_EMBEDDED_SIZE;
1290 f->fmt.meta.buffersize =
1291 min_t(u32, f->fmt.meta.buffersize, MAX_BUFFER_SIZE);
1292 f->fmt.meta.buffersize =
1293 ALIGN(f->fmt.meta.buffersize, BPL_ALIGNMENT);
1296 f->fmt.meta.dataformat = V4L2_META_FMT_RPI_FE_STATS;
1297 f->fmt.meta.buffersize = sizeof(struct pisp_statistics);
1300 f->fmt.meta.dataformat = V4L2_META_FMT_RPI_FE_CFG;
1301 f->fmt.meta.buffersize = sizeof(struct pisp_fe_config);
/* VIDIOC_S_FMT (meta): validate via try_fmt_meta(), then store. */
1308 static int cfe_s_fmt_meta(struct file *file, void *priv, struct v4l2_format *f)
1310 struct cfe_node *node = video_drvdata(file);
1311 struct cfe_device *cfe = node->cfe;
1312 struct vb2_queue *q = &node->buffer_queue;
1315 cfe_dbg("%s: [%s]\n", __func__, node_desc[node->id].name);
1320 if (f->type != node->buffer_queue.type)
1323 ret = try_fmt_meta(node, f);
1329 cfe_dbg("%s: Set " V4L2_FOURCC_CONV "\n", __func__,
1330 V4L2_FOURCC_CONV_ARGS(node->fmt.fmt.meta.dataformat));
/* VIDIOC_TRY_FMT (meta): delegate to try_fmt_meta(). */
1335 static int cfe_try_fmt_meta(struct file *file, void *priv,
1336 struct v4l2_format *f)
1338 struct cfe_node *node = video_drvdata(file);
1339 struct cfe_device *cfe = node->cfe;
1341 cfe_dbg("%s: [%s]\n", __func__, node_desc[node->id].name);
1342 return try_fmt_meta(node, f);
/* VIDIOC_ENUM_FRAMESIZES: one stepwise range spanning the HW limits. */
1345 static int cfe_enum_framesizes(struct file *file, void *priv,
1346 struct v4l2_frmsizeenum *fsize)
1348 struct cfe_node *node = video_drvdata(file);
1349 struct cfe_device *cfe = node->cfe;
1350 const struct cfe_fmt *fmt;
1352 cfe_dbg("%s [%s]\n", __func__, node_desc[node->id].name);
1354 if (fsize->index > 0)
1357 /* check for valid format */
1358 fmt = find_format_by_pix(fsize->pixel_format);
1360 cfe_dbg("Invalid pixel code: %x\n", fsize->pixel_format);
1364 /* TODO: Do we have limits on the step_width? */
1366 fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
1367 fsize->stepwise.min_width = MIN_WIDTH;
1368 fsize->stepwise.max_width = MAX_WIDTH;
1369 fsize->stepwise.step_width = 2;
1370 fsize->stepwise.min_height = MIN_HEIGHT;
1371 fsize->stepwise.max_height = MAX_HEIGHT;
1372 fsize->stepwise.step_height = 1;
/*
 * Event subscription: FRAME_SYNC only on image output nodes (2-deep queue),
 * SOURCE_CHANGE on everything except meta input nodes (4-deep queue);
 * anything else falls through to the control framework.
 */
1377 static int cfe_subscribe_event(struct v4l2_fh *fh,
1378 const struct v4l2_event_subscription *sub)
1380 struct cfe_node *node = video_get_drvdata(fh->vdev);
1382 switch (sub->type) {
1383 case V4L2_EVENT_FRAME_SYNC:
1384 if (!is_image_output_node(node))
1387 return v4l2_event_subscribe(fh, sub, 2, NULL);
1388 case V4L2_EVENT_SOURCE_CHANGE:
1389 if (is_meta_input_node(node))
1392 return v4l2_event_subscribe(fh, sub, 4, NULL);
1395 return v4l2_ctrl_subscribe_event(fh, sub);
/*
 * ioctl table shared by all CFE video nodes. Video-capture, meta-capture
 * and meta-output fmt ops are all wired up; the per-node ones that do not
 * apply are disabled elsewhere (see v4l2_disable_ioctl() in
 * cfe_register_node()). Buffer ioctls use the stock vb2 helpers.
 */
1398 static const struct v4l2_ioctl_ops cfe_ioctl_ops = {
1399 .vidioc_querycap = cfe_querycap,
1400 .vidioc_enum_fmt_vid_cap = cfe_enum_fmt_vid_cap,
1401 .vidioc_g_fmt_vid_cap = cfe_g_fmt,
1402 .vidioc_s_fmt_vid_cap = cfe_s_fmt_vid_cap,
1403 .vidioc_try_fmt_vid_cap = cfe_try_fmt_vid_cap,
1405 .vidioc_enum_fmt_meta_cap = cfe_enum_fmt_meta,
1406 .vidioc_g_fmt_meta_cap = cfe_g_fmt,
1407 .vidioc_s_fmt_meta_cap = cfe_s_fmt_meta,
1408 .vidioc_try_fmt_meta_cap = cfe_try_fmt_meta,
1410 .vidioc_enum_fmt_meta_out = cfe_enum_fmt_meta,
1411 .vidioc_g_fmt_meta_out = cfe_g_fmt,
1412 .vidioc_s_fmt_meta_out = cfe_s_fmt_meta,
1413 .vidioc_try_fmt_meta_out = cfe_try_fmt_meta,
1415 .vidioc_enum_framesizes = cfe_enum_framesizes,
1417 .vidioc_reqbufs = vb2_ioctl_reqbufs,
1418 .vidioc_create_bufs = vb2_ioctl_create_bufs,
1419 .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
1420 .vidioc_querybuf = vb2_ioctl_querybuf,
1421 .vidioc_qbuf = vb2_ioctl_qbuf,
1422 .vidioc_dqbuf = vb2_ioctl_dqbuf,
1423 .vidioc_expbuf = vb2_ioctl_expbuf,
1424 .vidioc_streamon = vb2_ioctl_streamon,
1425 .vidioc_streamoff = vb2_ioctl_streamoff,
1427 .vidioc_subscribe_event = cfe_subscribe_event,
1428 .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
/*
 * v4l2_device notify callback: fan out subdev events to every registered
 * video node's event queue.
 */
1431 static void cfe_notify(struct v4l2_subdev *sd, unsigned int notification,
1434 struct cfe_device *cfe = to_cfe_device(sd->v4l2_dev);
1437 switch (notification) {
1438 case V4L2_DEVICE_NOTIFY_EVENT:
1439 for (i = 0; i < NUM_NODES; i++) {
1440 struct cfe_node *node = &cfe->node[i];
/*
 * NOTE(review): only what is visible — the intervening lines
 * (presumably a `continue` for unregistered nodes) are missing
 * from this view, so the exact skip logic cannot be confirmed.
 */
1442 if (check_state(cfe, NODE_REGISTERED, i))
1445 v4l2_event_queue(&node->video_dev, arg);
1453 /* cfe capture driver file operations */
/* Standard vb2-backed file ops; no driver-specific open/poll/mmap logic. */
1454 static const struct v4l2_file_operations cfe_fops = {
1455 .owner = THIS_MODULE,
1456 .open = v4l2_fh_open,
1457 .release = vb2_fop_release,
1458 .poll = vb2_fop_poll,
1459 .unlocked_ioctl = video_ioctl2,
1460 .mmap = vb2_fop_mmap,
/*
 * media link_validate for the video nodes: compare the remote subdev pad
 * format against the node's configured V4L2 format. Image nodes must match
 * width/height and fourcc; the embedded-data node only warns on mismatch
 * (see the TODO below).
 */
1463 static int cfe_video_link_validate(struct media_link *link)
1465 struct video_device *vd = container_of(link->sink->entity,
1466 struct video_device, entity);
1467 struct cfe_node *node = container_of(vd, struct cfe_node, video_dev);
1468 struct cfe_device *cfe = node->cfe;
1469 struct v4l2_mbus_framefmt *source_fmt;
1470 struct v4l2_subdev_state *state;
1471 struct v4l2_subdev *source_sd;
1474 cfe_dbg("%s: [%s] link \"%s\":%u -> \"%s\":%u\n", __func__,
1475 node_desc[node->id].name,
1476 link->source->entity->name, link->source->index,
1477 link->sink->entity->name, link->sink->index);
/* The video node must have exactly one connected remote source pad. */
1479 if (!media_entity_remote_source_pad_unique(link->sink->entity)) {
1480 cfe_err("video node %s pad not connected\n", vd->name);
1484 source_sd = media_entity_to_v4l2_subdev(link->source->entity);
/* Holds the subdev state lock until v4l2_subdev_unlock_state() below. */
1486 state = v4l2_subdev_lock_and_get_active_state(source_sd);
1488 source_fmt = v4l2_subdev_get_pad_format(source_sd, state,
1489 link->source->index);
1495 if (is_image_output_node(node)) {
1496 struct v4l2_pix_format *pix_fmt = &node->fmt.fmt.pix;
1497 const struct cfe_fmt *fmt;
1499 if (source_fmt->width != pix_fmt->width ||
1500 source_fmt->height != pix_fmt->height) {
1501 cfe_err("Wrong width or height %ux%u (remote pad set to %ux%u)\n",
1502 pix_fmt->width, pix_fmt->height,
1504 source_fmt->height);
/* The mbus code must map to the node's configured fourcc. */
1509 fmt = find_format_by_code(source_fmt->code);
1510 if (!fmt || fmt->fourcc != pix_fmt->pixelformat) {
1511 cfe_err("Format mismatch!\n");
1515 } else if (node->id == CSI2_CH1_EMBEDDED) {
1516 struct v4l2_meta_format *meta_fmt = &node->fmt.fmt.meta;
/*
 * Embedded data: the remote pad expresses the buffer as
 * width*height bytes with the SENSOR_DATA mbus code.
 */
1518 if (source_fmt->width * source_fmt->height !=
1519 meta_fmt->buffersize ||
1520 source_fmt->code != MEDIA_BUS_FMT_SENSOR_DATA) {
1521 cfe_err("WARNING: Wrong metadata width/height/code %ux%u %08x (remote pad set to %ux%u %08x)\n",
1522 meta_fmt->buffersize, 1,
1523 MEDIA_BUS_FMT_SENSOR_DATA,
1527 /* TODO: this should throw an error eventually */
1532 v4l2_subdev_unlock_state(state);
/* Entity ops for the video nodes: only link validation is implemented. */
1537 static const struct media_entity_operations cfe_media_entity_ops = {
1538 .link_validate = cfe_video_link_validate,
/*
 * media_device link_notify: after a link change, update each node's
 * NODE_ENABLED state bit under state_lock, then recompute which CSI-2
 * channel (if any) feeds the PiSP front end (cfe->fe_csi2_channel,
 * -1 when none).
 */
1541 static int cfe_video_link_notify(struct media_link *link, u32 flags,
1542 unsigned int notification)
1544 struct media_device *mdev = link->graph_obj.mdev;
1545 struct cfe_device *cfe = container_of(mdev, struct cfe_device, mdev);
1546 struct media_entity *fe = &cfe->fe.sd.entity;
1547 struct media_entity *csi2 = &cfe->csi2.sd.entity;
1548 unsigned long lock_flags;
/* Only react once the link change has actually been applied. */
1551 if (notification != MEDIA_DEV_NOTIFY_POST_LINK_CH)
1554 cfe_dbg("%s: %s[%u] -> %s[%u] 0x%x", __func__,
1555 link->source->entity->name, link->source->index,
1556 link->sink->entity->name, link->sink->index, flags);
1558 spin_lock_irqsave(&cfe->state_lock, lock_flags);
/* Mirror the link's enabled flag into the touched node's state bits. */
1560 for (i = 0; i < NUM_NODES; i++) {
1561 if (link->sink->entity != &cfe->node[i].video_dev.entity &&
1562 link->source->entity != &cfe->node[i].video_dev.entity)
1565 if (link->flags & MEDIA_LNK_FL_ENABLED)
1566 set_state(cfe, NODE_ENABLED, i);
1568 clear_state(cfe, NODE_ENABLED, i);
1573 spin_unlock_irqrestore(&cfe->state_lock, lock_flags);
/* Only CSI2 -> FE links affect the FE channel selection below. */
1575 if (link->source->entity != csi2)
1577 if (link->sink->index != 0)
/* The embedded-data channel can never feed the front end. */
1579 if (link->source->index == node_desc[CSI2_CH1_EMBEDDED].link_pad)
1582 cfe->fe_csi2_channel = -1;
1583 if (link->sink->entity == fe && (link->flags & MEDIA_LNK_FL_ENABLED)) {
1584 if (link->source->index == node_desc[CSI2_CH0].link_pad)
1585 cfe->fe_csi2_channel = CSI2_CH0;
1586 else if (link->source->index == node_desc[CSI2_CH2].link_pad)
1587 cfe->fe_csi2_channel = CSI2_CH2;
1588 else if (link->source->index == node_desc[CSI2_CH3].link_pad)
1589 cfe->fe_csi2_channel = CSI2_CH3;
1592 if (is_fe_enabled(cfe))
1593 cfe_dbg("%s: Found CSI2:%d -> FE:0 link\n", __func__,
1594 cfe->fe_csi2_channel);
1596 cfe_dbg("%s: Unable to find CSI2:x -> FE:0 link\n", __func__);
/* Media device ops: track link changes via cfe_video_link_notify(). */
1601 static const struct media_device_ops cfe_media_device_ops = {
1602 .link_notify = cfe_video_link_notify,
/*
 * kref release: final teardown once the last reference is dropped.
 * Cleans up the media device (the kfree of cfe itself is in lines not
 * visible in this view).
 */
1605 static void cfe_release(struct kref *kref)
1607 struct cfe_device *cfe = container_of(kref, struct cfe_device, kref);
1609 media_device_cleanup(&cfe->mdev);
/* Drop a reference on the device; frees it via cfe_release() at zero. */
1614 static void cfe_put(struct cfe_device *cfe)
1616 kref_put(&cfe->kref, cfe_release);
/* Take an additional reference on the device. */
1619 static void cfe_get(struct cfe_device *cfe)
1621 kref_get(&cfe->kref);
/*
 * video_device release callback: drops the per-node reference taken in
 * cfe_register_node() (the cfe_put() call is in lines not visible here).
 */
1624 static void cfe_node_release(struct video_device *vdev)
1626 struct cfe_node *node = video_get_drvdata(vdev);
/*
 * Initialize and register one video node: pick a default format, set up
 * its vb2 queue and video_device, disable the ioctls that do not apply,
 * register it with the V4L2 core and mark it NODE_REGISTERED.
 * Returns 0 on success or a negative errno (error paths are in lines not
 * visible in this view).
 */
1631 static int cfe_register_node(struct cfe_device *cfe, int id)
1633 struct video_device *vdev;
1634 const struct cfe_fmt *fmt;
1635 struct vb2_queue *q;
1636 struct cfe_node *node = &cfe->node[id];
/* Image nodes start from the driver-wide default pixel format. */
1642 if (is_image_output_node(node)) {
1643 fmt = find_format_by_code(cfe_default_format.code);
1645 cfe_err("Failed to find format code\n");
1649 node->fmt.fmt.pix.pixelformat = fmt->fourcc;
1650 v4l2_fill_pix_format(&node->fmt.fmt.pix, &cfe_default_format);
1652 ret = try_fmt_vid_cap(node, &node->fmt);
/* Meta nodes get their defaults from try_fmt_meta() instead. */
1656 ret = try_fmt_meta(node, &node->fmt);
1660 node->fmt.type = node_desc[id].buf_type;
1662 mutex_init(&node->lock);
/* vb2 queue setup: MMAP/DMABUF, contiguous DMA memory. */
1664 q = &node->buffer_queue;
1665 q->type = node_desc[id].buf_type;
1666 q->io_modes = VB2_MMAP | VB2_DMABUF;
1668 q->ops = &cfe_video_qops;
1669 q->mem_ops = &vb2_dma_contig_memops;
/* FE_CONFIG buffers carry an extra embedded config copy. */
1670 q->buf_struct_size = id == FE_CONFIG ? sizeof(struct cfe_config_buffer)
1671 : sizeof(struct cfe_buffer);
1672 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
1673 q->lock = &node->lock;
1674 q->min_buffers_needed = 1;
1675 q->dev = &cfe->pdev->dev;
1677 ret = vb2_queue_init(q);
1679 cfe_err("vb2_queue_init() failed\n");
1683 INIT_LIST_HEAD(&node->dma_queue);
/* video_device setup. */
1685 vdev = &node->video_dev;
1686 vdev->release = cfe_node_release;
1687 vdev->fops = &cfe_fops;
1688 vdev->ioctl_ops = &cfe_ioctl_ops;
1689 vdev->entity.ops = &cfe_media_entity_ops;
1690 vdev->v4l2_dev = &cfe->v4l2_dev;
/* Output-from-hardware nodes are capture (RX); FE config is TX. */
1691 vdev->vfl_dir = (is_image_output_node(node) || is_meta_output_node(node))
1692 ? VFL_DIR_RX : VFL_DIR_TX;
1694 vdev->lock = &node->lock;
1695 vdev->device_caps = node_desc[id].cap;
1696 vdev->device_caps |= V4L2_CAP_STREAMING | V4L2_CAP_IO_MC;
1698 /* Define the device names */
1699 snprintf(vdev->name, sizeof(vdev->name), "%s-%s", CFE_MODULE_NAME,
1700 node_desc[id].name);
1702 video_set_drvdata(vdev, node);
/* FE_OUT0 is the default entity userspace should pick up. */
1703 if (node->id == FE_OUT0)
1704 vdev->entity.flags |= MEDIA_ENT_FL_DEFAULT;
1705 node->pad.flags = node_desc[id].pad_flags;
1706 media_entity_pads_init(&vdev->entity, 1, &node->pad);
/* Frame size/interval enumeration makes no sense for meta nodes. */
1708 if (is_meta_node(node)) {
1709 v4l2_disable_ioctl(&node->video_dev,
1710 VIDIOC_ENUM_FRAMEINTERVALS);
1711 v4l2_disable_ioctl(&node->video_dev,
1712 VIDIOC_ENUM_FRAMESIZES);
1715 ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
1717 cfe_err("Unable to register video device %s\n", vdev->name);
1721 cfe_info("Registered [%s] node id %d successfully as /dev/video%u\n",
1722 vdev->name, id, vdev->num);
1725 * Acquire a reference to cfe, which will be released when the video
1726 * device will be unregistered and userspace will have closed all open
1730 set_state(cfe, NODE_REGISTERED, id);
/* Unregister every node previously marked NODE_REGISTERED. */
1735 static void cfe_unregister_nodes(struct cfe_device *cfe)
1739 for (i = 0; i < NUM_NODES; i++) {
1740 struct cfe_node *node = &cfe->node[i];
1742 if (check_state(cfe, NODE_REGISTERED, i)) {
/* Clear the flag first so teardown paths cannot re-enter. */
1743 clear_state(cfe, NODE_REGISTERED, i);
1744 video_unregister_device(&node->video_dev);
/*
 * Create the media graph links: sensor -> CSI2 pads, CSI2 channels ->
 * their /dev/video nodes, non-embedded CSI2 channels -> FE inputs, and
 * FE pads <-> the remaining FE video nodes (direction chosen by the
 * node's pad flags).
 */
1749 static int cfe_link_node_pads(struct cfe_device *cfe)
1754 for (i = 0; i < CSI2_NUM_CHANNELS; i++) {
1755 struct cfe_node *node = &cfe->node[i];
1757 if (!check_state(cfe, NODE_REGISTERED, i))
/* Only link as many sensor pads as the sensor actually exposes. */
1760 if (i < cfe->sensor->entity.num_pads) {
1761 /* Sensor -> CSI2 */
1762 ret = media_create_pad_link(&cfe->sensor->entity, i,
1763 &cfe->csi2.sd.entity, i,
1764 MEDIA_LNK_FL_IMMUTABLE |
1765 MEDIA_LNK_FL_ENABLED);
1770 /* CSI2 channel # -> /dev/video# */
1771 ret = media_create_pad_link(&cfe->csi2.sd.entity,
1772 node_desc[i].link_pad,
1773 &node->video_dev.entity, 0, 0);
/* Embedded data cannot be routed into the front end. */
1777 if (node->id != CSI2_CH1_EMBEDDED) {
1778 /* CSI2 channel # -> FE Input */
1779 ret = media_create_pad_link(&cfe->csi2.sd.entity,
1780 node_desc[i].link_pad,
/* Remaining (FE-side) nodes: pick link direction from the pad flags. */
1788 for (; i < NUM_NODES; i++) {
1789 struct cfe_node *node = &cfe->node[i];
1790 struct media_entity *src, *dst;
1791 unsigned int src_pad, dst_pad;
1793 if (node_desc[i].pad_flags & MEDIA_PAD_FL_SINK) {
1794 /* FE -> /dev/video# */
1795 src = &cfe->fe.sd.entity;
1796 src_pad = node_desc[i].link_pad;
1797 dst = &node->video_dev.entity;
1800 /* /dev/video# -> FE */
1801 dst = &cfe->fe.sd.entity;
1802 dst_pad = node_desc[i].link_pad;
1803 src = &node->video_dev.entity;
1807 ret = media_create_pad_link(src, src_pad, dst, dst_pad, 0);
/*
 * Finish bring-up once the sensor subdev is bound: register all video
 * nodes, build the media links and register the subdev device nodes.
 * On any failure the registered nodes are unregistered again.
 */
1815 static int cfe_probe_complete(struct cfe_device *cfe)
1820 cfe->v4l2_dev.notify = cfe_notify;
/* A second sensor pad is taken to mean it emits embedded data. */
1822 cfe->sensor_embedded_data = (cfe->sensor->entity.num_pads >= 2);
1824 for (i = 0; i < NUM_NODES; i++) {
1825 ret = cfe_register_node(cfe, i);
1827 cfe_err("Unable to register video node %u.\n", i);
1832 ret = cfe_link_node_pads(cfe);
1834 cfe_err("Unable to link node pads.\n");
1838 ret = v4l2_device_register_subdev_nodes(&cfe->v4l2_dev);
1840 cfe_err("Unable to register subdev nodes.\n");
1847 cfe_unregister_nodes(cfe);
/*
 * Async notifier 'bound' callback: record the first sensor subdev that
 * binds; any further subdev is rejected (only one sensor is supported).
 */
1851 static int cfe_async_bound(struct v4l2_async_notifier *notifier,
1852 struct v4l2_subdev *subdev,
1853 struct v4l2_async_connection *asd)
1855 struct cfe_device *cfe = to_cfe_device(notifier->v4l2_dev);
1858 cfe_info("Rejecting subdev %s (Already set!!)", subdev->name);
1862 cfe->sensor = subdev;
1863 cfe_info("Using sensor %s for capture\n", subdev->name);
/* All async subdevs bound: complete probing (node registration etc.). */
1868 static int cfe_async_complete(struct v4l2_async_notifier *notifier)
1870 struct cfe_device *cfe = to_cfe_device(notifier->v4l2_dev);
1872 return cfe_probe_complete(cfe);
1875 static const struct v4l2_async_notifier_operations cfe_async_ops = {
1876 .bound = cfe_async_bound,
1877 .complete = cfe_async_complete,
/*
 * Parse the DT graph: find the local CSI-2 D-PHY endpoint and the remote
 * sensor node, validate lane configuration, then register an async
 * notifier waiting for the sensor subdev to bind.
 */
1880 static int of_cfe_connect_subdevs(struct cfe_device *cfe)
1882 struct platform_device *pdev = cfe->pdev;
1883 struct v4l2_fwnode_endpoint ep = { .bus_type = V4L2_MBUS_CSI2_DPHY };
1884 struct device_node *node = pdev->dev.of_node;
1885 struct device_node *ep_node;
1886 struct device_node *sensor_node;
1890 /* Get the local endpoint and remote device. */
1891 ep_node = of_graph_get_next_endpoint(node, NULL);
1893 cfe_err("can't get next endpoint\n");
1897 cfe_dbg("ep_node is %pOF\n", ep_node);
1899 sensor_node = of_graph_get_remote_port_parent(ep_node);
1901 cfe_err("can't get remote parent\n");
1905 cfe_info("found subdevice %pOF\n", sensor_node);
1907 /* Parse the local endpoint and validate its configuration. */
1908 v4l2_fwnode_endpoint_parse(of_fwnode_handle(ep_node), &ep);
1910 cfe->csi2.multipacket_line =
1911 fwnode_property_present(of_fwnode_handle(ep_node),
1912 "multipacket-line");
1914 if (ep.bus_type != V4L2_MBUS_CSI2_DPHY) {
1915 cfe_err("endpoint node type != CSI2\n");
/* Lanes must be wired 1:1 in order; reordering is unsupported. */
1919 for (lane = 0; lane < ep.bus.mipi_csi2.num_data_lanes; lane++) {
1920 if (ep.bus.mipi_csi2.data_lanes[lane] != lane + 1) {
1921 cfe_err("subdevice %pOF: data lanes reordering not supported\n",
1927 /* TODO: Get the frequency from devicetree */
/* Placeholder D-PHY frequency until DT provides the real value. */
1928 cfe->csi2.dphy.dphy_freq = 999;
1929 cfe->csi2.dphy.num_lanes = ep.bus.mipi_csi2.num_data_lanes;
1930 cfe->csi2.bus_flags = ep.bus.mipi_csi2.flags;
1932 cfe_dbg("subdevice %pOF: %u data lanes, flags=0x%08x, multipacket_line=%u\n",
1933 sensor_node, cfe->csi2.dphy.num_lanes, cfe->csi2.bus_flags,
1934 cfe->csi2.multipacket_line);
1936 /* Initialize and register the async notifier. */
1937 v4l2_async_nf_init(&cfe->notifier, &cfe->v4l2_dev);
1938 cfe->notifier.ops = &cfe_async_ops;
1940 cfe->asd = v4l2_async_nf_add_fwnode(&cfe->notifier,
1941 of_fwnode_handle(sensor_node),
1942 struct v4l2_async_connection);
1943 if (IS_ERR(cfe->asd)) {
/*
 * NOTE(review): 'ret' looks stale here — the failure is encoded in
 * cfe->asd; this should likely print PTR_ERR(cfe->asd) (and assign
 * it to ret). Verify against the surrounding lines not visible in
 * this view before changing.
 */
1944 cfe_err("Error adding subdevice: %d\n", ret);
1948 ret = v4l2_async_nf_register(&cfe->notifier);
1950 cfe_err("Error registering async notifier: %d\n", ret);
/* References taken by the of_graph getters above are dropped here. */
1955 of_node_put(sensor_node);
1956 of_node_put(ep_node);
/*
 * Platform probe: map the four register blocks (CSI2 DMA, D-PHY, MIPI
 * cfg, PiSP FE), request the IRQ, set the 64-bit DMA mask, acquire the
 * clock, register the media/V4L2 devices, create debugfs entries, enable
 * runtime PM, init the CSI2 and FE sub-blocks and finally connect the
 * DT subdevs. Unwinds in reverse order via the err_* labels.
 */
1961 static int cfe_probe(struct platform_device *pdev)
1963 struct cfe_device *cfe;
1964 char debugfs_name[32];
1967 cfe = kzalloc(sizeof(*cfe), GFP_KERNEL);
1971 platform_set_drvdata(pdev, cfe);
1973 kref_init(&cfe->kref);
/* No CSI2 channel routed to the FE until links say otherwise. */
1975 cfe->fe_csi2_channel = -1;
1976 spin_lock_init(&cfe->state_lock);
/* Register blocks, in platform-resource order 0..3. */
1978 cfe->csi2.base = devm_platform_ioremap_resource(pdev, 0);
1979 if (IS_ERR(cfe->csi2.base)) {
1980 dev_err(&pdev->dev, "Failed to get dma io block\n");
1981 ret = PTR_ERR(cfe->csi2.base);
1985 cfe->csi2.dphy.base = devm_platform_ioremap_resource(pdev, 1);
1986 if (IS_ERR(cfe->csi2.dphy.base)) {
1987 dev_err(&pdev->dev, "Failed to get host io block\n");
1988 ret = PTR_ERR(cfe->csi2.dphy.base);
1992 cfe->mipi_cfg_base = devm_platform_ioremap_resource(pdev, 2);
1993 if (IS_ERR(cfe->mipi_cfg_base)) {
1994 dev_err(&pdev->dev, "Failed to get mipi cfg io block\n");
1995 ret = PTR_ERR(cfe->mipi_cfg_base);
1999 cfe->fe.base = devm_platform_ioremap_resource(pdev, 3);
2000 if (IS_ERR(cfe->fe.base)) {
2001 dev_err(&pdev->dev, "Failed to get pisp fe io block\n");
2002 ret = PTR_ERR(cfe->fe.base);
2006 ret = platform_get_irq(pdev, 0);
2008 dev_err(&pdev->dev, "No IRQ resource\n");
/* 'ret' still holds the IRQ number from platform_get_irq() here. */
2013 ret = devm_request_irq(&pdev->dev, ret, cfe_isr, 0, "rp1-cfe", cfe);
2015 dev_err(&pdev->dev, "Unable to request interrupt\n");
2020 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2022 dev_err(&pdev->dev, "DMA enable failed\n");
2026 /* TODO: Enable clock only when running. */
2027 cfe->clk = devm_clk_get(&pdev->dev, NULL);
2028 if (IS_ERR(cfe->clk))
2029 return dev_err_probe(&pdev->dev, PTR_ERR(cfe->clk),
2030 "clock not found\n");
/* Media controller device setup. */
2032 cfe->mdev.dev = &pdev->dev;
2033 cfe->mdev.ops = &cfe_media_device_ops;
2034 strscpy(cfe->mdev.model, CFE_MODULE_NAME, sizeof(cfe->mdev.model));
2035 strscpy(cfe->mdev.serial, "", sizeof(cfe->mdev.serial));
2036 snprintf(cfe->mdev.bus_info, sizeof(cfe->mdev.bus_info), "platform:%s",
2037 dev_name(&pdev->dev));
2039 media_device_init(&cfe->mdev);
2041 cfe->v4l2_dev.mdev = &cfe->mdev;
2043 ret = v4l2_device_register(&pdev->dev, &cfe->v4l2_dev);
2045 cfe_err("Unable to register v4l2 device.\n");
2049 snprintf(debugfs_name, sizeof(debugfs_name), "rp1-cfe:%s",
2050 dev_name(&pdev->dev));
2051 cfe->debugfs = debugfs_create_dir(debugfs_name, NULL);
2052 debugfs_create_file("format", 0444, cfe->debugfs, cfe, &format_fops);
2053 debugfs_create_file("regs", 0444, cfe->debugfs, cfe,
2054 &mipi_cfg_regs_fops);
2056 /* Enable the block power domain */
2057 pm_runtime_enable(&pdev->dev);
/* Power up for the csi2/fe init below; dropped again before returning. */
2059 ret = pm_runtime_resume_and_get(&cfe->pdev->dev);
2061 goto err_runtime_disable;
2063 cfe->csi2.v4l2_dev = &cfe->v4l2_dev;
2064 ret = csi2_init(&cfe->csi2, cfe->debugfs);
2066 cfe_err("Failed to init csi2 (%d)\n", ret);
2067 goto err_runtime_put;
2070 cfe->fe.v4l2_dev = &cfe->v4l2_dev;
2071 ret = pisp_fe_init(&cfe->fe, cfe->debugfs);
2073 cfe_err("Failed to init pisp fe (%d)\n", ret);
2074 goto err_csi2_uninit;
/* hw_revision must be set before media_device_register(). */
2077 cfe->mdev.hw_revision = cfe->fe.hw_revision;
2078 ret = media_device_register(&cfe->mdev);
2080 cfe_err("Unable to register media-controller device.\n");
2081 goto err_pisp_fe_uninit;
2084 ret = of_cfe_connect_subdevs(cfe);
2086 cfe_err("Failed to connect subdevs\n");
2087 goto err_media_unregister;
2090 pm_runtime_put(&cfe->pdev->dev);
/* Error unwinding, in reverse order of acquisition. */
2094 err_media_unregister:
2095 media_device_unregister(&cfe->mdev);
2097 pisp_fe_uninit(&cfe->fe);
2099 csi2_uninit(&cfe->csi2);
2101 pm_runtime_put(&cfe->pdev->dev);
2102 err_runtime_disable:
2103 pm_runtime_disable(&pdev->dev);
2104 debugfs_remove(cfe->debugfs);
2105 v4l2_device_unregister(&cfe->v4l2_dev);
/*
 * Platform remove: tear down in reverse of probe order — debugfs, async
 * notifier, media device, video nodes, FE, CSI2, runtime PM, v4l2 device.
 * The final cfe_put() (if any) is in lines not visible in this view.
 */
2112 static int cfe_remove(struct platform_device *pdev)
2114 struct cfe_device *cfe = platform_get_drvdata(pdev);
2116 debugfs_remove(cfe->debugfs);
2118 v4l2_async_nf_unregister(&cfe->notifier);
2119 media_device_unregister(&cfe->mdev);
2120 cfe_unregister_nodes(cfe);
2122 pisp_fe_uninit(&cfe->fe);
2123 csi2_uninit(&cfe->csi2);
2125 pm_runtime_disable(&pdev->dev);
2127 v4l2_device_unregister(&cfe->v4l2_dev);
/* Runtime suspend: gate the functional clock. */
2134 static int cfe_runtime_suspend(struct device *dev)
2136 struct platform_device *pdev = to_platform_device(dev);
2137 struct cfe_device *cfe = platform_get_drvdata(pdev);
2139 clk_disable_unprepare(cfe->clk);
/* Runtime resume: re-enable the functional clock; fails if it won't run. */
2144 static int cfe_runtime_resume(struct device *dev)
2146 struct platform_device *pdev = to_platform_device(dev);
2147 struct cfe_device *cfe = platform_get_drvdata(pdev);
2150 ret = clk_prepare_enable(cfe->clk);
2152 dev_err(dev, "Unable to enable clock\n");
/* Runtime PM plus forced suspend/resume for late system sleep. */
2159 static const struct dev_pm_ops cfe_pm_ops = {
2160 SET_RUNTIME_PM_OPS(cfe_runtime_suspend, cfe_runtime_resume, NULL)
2161 SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
/* DT match table: binds against the RP1 CFE compatible string. */
2164 static const struct of_device_id cfe_of_match[] = {
2165 { .compatible = "raspberrypi,rp1-cfe" },
2168 MODULE_DEVICE_TABLE(of, cfe_of_match);
2170 static struct platform_driver cfe_driver = {
2172 .remove = cfe_remove,
2174 .name = CFE_MODULE_NAME,
2175 .of_match_table = cfe_of_match,
2180 module_platform_driver(cfe_driver);
/* Module metadata. */
2182 MODULE_AUTHOR("Naushir Patuck <naush@raspberrypi.com>");
2183 MODULE_DESCRIPTION("RP1 Camera Front End driver");
2184 MODULE_LICENSE("GPL");
2185 MODULE_VERSION(CFE_VERSION);