1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2017,2020 Intel Corporation
5 * Based partially on Intel IPU4 driver written by
6 * Sakari Ailus <sakari.ailus@linux.intel.com>
7 * Samu Onkalo <samu.onkalo@intel.com>
8 * Jouni Högander <jouni.hogander@intel.com>
9 * Jouni Ukkonen <jouni.ukkonen@intel.com>
10 * Antti Laakso <antti.laakso@intel.com>
14 #include <linux/bitops.h>
15 #include <linux/delay.h>
16 #include <linux/interrupt.h>
17 #include <linux/iopoll.h>
19 #include <linux/module.h>
20 #include <linux/pci.h>
21 #include <linux/pfn.h>
22 #include <linux/pm_runtime.h>
23 #include <linux/property.h>
24 #include <linux/vmalloc.h>
25 #include <media/v4l2-ctrls.h>
26 #include <media/v4l2-device.h>
27 #include <media/v4l2-event.h>
28 #include <media/v4l2-fwnode.h>
29 #include <media/v4l2-ioctl.h>
30 #include <media/videobuf2-dma-sg.h>
32 #include "../ipu-bridge.h"
33 #include "ipu3-cio2.h"
35 struct ipu3_cio2_fmt {
43 * These are raw formats used in the third generation of Intel's
44 * Image Processing Unit, known as IPU3.
45 * 10-bit raw Bayer packed: 32 bytes for every 25 pixels, with the
46 * last 6 bits unused.
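* As a rough sanity check of that packing: 25 pixels x 10 bits = 250 bits,
* stored in 32 bytes = 256 bits, which leaves exactly 6 bits unused.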
48 static const struct ipu3_cio2_fmt formats[] = {
49 { /* put default entry at beginning */
50 .mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
51 .fourcc = V4L2_PIX_FMT_IPU3_SGRBG10,
55 .mbus_code = MEDIA_BUS_FMT_SGBRG10_1X10,
56 .fourcc = V4L2_PIX_FMT_IPU3_SGBRG10,
60 .mbus_code = MEDIA_BUS_FMT_SBGGR10_1X10,
61 .fourcc = V4L2_PIX_FMT_IPU3_SBGGR10,
65 .mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10,
66 .fourcc = V4L2_PIX_FMT_IPU3_SRGGB10,
70 .mbus_code = MEDIA_BUS_FMT_Y10_1X10,
71 .fourcc = V4L2_PIX_FMT_IPU3_Y10,
78 * cio2_find_format - lookup color format by fourcc and/or media bus code
79 * @pixelformat: fourcc to match, ignored if null
80 * @mbus_code: media bus code to match, ignored if null
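*
* For example, cio2_find_format(&pixelformat, NULL) matches on the fourcc
* alone, cio2_find_format(NULL, &mbus_code) matches on the media bus code
* alone, and passing both requires both to match the same table entry.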
82 static const struct ipu3_cio2_fmt *cio2_find_format(const u32 *pixelformat,
87 for (i = 0; i < ARRAY_SIZE(formats); i++) {
88 if (pixelformat && *pixelformat != formats[i].fourcc)
90 if (mbus_code && *mbus_code != formats[i].mbus_code)
99 static inline u32 cio2_bytesperline(const unsigned int width)
102 * 64 bytes for every 50 pixels, the line length
103 * in bytes is a multiple of 64 (line end alignment).
105 return DIV_ROUND_UP(width, 50) * 64;
108 /**************** FBPT operations ****************/
110 static void cio2_fbpt_exit_dummy(struct cio2_device *cio2)
112 struct device *dev = &cio2->pci_dev->dev;
114 if (cio2->dummy_lop) {
115 dma_free_coherent(dev, PAGE_SIZE, cio2->dummy_lop,
116 cio2->dummy_lop_bus_addr);
117 cio2->dummy_lop = NULL;
119 if (cio2->dummy_page) {
120 dma_free_coherent(dev, PAGE_SIZE, cio2->dummy_page,
121 cio2->dummy_page_bus_addr);
122 cio2->dummy_page = NULL;
126 static int cio2_fbpt_init_dummy(struct cio2_device *cio2)
128 struct device *dev = &cio2->pci_dev->dev;
131 cio2->dummy_page = dma_alloc_coherent(dev, PAGE_SIZE,
132 &cio2->dummy_page_bus_addr,
134 cio2->dummy_lop = dma_alloc_coherent(dev, PAGE_SIZE,
135 &cio2->dummy_lop_bus_addr,
137 if (!cio2->dummy_page || !cio2->dummy_lop) {
138 cio2_fbpt_exit_dummy(cio2);
142 * List of Pointers (LOP) contains 1024 x 32-bit pointers, each referring to a 4 KiB page.
143 * Initialize each entry to dummy_page bus base address.
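* Assuming 4 KiB pages, one LOP page holds PAGE_SIZE / sizeof(u32) = 1024
* entries and can therefore describe 1024 * 4 KiB = 4 MiB of buffer memory.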
145 for (i = 0; i < CIO2_LOP_ENTRIES; i++)
146 cio2->dummy_lop[i] = PFN_DOWN(cio2->dummy_page_bus_addr);
151 static void cio2_fbpt_entry_enable(struct cio2_device *cio2,
152 struct cio2_fbpt_entry entry[CIO2_MAX_LOPS])
155 * The CPU first initializes some fields in fbpt, then sets
156 * the VALID bit, this barrier is to ensure that the DMA(device)
157 * does not see the VALID bit enabled before other fields are
158 * initialized; otherwise it could lead to havoc.
163 * Request interrupts for start and completion
164 * Valid bit is applicable only to 1st entry
166 entry[0].first_entry.ctrl = CIO2_FBPT_CTRL_VALID |
167 CIO2_FBPT_CTRL_IOC | CIO2_FBPT_CTRL_IOS;
170 /* Initialize fbpt entries to point to dummy frame */
171 static void cio2_fbpt_entry_init_dummy(struct cio2_device *cio2,
172 struct cio2_fbpt_entry
173 entry[CIO2_MAX_LOPS])
177 entry[0].first_entry.first_page_offset = 0;
178 entry[1].second_entry.num_of_pages = CIO2_LOP_ENTRIES * CIO2_MAX_LOPS;
179 entry[1].second_entry.last_page_available_bytes = PAGE_SIZE - 1;
181 for (i = 0; i < CIO2_MAX_LOPS; i++)
182 entry[i].lop_page_addr = PFN_DOWN(cio2->dummy_lop_bus_addr);
184 cio2_fbpt_entry_enable(cio2, entry);
187 /* Initialize fbpt entries to point to a given buffer */
188 static void cio2_fbpt_entry_init_buf(struct cio2_device *cio2,
189 struct cio2_buffer *b,
190 struct cio2_fbpt_entry
191 entry[CIO2_MAX_LOPS])
193 struct vb2_buffer *vb = &b->vbb.vb2_buf;
194 unsigned int length = vb->planes[0].length;
197 entry[0].first_entry.first_page_offset = b->offset;
198 remaining = length + entry[0].first_entry.first_page_offset;
199 entry[1].second_entry.num_of_pages = PFN_UP(remaining);
201 * last_page_available_bytes has the offset of the last byte in the
202 * last page which is still accessible by DMA. DMA cannot access
203 * beyond this point. Valid range for this is from 0 to 4095.
204 * 0 indicates 1st byte in the page is DMA accessible.
205 * 4095 (PAGE_SIZE - 1) means every single byte in the last page
206 * is available for DMA transfer.
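* As a hypothetical example with 4 KiB pages: a 2,735,616-byte plane
* (2496 bytes/line * 1096 lines) starting at page offset 0 needs
* PFN_UP(2735616) = 668 pages, and since 2735616 % 4096 = 3584, the
* last_page_available_bytes field becomes 3583.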
208 remaining = offset_in_page(remaining) ?: PAGE_SIZE;
209 entry[1].second_entry.last_page_available_bytes = remaining - 1;
213 while (remaining > 0) {
214 entry->lop_page_addr = PFN_DOWN(b->lop_bus_addr[i]);
215 remaining -= CIO2_LOP_ENTRIES * PAGE_SIZE;
221 * The first unused FBPT entry should still point to a valid LOP
223 entry->lop_page_addr = PFN_DOWN(cio2->dummy_lop_bus_addr);
225 cio2_fbpt_entry_enable(cio2, entry);
228 static int cio2_fbpt_init(struct cio2_device *cio2, struct cio2_queue *q)
230 struct device *dev = &cio2->pci_dev->dev;
232 q->fbpt = dma_alloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr,
240 static void cio2_fbpt_exit(struct cio2_queue *q, struct device *dev)
242 dma_free_coherent(dev, CIO2_FBPT_SIZE, q->fbpt, q->fbpt_bus_addr);
245 /**************** CSI2 hardware setup ****************/
248 * The CSI2 receiver has several parameters affecting
249 * the receiver timings. These depend on the MIPI bus frequency
250 * F in Hz (sensor transmitter rate) as follows:
251 * register value = (A/1e9 + B * UI) / COUNT_ACC
253 * UI = 1 / (2 * F) in seconds
254 * COUNT_ACC = counter accuracy in seconds
255 * For IPU3 COUNT_ACC = 0.0625
257 * A and B are coefficients from the table below,
258 * depending on whether the register minimum or maximum value is being calculated:
262 * reg_rx_csi_dly_cnt_termen_clane 0 0 38 0
263 * reg_rx_csi_dly_cnt_settle_clane 95 -8 300 -16
265 * reg_rx_csi_dly_cnt_termen_dlane0 0 0 35 4
266 * reg_rx_csi_dly_cnt_settle_dlane0 85 -2 145 -6
267 * reg_rx_csi_dly_cnt_termen_dlane1 0 0 35 4
268 * reg_rx_csi_dly_cnt_settle_dlane1 85 -2 145 -6
269 * reg_rx_csi_dly_cnt_termen_dlane2 0 0 35 4
270 * reg_rx_csi_dly_cnt_settle_dlane2 85 -2 145 -6
271 * reg_rx_csi_dly_cnt_termen_dlane3 0 0 35 4
272 * reg_rx_csi_dly_cnt_settle_dlane3 85 -2 145 -6
274 * We use the minimum values of both A and B.
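*
* As a rough worked example with a hypothetical 400 MHz link frequency:
* UI = 1 / (2 * 400e6) = 1.25 ns, so the minimum clock lane settle value is
* 16 * (95 + (-8) * 1.25) = 1360, while the clock lane termen value
* (A = B = 0) evaluates to 0.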
278 * shift for keeping value range suitable for 32-bit integer arithmetic
280 #define LIMIT_SHIFT 8
282 static s32 cio2_rx_timing(s32 a, s32 b, s64 freq, int def)
284 const u32 accinv = 16; /* inverse of the counter resolution */
285 const u32 uiinv = 500000000; /* 1e9 / 2 */
288 freq >>= LIMIT_SHIFT;
290 if (WARN_ON(freq <= 0 || freq > S32_MAX))
293 * b could be 0, -2 or -8, so |accinv * b| is always
294 * less than (1 << LIMIT_SHIFT) and thus |r| < 500000000.
296 r = accinv * b * (uiinv >> LIMIT_SHIFT);
298 /* max value of a is 95 */
304 /* Calculate the delay value for termination enable of clock lane HS Rx */
305 static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
306 struct cio2_csi2_timing *timing,
307 unsigned int bpp, unsigned int lanes)
309 struct device *dev = &cio2->pci_dev->dev;
315 freq = v4l2_get_link_freq(q->sensor->ctrl_handler, bpp, lanes * 2);
317 dev_err(dev, "error %lld, invalid link_freq\n", freq);
321 timing->clk_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_A,
322 CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_B,
324 CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
325 timing->clk_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_A,
326 CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_B,
328 CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);
329 timing->dat_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_A,
330 CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_B,
332 CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
333 timing->dat_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_A,
334 CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_B,
336 CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);
338 dev_dbg(dev, "freq ct value is %d\n", timing->clk_termen);
339 dev_dbg(dev, "freq cs value is %d\n", timing->clk_settle);
340 dev_dbg(dev, "freq dt value is %d\n", timing->dat_termen);
341 dev_dbg(dev, "freq ds value is %d\n", timing->dat_settle);
346 static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
348 static const int NUM_VCS = 4;
349 static const int SID; /* Stream id */
350 static const int ENTRY;
351 static const int FBPT_WIDTH = DIV_ROUND_UP(CIO2_MAX_LOPS,
352 CIO2_FBPT_SUBENTRY_UNIT);
353 const u32 num_buffers1 = CIO2_MAX_BUFFERS - 1;
354 const struct ipu3_cio2_fmt *fmt;
355 void __iomem *const base = cio2->base;
356 u8 lanes, csi2bus = q->csi2.port;
357 u8 sensor_vc = SENSOR_VIR_CH_DFLT;
358 struct cio2_csi2_timing timing;
361 fmt = cio2_find_format(NULL, &q->subdev_fmt.code);
365 lanes = q->csi2.lanes;
367 r = cio2_csi2_calc_timing(cio2, q, &timing, fmt->bpp, lanes);
371 writel(timing.clk_termen, q->csi_rx_base +
372 CIO2_REG_CSIRX_DLY_CNT_TERMEN(CIO2_CSIRX_DLY_CNT_CLANE_IDX));
373 writel(timing.clk_settle, q->csi_rx_base +
374 CIO2_REG_CSIRX_DLY_CNT_SETTLE(CIO2_CSIRX_DLY_CNT_CLANE_IDX));
376 for (i = 0; i < lanes; i++) {
377 writel(timing.dat_termen, q->csi_rx_base +
378 CIO2_REG_CSIRX_DLY_CNT_TERMEN(i));
379 writel(timing.dat_settle, q->csi_rx_base +
380 CIO2_REG_CSIRX_DLY_CNT_SETTLE(i));
383 writel(CIO2_PBM_WMCTRL1_MIN_2CK |
384 CIO2_PBM_WMCTRL1_MID1_2CK |
385 CIO2_PBM_WMCTRL1_MID2_2CK, base + CIO2_REG_PBM_WMCTRL1);
386 writel(CIO2_PBM_WMCTRL2_HWM_2CK << CIO2_PBM_WMCTRL2_HWM_2CK_SHIFT |
387 CIO2_PBM_WMCTRL2_LWM_2CK << CIO2_PBM_WMCTRL2_LWM_2CK_SHIFT |
388 CIO2_PBM_WMCTRL2_OBFFWM_2CK <<
389 CIO2_PBM_WMCTRL2_OBFFWM_2CK_SHIFT |
390 CIO2_PBM_WMCTRL2_TRANSDYN << CIO2_PBM_WMCTRL2_TRANSDYN_SHIFT |
391 CIO2_PBM_WMCTRL2_OBFF_MEM_EN, base + CIO2_REG_PBM_WMCTRL2);
392 writel(CIO2_PBM_ARB_CTRL_LANES_DIV <<
393 CIO2_PBM_ARB_CTRL_LANES_DIV_SHIFT |
394 CIO2_PBM_ARB_CTRL_LE_EN |
395 CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN <<
396 CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN_SHIFT |
397 CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP <<
398 CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP_SHIFT,
399 base + CIO2_REG_PBM_ARB_CTRL);
400 writel(CIO2_CSIRX_STATUS_DLANE_HS_MASK,
401 q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_HS);
402 writel(CIO2_CSIRX_STATUS_DLANE_LP_MASK,
403 q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_LP);
405 writel(CIO2_FB_HPLL_FREQ, base + CIO2_REG_FB_HPLL_FREQ);
406 writel(CIO2_ISCLK_RATIO, base + CIO2_REG_ISCLK_RATIO);
408 /* Configure MIPI backend */
409 for (i = 0; i < NUM_VCS; i++)
410 writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_SP_LUT_ENTRY(i));
412 /* There are 16 short packet LUT entries */
413 for (i = 0; i < 16; i++)
414 writel(CIO2_MIPIBE_LP_LUT_ENTRY_DISREGARD,
415 q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(i));
416 writel(CIO2_MIPIBE_GLOBAL_LUT_DISREGARD,
417 q->csi_rx_base + CIO2_REG_MIPIBE_GLOBAL_LUT_DISREGARD);
419 writel(CIO2_INT_EN_EXT_IE_MASK, base + CIO2_REG_INT_EN_EXT_IE);
420 writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
421 writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
422 writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_EDGE);
423 writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_LEVEL_NOT_PULSE);
424 writel(CIO2_INT_EN_EXT_OE_MASK, base + CIO2_REG_INT_EN_EXT_OE);
426 writel(CIO2_REG_INT_EN_IRQ | CIO2_INT_IOC(CIO2_DMA_CHAN) |
427 CIO2_REG_INT_EN_IOS(CIO2_DMA_CHAN),
428 base + CIO2_REG_INT_EN);
430 writel((CIO2_PXM_PXF_FMT_CFG_BPP_10 | CIO2_PXM_PXF_FMT_CFG_PCK_64B)
431 << CIO2_PXM_PXF_FMT_CFG_SID0_SHIFT,
432 base + CIO2_REG_PXM_PXF_FMT_CFG0(csi2bus));
433 writel(SID << CIO2_MIPIBE_LP_LUT_ENTRY_SID_SHIFT |
434 sensor_vc << CIO2_MIPIBE_LP_LUT_ENTRY_VC_SHIFT |
435 fmt->mipicode << CIO2_MIPIBE_LP_LUT_ENTRY_FORMAT_TYPE_SHIFT,
436 q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(ENTRY));
437 writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_COMP_FORMAT(sensor_vc));
438 writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_FORCE_RAW8);
439 writel(0, base + CIO2_REG_PXM_SID2BID0(csi2bus));
441 writel(lanes, q->csi_rx_base + CIO2_REG_CSIRX_NOF_ENABLED_LANES);
442 writel(CIO2_CGC_PRIM_TGE |
446 CIO2_CGC_CSI2_INTERFRAME_TGE |
447 CIO2_CGC_CSI2_PORT_DCGE |
452 CIO2_CGC_CLKGATE_HOLDOFF << CIO2_CGC_CLKGATE_HOLDOFF_SHIFT |
453 CIO2_CGC_CSI_CLKGATE_HOLDOFF
454 << CIO2_CGC_CSI_CLKGATE_HOLDOFF_SHIFT, base + CIO2_REG_CGC);
455 writel(CIO2_LTRCTRL_LTRDYNEN, base + CIO2_REG_LTRCTRL);
456 writel(CIO2_LTRVAL0_VAL << CIO2_LTRVAL02_VAL_SHIFT |
457 CIO2_LTRVAL0_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
458 CIO2_LTRVAL1_VAL << CIO2_LTRVAL13_VAL_SHIFT |
459 CIO2_LTRVAL1_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
460 base + CIO2_REG_LTRVAL01);
461 writel(CIO2_LTRVAL2_VAL << CIO2_LTRVAL02_VAL_SHIFT |
462 CIO2_LTRVAL2_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
463 CIO2_LTRVAL3_VAL << CIO2_LTRVAL13_VAL_SHIFT |
464 CIO2_LTRVAL3_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
465 base + CIO2_REG_LTRVAL23);
467 for (i = 0; i < CIO2_NUM_DMA_CHAN; i++) {
468 writel(0, base + CIO2_REG_CDMABA(i));
469 writel(0, base + CIO2_REG_CDMAC0(i));
470 writel(0, base + CIO2_REG_CDMAC1(i));
474 writel(PFN_DOWN(q->fbpt_bus_addr), base + CIO2_REG_CDMABA(CIO2_DMA_CHAN));
476 writel(num_buffers1 << CIO2_CDMAC0_FBPT_LEN_SHIFT |
477 FBPT_WIDTH << CIO2_CDMAC0_FBPT_WIDTH_SHIFT |
478 CIO2_CDMAC0_DMA_INTR_ON_FE |
479 CIO2_CDMAC0_FBPT_UPDATE_FIFO_FULL |
481 CIO2_CDMAC0_DMA_INTR_ON_FS |
482 CIO2_CDMAC0_DMA_HALTED, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
484 writel(1 << CIO2_CDMAC1_LINENUMUPDATE_SHIFT,
485 base + CIO2_REG_CDMAC1(CIO2_DMA_CHAN));
487 writel(0, base + CIO2_REG_PBM_FOPN_ABORT);
489 writel(CIO2_PXM_FRF_CFG_CRC_TH << CIO2_PXM_FRF_CFG_CRC_TH_SHIFT |
490 CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NR |
491 CIO2_PXM_FRF_CFG_MSK_ECC_RE |
492 CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NE,
493 base + CIO2_REG_PXM_FRF_CFG(q->csi2.port));
495 /* Clear interrupts */
496 writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
497 writel(~0, base + CIO2_REG_INT_STS_EXT_OE);
498 writel(~0, base + CIO2_REG_INT_STS_EXT_IE);
499 writel(~0, base + CIO2_REG_INT_STS);
501 /* Enable devices, starting from the last device in the pipe */
502 writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);
503 writel(1, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);
508 static void cio2_hw_exit(struct cio2_device *cio2, struct cio2_queue *q)
510 struct device *dev = &cio2->pci_dev->dev;
511 void __iomem *const base = cio2->base;
516 /* Disable CSI receiver and MIPI backend devices */
517 writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
518 writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
519 writel(0, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);
520 writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);
523 writel(0, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
524 ret = readl_poll_timeout(base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN),
525 value, value & CIO2_CDMAC0_DMA_HALTED,
528 dev_err(dev, "DMA %i cannot be halted\n", CIO2_DMA_CHAN);
530 for (i = 0; i < CIO2_NUM_PORTS; i++) {
531 writel(readl(base + CIO2_REG_PXM_FRF_CFG(i)) |
532 CIO2_PXM_FRF_CFG_ABORT, base + CIO2_REG_PXM_FRF_CFG(i));
533 writel(readl(base + CIO2_REG_PBM_FOPN_ABORT) |
534 CIO2_PBM_FOPN_ABORT(i), base + CIO2_REG_PBM_FOPN_ABORT);
538 static void cio2_buffer_done(struct cio2_device *cio2, unsigned int dma_chan)
540 struct device *dev = &cio2->pci_dev->dev;
541 struct cio2_queue *q = cio2->cur_queue;
542 struct cio2_fbpt_entry *entry;
543 u64 ns = ktime_get_ns();
545 if (dma_chan >= CIO2_QUEUES) {
546 dev_err(dev, "bad DMA channel %i\n", dma_chan);
550 entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
551 if (entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID) {
552 dev_warn(dev, "no ready buffers found on DMA channel %u\n",
557 /* Find out which buffer(s) are ready */
559 struct cio2_buffer *b;
561 b = q->bufs[q->bufs_first];
563 unsigned int received = entry[1].second_entry.num_of_bytes;
564 unsigned long payload =
565 vb2_get_plane_payload(&b->vbb.vb2_buf, 0);
567 q->bufs[q->bufs_first] = NULL;
568 atomic_dec(&q->bufs_queued);
569 dev_dbg(dev, "buffer %i done\n", b->vbb.vb2_buf.index);
571 b->vbb.vb2_buf.timestamp = ns;
572 b->vbb.field = V4L2_FIELD_NONE;
573 b->vbb.sequence = atomic_read(&q->frame_sequence);
574 if (payload != received)
576 "payload length is %lu, received %u\n",
578 vb2_buffer_done(&b->vbb.vb2_buf, VB2_BUF_STATE_DONE);
580 atomic_inc(&q->frame_sequence);
581 cio2_fbpt_entry_init_dummy(cio2, entry);
582 q->bufs_first = (q->bufs_first + 1) % CIO2_MAX_BUFFERS;
583 entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
584 } while (!(entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID));
587 static void cio2_queue_event_sof(struct cio2_device *cio2, struct cio2_queue *q)
590 * For the user space camera control algorithms it is essential
591 * to know when the reception of a frame has begun. That's often
592 * the best timing information to get from the hardware.
594 struct v4l2_event event = {
595 .type = V4L2_EVENT_FRAME_SYNC,
596 .u.frame_sync.frame_sequence = atomic_read(&q->frame_sequence),
599 v4l2_event_queue(q->subdev.devnode, &event);
602 static const char *const cio2_irq_errs[] = {
603 "single packet header error corrected",
604 "multiple packet header errors detected",
605 "payload checksum (CRC) error",
607 "reserved short packet data type detected",
608 "reserved long packet data type detected",
609 "incomplete long packet detected",
612 "DPHY start of transmission error",
613 "DPHY synchronization error",
615 "escape mode trigger event",
616 "escape mode ultra-low power state for data lane(s)",
617 "escape mode ultra-low power state exit for clock lane",
618 "inter-frame short packet discarded",
619 "inter-frame long packet discarded",
620 "non-matching Long Packet stalled",
623 static void cio2_irq_log_irq_errs(struct device *dev, u8 port, u32 status)
625 unsigned long csi2_status = status;
628 for_each_set_bit(i, &csi2_status, ARRAY_SIZE(cio2_irq_errs))
629 dev_err(dev, "CSI-2 receiver port %i: %s\n",
630 port, cio2_irq_errs[i]);
632 if (fls_long(csi2_status) >= ARRAY_SIZE(cio2_irq_errs))
633 dev_warn(dev, "unknown CSI2 error 0x%lx on port %i\n",
637 static const char *const cio2_port_errs[] = {
639 "DPHY not recoverable",
640 "ECC not recoverable",
647 static void cio2_irq_log_port_errs(struct device *dev, u8 port, u32 status)
649 unsigned long port_status = status;
652 for_each_set_bit(i, &port_status, ARRAY_SIZE(cio2_port_errs))
653 dev_err(dev, "port %i error %s\n", port, cio2_port_errs[i]);
656 static void cio2_irq_handle_once(struct cio2_device *cio2, u32 int_status)
658 struct device *dev = &cio2->pci_dev->dev;
659 void __iomem *const base = cio2->base;
661 if (int_status & CIO2_INT_IOOE) {
663 * Interrupt on Output Error:
664 * 1) SRAM is full and FS received, or
665 * 2) An invalid bit detected by DMA.
667 u32 oe_status, oe_clear;
669 oe_clear = readl(base + CIO2_REG_INT_STS_EXT_OE);
670 oe_status = oe_clear;
672 if (oe_status & CIO2_INT_EXT_OE_DMAOE_MASK) {
673 dev_err(dev, "DMA output error: 0x%x\n",
674 (oe_status & CIO2_INT_EXT_OE_DMAOE_MASK)
675 >> CIO2_INT_EXT_OE_DMAOE_SHIFT);
676 oe_status &= ~CIO2_INT_EXT_OE_DMAOE_MASK;
678 if (oe_status & CIO2_INT_EXT_OE_OES_MASK) {
679 dev_err(dev, "DMA output error on CSI2 buses: 0x%x\n",
680 (oe_status & CIO2_INT_EXT_OE_OES_MASK)
681 >> CIO2_INT_EXT_OE_OES_SHIFT);
682 oe_status &= ~CIO2_INT_EXT_OE_OES_MASK;
684 writel(oe_clear, base + CIO2_REG_INT_STS_EXT_OE);
686 dev_warn(dev, "unknown interrupt 0x%x on OE\n",
688 int_status &= ~CIO2_INT_IOOE;
691 if (int_status & CIO2_INT_IOC_MASK) {
692 /* DMA IO done -- frame ready */
696 for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
697 if (int_status & CIO2_INT_IOC(d)) {
698 clr |= CIO2_INT_IOC(d);
699 cio2_buffer_done(cio2, d);
704 if (int_status & CIO2_INT_IOS_IOLN_MASK) {
705 /* DMA IO starts or reached specified line */
709 for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
710 if (int_status & CIO2_INT_IOS_IOLN(d)) {
711 clr |= CIO2_INT_IOS_IOLN(d);
712 if (d == CIO2_DMA_CHAN)
713 cio2_queue_event_sof(cio2,
719 if (int_status & (CIO2_INT_IOIE | CIO2_INT_IOIRQ)) {
720 /* CSI2 receiver (error) interrupt */
724 ie_status = readl(base + CIO2_REG_INT_STS_EXT_IE);
726 for (port = 0; port < CIO2_NUM_PORTS; port++) {
727 u32 port_status = (ie_status >> (port * 8)) & 0xff;
729 cio2_irq_log_port_errs(dev, port, port_status);
731 if (ie_status & CIO2_INT_EXT_IE_IRQ(port)) {
732 void __iomem *csi_rx_base =
733 base + CIO2_REG_PIPE_BASE(port);
736 csi2_status = readl(csi_rx_base +
737 CIO2_REG_IRQCTRL_STATUS);
739 cio2_irq_log_irq_errs(dev, port, csi2_status);
742 csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
746 writel(ie_status, base + CIO2_REG_INT_STS_EXT_IE);
748 int_status &= ~(CIO2_INT_IOIE | CIO2_INT_IOIRQ);
752 dev_warn(dev, "unknown interrupt 0x%x on INT\n", int_status);
755 static irqreturn_t cio2_irq(int irq, void *cio2_ptr)
757 struct cio2_device *cio2 = cio2_ptr;
758 void __iomem *const base = cio2->base;
759 struct device *dev = &cio2->pci_dev->dev;
762 int_status = readl(base + CIO2_REG_INT_STS);
763 dev_dbg(dev, "isr enter - interrupt status 0x%x\n", int_status);
768 writel(int_status, base + CIO2_REG_INT_STS);
769 cio2_irq_handle_once(cio2, int_status);
770 int_status = readl(base + CIO2_REG_INT_STS);
772 dev_dbg(dev, "pending status 0x%x\n", int_status);
773 } while (int_status);
778 /**************** Videobuf2 interface ****************/
780 static void cio2_vb2_return_all_buffers(struct cio2_queue *q,
781 enum vb2_buffer_state state)
785 for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
787 atomic_dec(&q->bufs_queued);
788 vb2_buffer_done(&q->bufs[i]->vbb.vb2_buf,
795 static int cio2_vb2_queue_setup(struct vb2_queue *vq,
796 unsigned int *num_buffers,
797 unsigned int *num_planes,
798 unsigned int sizes[],
799 struct device *alloc_devs[])
801 struct cio2_device *cio2 = vb2_get_drv_priv(vq);
802 struct device *dev = &cio2->pci_dev->dev;
803 struct cio2_queue *q = vb2q_to_cio2_queue(vq);
806 if (*num_planes && *num_planes < q->format.num_planes)
809 for (i = 0; i < q->format.num_planes; ++i) {
810 if (*num_planes && sizes[i] < q->format.plane_fmt[i].sizeimage)
812 sizes[i] = q->format.plane_fmt[i].sizeimage;
816 *num_planes = q->format.num_planes;
817 *num_buffers = clamp_val(*num_buffers, 1, CIO2_MAX_BUFFERS);
819 /* Initialize buffer queue */
820 for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
822 cio2_fbpt_entry_init_dummy(cio2, &q->fbpt[i * CIO2_MAX_LOPS]);
824 atomic_set(&q->bufs_queued, 0);
831 /* Called after each buffer is allocated */
832 static int cio2_vb2_buf_init(struct vb2_buffer *vb)
834 struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
835 struct device *dev = &cio2->pci_dev->dev;
836 struct cio2_buffer *b = to_cio2_buffer(vb);
837 unsigned int pages = PFN_UP(vb->planes[0].length);
838 unsigned int lops = DIV_ROUND_UP(pages + 1, CIO2_LOP_ENTRIES);
840 struct sg_dma_page_iter sg_iter;
843 if (lops <= 0 || lops > CIO2_MAX_LOPS) {
844 dev_err(dev, "%s: bad buffer size (%i)\n", __func__,
845 vb->planes[0].length);
846 return -ENOSPC; /* Should never happen */
849 memset(b->lop, 0, sizeof(b->lop));
850 /* Allocate LOP table */
851 for (i = 0; i < lops; i++) {
852 b->lop[i] = dma_alloc_coherent(dev, PAGE_SIZE,
853 &b->lop_bus_addr[i], GFP_KERNEL);
859 sg = vb2_dma_sg_plane_desc(vb, 0);
863 if (sg->nents && sg->sgl)
864 b->offset = sg->sgl->offset;
867 for_each_sg_dma_page(sg->sgl, &sg_iter, sg->nents, 0) {
870 b->lop[i][j] = PFN_DOWN(sg_page_iter_dma_address(&sg_iter));
872 if (j == CIO2_LOP_ENTRIES) {
878 b->lop[i][j] = PFN_DOWN(cio2->dummy_page_bus_addr);
882 dma_free_coherent(dev, PAGE_SIZE, b->lop[i], b->lop_bus_addr[i]);
886 /* Transfer buffer ownership to cio2 */
887 static void cio2_vb2_buf_queue(struct vb2_buffer *vb)
889 struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
890 struct device *dev = &cio2->pci_dev->dev;
891 struct cio2_queue *q =
892 container_of(vb->vb2_queue, struct cio2_queue, vbq);
893 struct cio2_buffer *b = to_cio2_buffer(vb);
894 struct cio2_fbpt_entry *entry;
896 unsigned int i, j, next = q->bufs_next;
897 int bufs_queued = atomic_inc_return(&q->bufs_queued);
900 dev_dbg(dev, "queue buffer %d\n", vb->index);
903 * This code queues the buffer to the CIO2 DMA engine, which starts
904 * running once streaming has started. It is possible that this code
905 * gets preempted due to increased CPU load, in which case the driver
906 * does not get an opportunity to queue new buffers to the CIO2 DMA
907 * engine. When the DMA engine encounters an FBPT entry without the
908 * VALID bit set, it halts, and a restart of both the DMA engine and
909 * the sensor is then required to continue streaming.
910 * This is undesirable, although highly unlikely given that the DMA
911 * engine has 32 FBPT entries to process before it could run into an
912 * FBPT entry without the VALID bit set. We mitigate the risk further
913 * by disabling interrupts for the duration of this queueing.
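* (As an illustrative figure only: with 32 FBPT entries and a hypothetical
* 30 fps stream, the DMA needs roughly a second to walk the whole table,
* so the window for this race is generous.)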
915 local_irq_save(flags);
917 fbpt_rp = (readl(cio2->base + CIO2_REG_CDMARI(CIO2_DMA_CHAN))
918 >> CIO2_CDMARI_FBPT_RP_SHIFT)
919 & CIO2_CDMARI_FBPT_RP_MASK;
922 * fbpt_rp is the FBPT entry that the DMA is currently working
923 * on, but since it could jump to the next entry at any time,
924 * assume that we might already be there.
926 fbpt_rp = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;
928 if (bufs_queued <= 1 || fbpt_rp == next)
929 /* Buffers were drained */
930 next = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;
932 for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
934 * We have allocated CIO2_MAX_BUFFERS circularly for the
935 * hw, the user has requested N buffer queue. The driver
936 * ensures N <= CIO2_MAX_BUFFERS and guarantees that whenever
937 * user queues a buffer, there necessarily is a free buffer.
939 if (!q->bufs[next]) {
941 entry = &q->fbpt[next * CIO2_MAX_LOPS];
942 cio2_fbpt_entry_init_buf(cio2, b, entry);
943 local_irq_restore(flags);
944 q->bufs_next = (next + 1) % CIO2_MAX_BUFFERS;
945 for (j = 0; j < vb->num_planes; j++)
946 vb2_set_plane_payload(vb, j,
947 q->format.plane_fmt[j].sizeimage);
951 dev_dbg(dev, "entry %i was full!\n", next);
952 next = (next + 1) % CIO2_MAX_BUFFERS;
955 local_irq_restore(flags);
956 dev_err(dev, "error: all cio2 entries were full!\n");
957 atomic_dec(&q->bufs_queued);
958 vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
961 /* Called when each buffer is freed */
962 static void cio2_vb2_buf_cleanup(struct vb2_buffer *vb)
964 struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
965 struct device *dev = &cio2->pci_dev->dev;
966 struct cio2_buffer *b = to_cio2_buffer(vb);
970 for (i = 0; i < CIO2_MAX_LOPS; i++) {
972 dma_free_coherent(dev, PAGE_SIZE,
973 b->lop[i], b->lop_bus_addr[i]);
977 static int cio2_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
979 struct cio2_queue *q = vb2q_to_cio2_queue(vq);
980 struct cio2_device *cio2 = vb2_get_drv_priv(vq);
981 struct device *dev = &cio2->pci_dev->dev;
985 atomic_set(&q->frame_sequence, 0);
987 r = pm_runtime_resume_and_get(dev);
989 dev_info(dev, "failed to set power %d\n", r);
993 r = video_device_pipeline_start(&q->vdev, &q->pipe);
997 r = cio2_hw_init(cio2, q);
1001 /* Start streaming on sensor */
1002 r = v4l2_subdev_call(q->sensor, video, s_stream, 1);
1004 goto fail_csi2_subdev;
1006 cio2->streaming = true;
1011 cio2_hw_exit(cio2, q);
1013 video_device_pipeline_stop(&q->vdev);
1015 dev_dbg(dev, "failed to start streaming (%d)\n", r);
1016 cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_QUEUED);
1017 pm_runtime_put(dev);
1022 static void cio2_vb2_stop_streaming(struct vb2_queue *vq)
1024 struct cio2_queue *q = vb2q_to_cio2_queue(vq);
1025 struct cio2_device *cio2 = vb2_get_drv_priv(vq);
1026 struct device *dev = &cio2->pci_dev->dev;
1028 if (v4l2_subdev_call(q->sensor, video, s_stream, 0))
1029 dev_err(dev, "failed to stop sensor streaming\n");
1031 cio2_hw_exit(cio2, q);
1032 synchronize_irq(cio2->pci_dev->irq);
1033 cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_ERROR);
1034 video_device_pipeline_stop(&q->vdev);
1035 pm_runtime_put(dev);
1036 cio2->streaming = false;
1039 static const struct vb2_ops cio2_vb2_ops = {
1040 .buf_init = cio2_vb2_buf_init,
1041 .buf_queue = cio2_vb2_buf_queue,
1042 .buf_cleanup = cio2_vb2_buf_cleanup,
1043 .queue_setup = cio2_vb2_queue_setup,
1044 .start_streaming = cio2_vb2_start_streaming,
1045 .stop_streaming = cio2_vb2_stop_streaming,
1046 .wait_prepare = vb2_ops_wait_prepare,
1047 .wait_finish = vb2_ops_wait_finish,
1050 /**************** V4L2 interface ****************/
1052 static int cio2_v4l2_querycap(struct file *file, void *fh,
1053 struct v4l2_capability *cap)
1055 strscpy(cap->driver, CIO2_NAME, sizeof(cap->driver));
1056 strscpy(cap->card, CIO2_DEVICE_NAME, sizeof(cap->card));
1061 static int cio2_v4l2_enum_fmt(struct file *file, void *fh,
1062 struct v4l2_fmtdesc *f)
1064 if (f->index >= ARRAY_SIZE(formats))
1067 f->pixelformat = formats[f->index].fourcc;
1072 /* The format is validated in cio2_video_link_validate() */
1073 static int cio2_v4l2_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
1075 struct cio2_queue *q = file_to_cio2_queue(file);
1077 f->fmt.pix_mp = q->format;
1082 static int cio2_v4l2_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
1084 const struct ipu3_cio2_fmt *fmt;
1085 struct v4l2_pix_format_mplane *mpix = &f->fmt.pix_mp;
1087 fmt = cio2_find_format(&mpix->pixelformat, NULL);
1091 /* Only supports up to 4224x3136 */
1092 if (mpix->width > CIO2_IMAGE_MAX_WIDTH)
1093 mpix->width = CIO2_IMAGE_MAX_WIDTH;
1094 if (mpix->height > CIO2_IMAGE_MAX_HEIGHT)
1095 mpix->height = CIO2_IMAGE_MAX_HEIGHT;
1097 mpix->num_planes = 1;
1098 mpix->pixelformat = fmt->fourcc;
1099 mpix->colorspace = V4L2_COLORSPACE_RAW;
1100 mpix->field = V4L2_FIELD_NONE;
1101 mpix->plane_fmt[0].bytesperline = cio2_bytesperline(mpix->width);
1102 mpix->plane_fmt[0].sizeimage = mpix->plane_fmt[0].bytesperline *
1106 mpix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
1107 mpix->quantization = V4L2_QUANTIZATION_DEFAULT;
1108 mpix->xfer_func = V4L2_XFER_FUNC_DEFAULT;
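/*
* As a hypothetical example, a 1936x1096 request ends up with
* bytesperline = 2496 and sizeimage = 2496 * 1096 = 2735616 bytes.
*/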
1113 static int cio2_v4l2_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
1115 struct cio2_queue *q = file_to_cio2_queue(file);
1117 cio2_v4l2_try_fmt(file, fh, f);
1118 q->format = f->fmt.pix_mp;
1124 cio2_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
1126 if (input->index > 0)
1129 strscpy(input->name, "camera", sizeof(input->name));
1130 input->type = V4L2_INPUT_TYPE_CAMERA;
1136 cio2_video_g_input(struct file *file, void *fh, unsigned int *input)
1144 cio2_video_s_input(struct file *file, void *fh, unsigned int input)
1146 return input == 0 ? 0 : -EINVAL;
1149 static const struct v4l2_file_operations cio2_v4l2_fops = {
1150 .owner = THIS_MODULE,
1151 .unlocked_ioctl = video_ioctl2,
1152 .open = v4l2_fh_open,
1153 .release = vb2_fop_release,
1154 .poll = vb2_fop_poll,
1155 .mmap = vb2_fop_mmap,
1158 static const struct v4l2_ioctl_ops cio2_v4l2_ioctl_ops = {
1159 .vidioc_querycap = cio2_v4l2_querycap,
1160 .vidioc_enum_fmt_vid_cap = cio2_v4l2_enum_fmt,
1161 .vidioc_g_fmt_vid_cap_mplane = cio2_v4l2_g_fmt,
1162 .vidioc_s_fmt_vid_cap_mplane = cio2_v4l2_s_fmt,
1163 .vidioc_try_fmt_vid_cap_mplane = cio2_v4l2_try_fmt,
1164 .vidioc_reqbufs = vb2_ioctl_reqbufs,
1165 .vidioc_create_bufs = vb2_ioctl_create_bufs,
1166 .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
1167 .vidioc_querybuf = vb2_ioctl_querybuf,
1168 .vidioc_qbuf = vb2_ioctl_qbuf,
1169 .vidioc_dqbuf = vb2_ioctl_dqbuf,
1170 .vidioc_streamon = vb2_ioctl_streamon,
1171 .vidioc_streamoff = vb2_ioctl_streamoff,
1172 .vidioc_expbuf = vb2_ioctl_expbuf,
1173 .vidioc_enum_input = cio2_video_enum_input,
1174 .vidioc_g_input = cio2_video_g_input,
1175 .vidioc_s_input = cio2_video_s_input,
1178 static int cio2_subdev_subscribe_event(struct v4l2_subdev *sd,
1180 struct v4l2_event_subscription *sub)
1182 if (sub->type != V4L2_EVENT_FRAME_SYNC)
1185 /* Line number. For now only zero accepted. */
1189 return v4l2_event_subscribe(fh, sub, 0, NULL);
1192 static int cio2_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
1194 struct v4l2_mbus_framefmt *format;
1195 const struct v4l2_mbus_framefmt fmt_default = {
1198 .code = formats[0].mbus_code,
1199 .field = V4L2_FIELD_NONE,
1200 .colorspace = V4L2_COLORSPACE_RAW,
1201 .ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT,
1202 .quantization = V4L2_QUANTIZATION_DEFAULT,
1203 .xfer_func = V4L2_XFER_FUNC_DEFAULT,
1206 /* Initialize try_fmt */
1207 format = v4l2_subdev_get_try_format(sd, fh->state, CIO2_PAD_SINK);
1208 *format = fmt_default;
1211 format = v4l2_subdev_get_try_format(sd, fh->state, CIO2_PAD_SOURCE);
1212 *format = fmt_default;
1218 * cio2_subdev_get_fmt - Handle get format by pads subdev method
1219 * @sd: pointer to v4l2 subdev structure
1220 * @sd_state: V4L2 subdev state
1221 * @fmt: pointer to v4l2 subdev format structure
1222 * Return: -EINVAL or zero on success
1224 static int cio2_subdev_get_fmt(struct v4l2_subdev *sd,
1225 struct v4l2_subdev_state *sd_state,
1226 struct v4l2_subdev_format *fmt)
1228 struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
1230 mutex_lock(&q->subdev_lock);
1232 if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
1233 fmt->format = *v4l2_subdev_get_try_format(sd, sd_state,
1236 fmt->format = q->subdev_fmt;
1238 mutex_unlock(&q->subdev_lock);
1244 * cio2_subdev_set_fmt - Handle set format by pads subdev method
1245 * @sd: pointer to v4l2 subdev structure
1246 * @sd_state: V4L2 subdev state
1247 * @fmt: pointer to v4l2 subdev format structure
1248 * Return: -EINVAL or zero on success
1250 static int cio2_subdev_set_fmt(struct v4l2_subdev *sd,
1251 struct v4l2_subdev_state *sd_state,
1252 struct v4l2_subdev_format *fmt)
1254 struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
1255 struct v4l2_mbus_framefmt *mbus;
1256 u32 mbus_code = fmt->format.code;
1260 * Only allow setting sink pad format;
1261 * source always propagates from sink
1263 if (fmt->pad == CIO2_PAD_SOURCE)
1264 return cio2_subdev_get_fmt(sd, sd_state, fmt);
1266 if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
1267 mbus = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
1269 mbus = &q->subdev_fmt;
1271 fmt->format.code = formats[0].mbus_code;
1273 for (i = 0; i < ARRAY_SIZE(formats); i++) {
1274 if (formats[i].mbus_code == mbus_code) {
1275 fmt->format.code = mbus_code;
1280 fmt->format.width = min(fmt->format.width, CIO2_IMAGE_MAX_WIDTH);
1281 fmt->format.height = min(fmt->format.height, CIO2_IMAGE_MAX_HEIGHT);
1282 fmt->format.field = V4L2_FIELD_NONE;
1284 mutex_lock(&q->subdev_lock);
1285 *mbus = fmt->format;
1286 mutex_unlock(&q->subdev_lock);
1291 static int cio2_subdev_enum_mbus_code(struct v4l2_subdev *sd,
1292 struct v4l2_subdev_state *sd_state,
1293 struct v4l2_subdev_mbus_code_enum *code)
1295 if (code->index >= ARRAY_SIZE(formats))
1298 code->code = formats[code->index].mbus_code;
1302 static int cio2_subdev_link_validate_get_format(struct media_pad *pad,
1303 struct v4l2_subdev_format *fmt)
1305 if (is_media_entity_v4l2_subdev(pad->entity)) {
1306 struct v4l2_subdev *sd =
1307 media_entity_to_v4l2_subdev(pad->entity);
1309 memset(fmt, 0, sizeof(*fmt));
1310 fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
1311 fmt->pad = pad->index;
1312 return v4l2_subdev_call(sd, pad, get_fmt, NULL, fmt);
1318 static int cio2_video_link_validate(struct media_link *link)
1320 struct media_entity *entity = link->sink->entity;
1321 struct video_device *vd = media_entity_to_video_device(entity);
1322 struct cio2_queue *q = container_of(vd, struct cio2_queue, vdev);
1323 struct cio2_device *cio2 = video_get_drvdata(vd);
1324 struct device *dev = &cio2->pci_dev->dev;
1325 struct v4l2_subdev_format source_fmt;
1328 if (!media_pad_remote_pad_first(entity->pads)) {
1329 dev_info(dev, "video node %s pad not connected\n", vd->name);
1333 ret = cio2_subdev_link_validate_get_format(link->source, &source_fmt);
1337 if (source_fmt.format.width != q->format.width ||
1338 source_fmt.format.height != q->format.height) {
1339 dev_err(dev, "Wrong width or height %ux%u (%ux%u expected)\n",
1340 q->format.width, q->format.height,
1341 source_fmt.format.width, source_fmt.format.height);
1345 if (!cio2_find_format(&q->format.pixelformat, &source_fmt.format.code))
1351 static const struct v4l2_subdev_core_ops cio2_subdev_core_ops = {
1352 .subscribe_event = cio2_subdev_subscribe_event,
1353 .unsubscribe_event = v4l2_event_subdev_unsubscribe,
1356 static const struct v4l2_subdev_internal_ops cio2_subdev_internal_ops = {
1357 .open = cio2_subdev_open,
1360 static const struct v4l2_subdev_pad_ops cio2_subdev_pad_ops = {
1361 .link_validate = v4l2_subdev_link_validate_default,
1362 .get_fmt = cio2_subdev_get_fmt,
1363 .set_fmt = cio2_subdev_set_fmt,
1364 .enum_mbus_code = cio2_subdev_enum_mbus_code,
1367 static const struct v4l2_subdev_ops cio2_subdev_ops = {
1368 .core = &cio2_subdev_core_ops,
1369 .pad = &cio2_subdev_pad_ops,
1372 /******* V4L2 sub-device asynchronous registration callbacks***********/
1374 struct sensor_async_subdev {
1375 struct v4l2_async_connection asd;
1376 struct csi2_bus_info csi2;
1379 #define to_sensor_asd(__asd) \
1380 container_of_const(__asd, struct sensor_async_subdev, asd)
1382 /* The .bound() notifier callback when a match is found */
1383 static int cio2_notifier_bound(struct v4l2_async_notifier *notifier,
1384 struct v4l2_subdev *sd,
1385 struct v4l2_async_connection *asd)
1387 struct cio2_device *cio2 = to_cio2_device(notifier);
1388 struct sensor_async_subdev *s_asd = to_sensor_asd(asd);
1389 struct cio2_queue *q;
1391 if (cio2->queue[s_asd->csi2.port].sensor)
1394 q = &cio2->queue[s_asd->csi2.port];
1396 q->csi2 = s_asd->csi2;
1398 q->csi_rx_base = cio2->base + CIO2_REG_PIPE_BASE(q->csi2.port);
1403 /* The .unbind callback */
1404 static void cio2_notifier_unbind(struct v4l2_async_notifier *notifier,
1405 struct v4l2_subdev *sd,
1406 struct v4l2_async_connection *asd)
1408 struct cio2_device *cio2 = to_cio2_device(notifier);
1409 struct sensor_async_subdev *s_asd = to_sensor_asd(asd);
1411 cio2->queue[s_asd->csi2.port].sensor = NULL;
1414 /* .complete() is called after all subdevices have been located */
1415 static int cio2_notifier_complete(struct v4l2_async_notifier *notifier)
1417 struct cio2_device *cio2 = to_cio2_device(notifier);
1418 struct device *dev = &cio2->pci_dev->dev;
1419 struct sensor_async_subdev *s_asd;
1420 struct v4l2_async_connection *asd;
1421 struct cio2_queue *q;
1424 list_for_each_entry(asd, &cio2->notifier.done_list, asc_entry) {
1425 s_asd = to_sensor_asd(asd);
1426 q = &cio2->queue[s_asd->csi2.port];
1428 ret = media_entity_get_fwnode_pad(&q->sensor->entity,
1429 s_asd->asd.match.fwnode,
1430 MEDIA_PAD_FL_SOURCE);
1432 dev_err(dev, "no pad for endpoint %pfw (%d)\n",
1433 s_asd->asd.match.fwnode, ret);
1437 ret = media_create_pad_link(&q->sensor->entity, ret,
1438 &q->subdev.entity, CIO2_PAD_SINK,
1441 dev_err(dev, "failed to create link for %s (endpoint %pfw, error %d)\n",
1442 q->sensor->name, s_asd->asd.match.fwnode, ret);
1447 return v4l2_device_register_subdev_nodes(&cio2->v4l2_dev);
1450 static const struct v4l2_async_notifier_operations cio2_async_ops = {
1451 .bound = cio2_notifier_bound,
1452 .unbind = cio2_notifier_unbind,
1453 .complete = cio2_notifier_complete,
1456 static int cio2_parse_firmware(struct cio2_device *cio2)
1458 struct device *dev = &cio2->pci_dev->dev;
1462 for (i = 0; i < CIO2_NUM_PORTS; i++) {
1463 struct v4l2_fwnode_endpoint vep = {
1464 .bus_type = V4L2_MBUS_CSI2_DPHY
1466 struct sensor_async_subdev *s_asd;
1467 struct fwnode_handle *ep;
1469 ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), i, 0,
1470 FWNODE_GRAPH_ENDPOINT_NEXT);
1474 ret = v4l2_fwnode_endpoint_parse(ep, &vep);
1478 s_asd = v4l2_async_nf_add_fwnode_remote(&cio2->notifier, ep,
1480 sensor_async_subdev);
1481 if (IS_ERR(s_asd)) {
1482 ret = PTR_ERR(s_asd);
1486 s_asd->csi2.port = vep.base.port;
1487 s_asd->csi2.lanes = vep.bus.mipi_csi2.num_data_lanes;
1489 fwnode_handle_put(ep);
1494 fwnode_handle_put(ep);
1499 * Proceed even without sensors connected to allow the device to suspend.
1502 cio2->notifier.ops = &cio2_async_ops;
1503 ret = v4l2_async_nf_register(&cio2->notifier);
1505 dev_err(dev, "failed to register async notifier : %d\n", ret);
1510 /**************** Queue initialization ****************/
1511 static const struct media_entity_operations cio2_media_ops = {
1512 .link_validate = v4l2_subdev_link_validate,
1515 static const struct media_entity_operations cio2_video_entity_ops = {
1516 .link_validate = cio2_video_link_validate,
1519 static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
1521 static const u32 default_width = 1936;
1522 static const u32 default_height = 1096;
1523 const struct ipu3_cio2_fmt dflt_fmt = formats[0];
1524 struct device *dev = &cio2->pci_dev->dev;
1525 struct video_device *vdev = &q->vdev;
1526 struct vb2_queue *vbq = &q->vbq;
1527 struct v4l2_subdev *subdev = &q->subdev;
1528 struct v4l2_mbus_framefmt *fmt;
1531 /* Initialize miscellaneous variables */
1532 mutex_init(&q->lock);
1533 mutex_init(&q->subdev_lock);
1535 /* Initialize formats to default values */
1536 fmt = &q->subdev_fmt;
1537 fmt->width = default_width;
1538 fmt->height = default_height;
1539 fmt->code = dflt_fmt.mbus_code;
1540 fmt->field = V4L2_FIELD_NONE;
1542 q->format.width = default_width;
1543 q->format.height = default_height;
1544 q->format.pixelformat = dflt_fmt.fourcc;
1545 q->format.colorspace = V4L2_COLORSPACE_RAW;
1546 q->format.field = V4L2_FIELD_NONE;
1547 q->format.num_planes = 1;
1548 q->format.plane_fmt[0].bytesperline =
1549 cio2_bytesperline(q->format.width);
1550 q->format.plane_fmt[0].sizeimage = q->format.plane_fmt[0].bytesperline *
1553 /* Initialize fbpt */
1554 r = cio2_fbpt_init(cio2, q);
1558 /* Initialize media entities */
1559 q->subdev_pads[CIO2_PAD_SINK].flags = MEDIA_PAD_FL_SINK |
1560 MEDIA_PAD_FL_MUST_CONNECT;
1561 q->subdev_pads[CIO2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
1562 subdev->entity.ops = &cio2_media_ops;
1563 subdev->internal_ops = &cio2_subdev_internal_ops;
1564 r = media_entity_pads_init(&subdev->entity, CIO2_PADS, q->subdev_pads);
1566 dev_err(dev, "failed initialize subdev media entity (%d)\n", r);
1567 goto fail_subdev_media_entity;
1570 q->vdev_pad.flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT;
1571 vdev->entity.ops = &cio2_video_entity_ops;
1572 r = media_entity_pads_init(&vdev->entity, 1, &q->vdev_pad);
1574 dev_err(dev, "failed initialize videodev media entity (%d)\n",
1576 goto fail_vdev_media_entity;
1579 /* Initialize subdev */
1580 v4l2_subdev_init(subdev, &cio2_subdev_ops);
1581 subdev->flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
1582 subdev->owner = THIS_MODULE;
1583 snprintf(subdev->name, sizeof(subdev->name),
1584 CIO2_ENTITY_NAME " %td", q - cio2->queue);
1585 subdev->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
1586 v4l2_set_subdevdata(subdev, cio2);
1587 r = v4l2_device_register_subdev(&cio2->v4l2_dev, subdev);
1589 dev_err(dev, "failed initialize subdev (%d)\n", r);
1593 /* Initialize vbq */
1594 vbq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
1595 vbq->io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF;
1596 vbq->ops = &cio2_vb2_ops;
1597 vbq->mem_ops = &vb2_dma_sg_memops;
1598 vbq->buf_struct_size = sizeof(struct cio2_buffer);
1599 vbq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
1600 vbq->min_buffers_needed = 1;
1601 vbq->drv_priv = cio2;
1602 vbq->lock = &q->lock;
1603 r = vb2_queue_init(vbq);
1605 dev_err(dev, "failed to initialize videobuf2 queue (%d)\n", r);
1609 /* Initialize vdev */
1610 snprintf(vdev->name, sizeof(vdev->name),
1611 "%s %td", CIO2_NAME, q - cio2->queue);
1612 vdev->release = video_device_release_empty;
1613 vdev->fops = &cio2_v4l2_fops;
1614 vdev->ioctl_ops = &cio2_v4l2_ioctl_ops;
1615 vdev->lock = &cio2->lock;
1616 vdev->v4l2_dev = &cio2->v4l2_dev;
1617 vdev->queue = &q->vbq;
1618 vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_STREAMING;
1619 video_set_drvdata(vdev, cio2);
1620 r = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
1622 dev_err(dev, "failed to register video device (%d)\n", r);
1626 /* Create link from CIO2 subdev to output node */
1627 r = media_create_pad_link(
1628 &subdev->entity, CIO2_PAD_SOURCE, &vdev->entity, 0,
1629 MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
1636 vb2_video_unregister_device(&q->vdev);
1638 v4l2_device_unregister_subdev(subdev);
1640 media_entity_cleanup(&vdev->entity);
1641 fail_vdev_media_entity:
1642 media_entity_cleanup(&subdev->entity);
1643 fail_subdev_media_entity:
1644 cio2_fbpt_exit(q, dev);
1646 mutex_destroy(&q->subdev_lock);
1647 mutex_destroy(&q->lock);
1652 static void cio2_queue_exit(struct cio2_device *cio2, struct cio2_queue *q)
1654 vb2_video_unregister_device(&q->vdev);
1655 media_entity_cleanup(&q->vdev.entity);
1656 v4l2_device_unregister_subdev(&q->subdev);
1657 media_entity_cleanup(&q->subdev.entity);
1658 cio2_fbpt_exit(q, &cio2->pci_dev->dev);
1659 mutex_destroy(&q->subdev_lock);
1660 mutex_destroy(&q->lock);
1663 static int cio2_queues_init(struct cio2_device *cio2)
1667 for (i = 0; i < CIO2_QUEUES; i++) {
1668 r = cio2_queue_init(cio2, &cio2->queue[i]);
1673 if (i == CIO2_QUEUES)
1676 for (i--; i >= 0; i--)
1677 cio2_queue_exit(cio2, &cio2->queue[i]);
1682 static void cio2_queues_exit(struct cio2_device *cio2)
1686 for (i = 0; i < CIO2_QUEUES; i++)
1687 cio2_queue_exit(cio2, &cio2->queue[i]);
1690 static int cio2_check_fwnode_graph(struct fwnode_handle *fwnode)
1692 struct fwnode_handle *endpoint;
1694 if (IS_ERR_OR_NULL(fwnode))
1697 endpoint = fwnode_graph_get_next_endpoint(fwnode, NULL);
1699 fwnode_handle_put(endpoint);
1703 return cio2_check_fwnode_graph(fwnode->secondary);
1706 /**************** PCI interface ****************/
1708 static int cio2_pci_probe(struct pci_dev *pci_dev,
1709 const struct pci_device_id *id)
1711 struct device *dev = &pci_dev->dev;
1712 struct fwnode_handle *fwnode = dev_fwnode(dev);
1713 struct cio2_device *cio2;
1717 * On some platforms no connections to sensors are defined in firmware.
1718 * If the device has no endpoints, we can try to build them as
1719 * software_nodes parsed from SSDB.
1721 r = cio2_check_fwnode_graph(fwnode);
1723 if (fwnode && !IS_ERR_OR_NULL(fwnode->secondary)) {
1724 dev_err(dev, "fwnode graph has no endpoints connected\n");
1728 r = ipu_bridge_init(pci_dev);
1733 cio2 = devm_kzalloc(dev, sizeof(*cio2), GFP_KERNEL);
1736 cio2->pci_dev = pci_dev;
1738 r = pcim_enable_device(pci_dev);
1740 dev_err(dev, "failed to enable device (%d)\n", r);
1744 dev_info(dev, "device 0x%x (rev: 0x%x)\n",
1745 pci_dev->device, pci_dev->revision);
1747 r = pcim_iomap_regions(pci_dev, 1 << CIO2_PCI_BAR, pci_name(pci_dev));
1749 dev_err(dev, "failed to remap I/O memory (%d)\n", r);
1753 cio2->base = pcim_iomap_table(pci_dev)[CIO2_PCI_BAR];
1755 pci_set_drvdata(pci_dev, cio2);
1757 pci_set_master(pci_dev);
1759 r = dma_set_mask(&pci_dev->dev, CIO2_DMA_MASK);
1761 dev_err(dev, "failed to set DMA mask (%d)\n", r);
1765 r = pci_enable_msi(pci_dev);
1767 dev_err(dev, "failed to enable MSI (%d)\n", r);
1771 r = cio2_fbpt_init_dummy(cio2);
1775 mutex_init(&cio2->lock);
1777 cio2->media_dev.dev = dev;
1778 strscpy(cio2->media_dev.model, CIO2_DEVICE_NAME,
1779 sizeof(cio2->media_dev.model));
1780 cio2->media_dev.hw_revision = 0;
1782 media_device_init(&cio2->media_dev);
1783 r = media_device_register(&cio2->media_dev);
1785 goto fail_mutex_destroy;
1787 cio2->v4l2_dev.mdev = &cio2->media_dev;
1788 r = v4l2_device_register(dev, &cio2->v4l2_dev);
1790 dev_err(dev, "failed to register V4L2 device (%d)\n", r);
1791 goto fail_media_device_unregister;
1794 r = cio2_queues_init(cio2);
1796 goto fail_v4l2_device_unregister;
1798 v4l2_async_nf_init(&cio2->notifier, &cio2->v4l2_dev);
1800 /* Register notifier for subdevices we care */
1801 r = cio2_parse_firmware(cio2);
1803 goto fail_clean_notifier;
1805 r = devm_request_irq(dev, pci_dev->irq, cio2_irq, IRQF_SHARED,
1808 dev_err(dev, "failed to request IRQ (%d)\n", r);
1809 goto fail_clean_notifier;
1812 pm_runtime_put_noidle(dev);
1813 pm_runtime_allow(dev);
1817 fail_clean_notifier:
1818 v4l2_async_nf_unregister(&cio2->notifier);
1819 v4l2_async_nf_cleanup(&cio2->notifier);
1820 cio2_queues_exit(cio2);
1821 fail_v4l2_device_unregister:
1822 v4l2_device_unregister(&cio2->v4l2_dev);
1823 fail_media_device_unregister:
1824 media_device_unregister(&cio2->media_dev);
1825 media_device_cleanup(&cio2->media_dev);
1827 mutex_destroy(&cio2->lock);
1828 cio2_fbpt_exit_dummy(cio2);
1833 static void cio2_pci_remove(struct pci_dev *pci_dev)
1835 struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1837 media_device_unregister(&cio2->media_dev);
1838 v4l2_async_nf_unregister(&cio2->notifier);
1839 v4l2_async_nf_cleanup(&cio2->notifier);
1840 cio2_queues_exit(cio2);
1841 cio2_fbpt_exit_dummy(cio2);
1842 v4l2_device_unregister(&cio2->v4l2_dev);
1843 media_device_cleanup(&cio2->media_dev);
1844 mutex_destroy(&cio2->lock);
1846 pm_runtime_forbid(&pci_dev->dev);
1847 pm_runtime_get_noresume(&pci_dev->dev);
1850 static int __maybe_unused cio2_runtime_suspend(struct device *dev)
1852 struct pci_dev *pci_dev = to_pci_dev(dev);
1853 struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1854 void __iomem *const base = cio2->base;
1857 writel(CIO2_D0I3C_I3, base + CIO2_REG_D0I3C);
1858 dev_dbg(dev, "cio2 runtime suspend.\n");
1860 pci_read_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, &pm);
1861 pm = (pm >> CIO2_PMCSR_D0D3_SHIFT) << CIO2_PMCSR_D0D3_SHIFT;
1862 pm |= CIO2_PMCSR_D3;
1863 pci_write_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, pm);
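/*
* The read-modify-write above clears the low CIO2_PMCSR_D0D3_SHIFT bits of
* PMCSR (the D0-D3 power state field) and then ORs in CIO2_PMCSR_D3 to
* request a low-power state; the runtime resume path performs the same
* clear without setting D3, returning the device to D0.
*/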
1868 static int __maybe_unused cio2_runtime_resume(struct device *dev)
1870 struct pci_dev *pci_dev = to_pci_dev(dev);
1871 struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1872 void __iomem *const base = cio2->base;
1875 writel(CIO2_D0I3C_RR, base + CIO2_REG_D0I3C);
1876 dev_dbg(dev, "cio2 runtime resume.\n");
1878 pci_read_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, &pm);
1879 pm = (pm >> CIO2_PMCSR_D0D3_SHIFT) << CIO2_PMCSR_D0D3_SHIFT;
1880 pci_write_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, pm);
1886 * Helper function to advance all the elements of a circular buffer by "start" positions.
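*
* For example, with elems = 4 and start = 2 the contents {A, B, C, D} are
* rearranged in place into {C, D, A, B}.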
1889 static void arrange(void *ptr, size_t elem_size, size_t elems, size_t start)
1895 { start, elems - 1 },
1898 #define CHUNK_SIZE(a) ((a)->end - (a)->begin + 1)
1900 /* Loop as long as we have out-of-place entries */
1901 while (CHUNK_SIZE(&arr[0]) && CHUNK_SIZE(&arr[1])) {
1905 * Find the number of entries that can be arranged on this iteration.
1908 size0 = min(CHUNK_SIZE(&arr[0]), CHUNK_SIZE(&arr[1]));
1910 /* Swap the entries in two parts of the array. */
1911 for (i = 0; i < size0; i++) {
1912 u8 *d = ptr + elem_size * (arr[1].begin + i);
1913 u8 *s = ptr + elem_size * (arr[0].begin + i);
1916 for (j = 0; j < elem_size; j++)
1920 if (CHUNK_SIZE(&arr[0]) > CHUNK_SIZE(&arr[1])) {
1921 /* The end of the first array remains unarranged. */
1922 arr[0].begin += size0;
1925 * The first array is fully arranged so we proceed
1926 * handling the next one.
1928 arr[0].begin = arr[1].begin;
1929 arr[0].end = arr[1].begin + size0 - 1;
1930 arr[1].begin += size0;
1935 static void cio2_fbpt_rearrange(struct cio2_device *cio2, struct cio2_queue *q)
1939 for (i = 0, j = q->bufs_first; i < CIO2_MAX_BUFFERS;
1940 i++, j = (j + 1) % CIO2_MAX_BUFFERS)
1944 if (i == CIO2_MAX_BUFFERS)
1948 arrange(q->fbpt, sizeof(struct cio2_fbpt_entry) * CIO2_MAX_LOPS,
1949 CIO2_MAX_BUFFERS, j);
1950 arrange(q->bufs, sizeof(struct cio2_buffer *),
1951 CIO2_MAX_BUFFERS, j);
1955 * DMA clears the valid bit when accessing the buffer.
1956 * When stopping the stream in the suspend callback, some of the buffers
1957 * may be left in an invalid state. After resume, when the DMA meets an
1958 * invalid buffer, it will halt and stop receiving new data.
1959 * To avoid DMA halting, set the valid bit for all buffers in FBPT.
1961 for (i = 0; i < CIO2_MAX_BUFFERS; i++)
1962 cio2_fbpt_entry_enable(cio2, q->fbpt + i * CIO2_MAX_LOPS);
1965 static int __maybe_unused cio2_suspend(struct device *dev)
1967 struct pci_dev *pci_dev = to_pci_dev(dev);
1968 struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1969 struct cio2_queue *q = cio2->cur_queue;
1972 dev_dbg(dev, "cio2 suspend\n");
1973 if (!cio2->streaming)
1977 r = v4l2_subdev_call(q->sensor, video, s_stream, 0);
1979 dev_err(dev, "failed to stop sensor streaming\n");
1983 cio2_hw_exit(cio2, q);
1984 synchronize_irq(pci_dev->irq);
1986 pm_runtime_force_suspend(dev);
1989 * Upon resume, the hardware starts processing the FBPT entries from the
1990 * beginning, so relocate the queued buffers to the FBPT head before suspend.
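* For example, if the first queued buffer currently sits at index 5, the
* rotation below moves it to FBPT entry 0 so that the DMA, which restarts
* from entry 0, picks it up first.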
1992 cio2_fbpt_rearrange(cio2, q);
1999 static int __maybe_unused cio2_resume(struct device *dev)
2001 struct cio2_device *cio2 = dev_get_drvdata(dev);
2002 struct cio2_queue *q = cio2->cur_queue;
2005 dev_dbg(dev, "cio2 resume\n");
2006 if (!cio2->streaming)
2009 r = pm_runtime_force_resume(dev);
2011 dev_err(dev, "failed to set power %d\n", r);
2015 r = cio2_hw_init(cio2, q);
2017 dev_err(dev, "failed to init cio2 hw\n");
2021 r = v4l2_subdev_call(q->sensor, video, s_stream, 1);
2023 dev_err(dev, "failed to start sensor streaming\n");
2024 cio2_hw_exit(cio2, q);
2030 static const struct dev_pm_ops cio2_pm_ops = {
2031 SET_RUNTIME_PM_OPS(&cio2_runtime_suspend, &cio2_runtime_resume, NULL)
2032 SET_SYSTEM_SLEEP_PM_OPS(&cio2_suspend, &cio2_resume)
2035 static const struct pci_device_id cio2_pci_id_table[] = {
2036 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, CIO2_PCI_ID) },
2040 MODULE_DEVICE_TABLE(pci, cio2_pci_id_table);
2042 static struct pci_driver cio2_pci_driver = {
2044 .id_table = cio2_pci_id_table,
2045 .probe = cio2_pci_probe,
2046 .remove = cio2_pci_remove,
2052 module_pci_driver(cio2_pci_driver);
2054 MODULE_AUTHOR("Tuukka Toivonen <tuukka.toivonen@intel.com>");
2055 MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
2056 MODULE_AUTHOR("Jian Xu Zheng");
2057 MODULE_AUTHOR("Yuning Pu <yuning.pu@intel.com>");
2058 MODULE_AUTHOR("Yong Zhi <yong.zhi@intel.com>");
2059 MODULE_LICENSE("GPL v2");
2060 MODULE_DESCRIPTION("IPU3 CIO2 driver");
2061 MODULE_IMPORT_NS(INTEL_IPU_BRIDGE);