1 // SPDX-License-Identifier: GPL-2.0
3 * Raspberry Pi HEVC driver
5 * Copyright (C) 2020 Raspberry Pi (Trading) Ltd
7 * Based on the Cedrus VPU driver, that is:
9 * Copyright (C) 2016 Florent Revest <florent.revest@free-electrons.com>
10 * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com>
11 * Copyright (C) 2018 Bootlin
13 #include <linux/clk.h>
14 #include <linux/component.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/interrupt.h>
18 #include <linux/of_reserved_mem.h>
19 #include <linux/of_device.h>
20 #include <linux/of_platform.h>
21 #include <linux/platform_device.h>
22 #include <linux/regmap.h>
23 #include <linux/reset.h>
25 #include <media/videobuf2-core.h>
26 #include <media/v4l2-mem2mem.h>
29 #include "rpivid_hw.h"
/*
 * Record (cb, v) in ient and attach it to ictl as the callback to run
 * when this interrupt source fires.
 * NOTE(review): extract is missing lines; comments cover visible code only.
 */
31 static void pre_irq(struct rpivid_dev *dev, struct rpivid_hw_irq_ent *ient,
32 rpivid_irq_callback cb, void *v,
33 struct rpivid_hw_irq_ctrl *ictl)
/* Double-claim is a caller bug - report it */
38 v4l2_err(&dev->v4l2_dev, "Attempt to claim IRQ when already claimed\n");
/* ictl state is updated under its spinlock (IRQ-safe variant) */
45 spin_lock_irqsave(&ictl->lock, flags);
48 spin_unlock_irqrestore(&ictl->lock, flags);
51 /* Should be called from inside ictl->lock */
/*
 * True when queued claims may be scheduled: no activity is holding
 * scheduling off (no_sched <= 0) and the enable count/flag is non-zero.
 */
52 static inline bool sched_enabled(const struct rpivid_hw_irq_ctrl * const ictl)
54 return ictl->no_sched <= 0 && ictl->enable;
57 /* Should be called from inside ictl->lock & after checking sched_enabled() */
/* Mark ictl as having a claim in flight.
 * NOTE(review): function body not visible in this extract. */
58 static inline void set_claimed(struct rpivid_hw_irq_ctrl * const ictl)
65 /* Should be called from inside ictl->lock */
/*
 * Pop the next queued claim entry from ictl, or return NULL when
 * scheduling is currently disabled (or, presumably, the queue is empty -
 * the empty-queue check is not visible in this extract).
 */
66 static struct rpivid_hw_irq_ent *get_sched(struct rpivid_hw_irq_ctrl * const ictl)
68 struct rpivid_hw_irq_ent *ient;
70 if (!sched_enabled(ictl))
/* Unlink the head of the claim queue */
76 ictl->claim = ient->next;
82 /* Run a callback & check to see if there is anything else to run */
83 static void sched_cb(struct rpivid_dev * const dev,
84 struct rpivid_hw_irq_ctrl * const ictl,
85 struct rpivid_hw_irq_ent *ient)
/* Callback runs outside the lock; it may re-queue work on ictl */
90 ient->cb(dev, ient->v);
92 spin_lock_irqsave(&ictl->lock, flags);
94 /* Always dec no_sched after cb exec - must have been set
/* With the lock held, see if another queued entry is now runnable */
98 ient = get_sched(ictl);
100 spin_unlock_irqrestore(&ictl->lock, flags);
104 /* Should only ever be called from its own IRQ cb so no lock required */
/*
 * Ask the threaded IRQ handler to run (cb, v) for this ictl; the hard
 * IRQ handler uses thread_reqed to decide whether to wake the thread.
 */
105 static void pre_thread(struct rpivid_dev *dev,
106 struct rpivid_hw_irq_ent *ient,
107 rpivid_irq_callback cb, void *v,
108 struct rpivid_hw_irq_ctrl *ictl)
/* Flag the pending thread request and block claim scheduling until the
 * thread has run */
113 ictl->thread_reqed = true;
114 ictl->no_sched++; /* This is unwound in do_thread */
117 // Called in irq context
/*
 * Hard-IRQ service for one interrupt source: detach the registered
 * callback entry under the lock, then run it (and any follow-on queued
 * claims) via sched_cb.
 * NOTE(review): extract is missing lines; comments cover visible code only.
 */
118 static void do_irq(struct rpivid_dev * const dev,
119 struct rpivid_hw_irq_ctrl * const ictl)
121 struct rpivid_hw_irq_ent *ient;
124 spin_lock_irqsave(&ictl->lock, flags);
127 spin_unlock_irqrestore(&ictl->lock, flags);
129 sched_cb(dev, ictl, ient);
/*
 * Claim the interrupt source for (cb, v): either queue the entry behind
 * existing work or, if nothing else is in progress, run it immediately.
 * NOTE(review): extract is missing lines; comments cover visible code only.
 */
132 static void do_claim(struct rpivid_dev * const dev,
133 struct rpivid_hw_irq_ent *ient,
134 const rpivid_irq_callback cb, void * const v,
135 struct rpivid_hw_irq_ctrl * const ictl)
143 spin_lock_irqsave(&ictl->lock, flags);
146 // If we have a Q then add to end
147 ictl->tail->next = ient;
150 } else if (!sched_enabled(ictl)) {
151 // Empty Q but other activity in progress so Q
156 // Nothing else going on - schedule immediately and
157 // prevent anything else scheduling claims
161 spin_unlock_irqrestore(&ictl->lock, flags);
/* Immediate-schedule path: run the callback outside the lock */
163 sched_cb(dev, ictl, ient);
/* Adjust the claim-enable count for an interrupt source:
167 * n < 0 set to unlimited (default on init)
168 * n = 0 if previously unlimited then disable otherwise nop
169 * n > 0 if previously unlimited then set to n enables
170 * otherwise add n enables
171 * The enable count is automatically decremented every time a claim is run
 */
173 static void do_enable_claim(struct rpivid_dev * const dev,
175 struct rpivid_hw_irq_ctrl * const ictl)
178 struct rpivid_hw_irq_ent *ient;
180 spin_lock_irqsave(&ictl->lock, flags);
/* -1 encodes "unlimited"; otherwise merge n per the table above */
181 ictl->enable = n < 0 ? -1 : ictl->enable <= 0 ? n : ictl->enable + n;
/* Newly-added enables may make a queued claim runnable now */
182 ient = get_sched(ictl);
183 spin_unlock_irqrestore(&ictl->lock, flags);
185 sched_cb(dev, ictl, ient);
/*
 * Initialise an interrupt-control structure; "enables" seeds the
 * claim-enable count (see do_enable_claim for its semantics).
 * NOTE(review): extract is missing lines; comments cover visible code only.
 */
188 static void ictl_init(struct rpivid_hw_irq_ctrl * const ictl, int enables)
190 spin_lock_init(&ictl->lock);
195 ictl->enable = enables;
196 ictl->thread_reqed = false;
/* Tear down an interrupt-control structure.
 * NOTE(review): function body not visible in this extract. */
199 static void ictl_uninit(struct rpivid_hw_irq_ctrl * const ictl)
204 #if !OPT_DEBUG_POLL_IRQ
/*
 * Hard IRQ handler: read and acknowledge the interrupt-control status,
 * dispatch each active phase, and wake the threaded handler if either
 * phase requested it.
 * NOTE(review): extract is missing lines; comments cover visible code only.
 */
205 static irqreturn_t rpivid_irq_irq(int irq, void *data)
207 struct rpivid_dev * const dev = data;
210 ictrl = irq_read(dev, ARG_IC_ICTRL);
/* Spurious interrupt: no cause bit set */
211 if (!(ictrl & ARG_IC_ICTRL_ALL_IRQ_MASK)) {
212 v4l2_warn(&dev->v4l2_dev, "IRQ but no IRQ bits set\n");
216 // Cancel any/all irqs
/* Write back status with the must-be-zero bits cleared to ack */
217 irq_write(dev, ARG_IC_ICTRL, ictrl & ~ARG_IC_ICTRL_SET_ZERO_MASK);
219 // Service Active2 before Active1 so Phase 1 can transition to Phase 2
221 if (ictrl & ARG_IC_ICTRL_ACTIVE2_INT_SET)
222 do_irq(dev, &dev->ic_active2);
223 if (ictrl & ARG_IC_ICTRL_ACTIVE1_INT_SET)
224 do_irq(dev, &dev->ic_active1);
/* Wake the threaded handler only when a callback asked for it */
226 return dev->ic_active1.thread_reqed || dev->ic_active2.thread_reqed ?
227 IRQ_WAKE_THREAD : IRQ_HANDLED;
/*
 * Threaded-IRQ service for one interrupt source: consume a pending
 * thread request (set by pre_thread) under the lock and run its
 * callback.  Also unwinds the no_sched taken in pre_thread (per the
 * comment there); that line is not visible in this extract.
 */
230 static void do_thread(struct rpivid_dev * const dev,
231 struct rpivid_hw_irq_ctrl *const ictl)
234 struct rpivid_hw_irq_ent *ient = NULL;
236 spin_lock_irqsave(&ictl->lock, flags);
238 if (ictl->thread_reqed) {
/* Request is consumed exactly once */
240 ictl->thread_reqed = false;
244 spin_unlock_irqrestore(&ictl->lock, flags);
246 sched_cb(dev, ictl, ient);
/* Threaded IRQ handler: service any thread requests for both phases. */
249 static irqreturn_t rpivid_irq_thread(int irq, void *data)
251 struct rpivid_dev * const dev = data;
253 do_thread(dev, &dev->ic_active1);
254 do_thread(dev, &dev->ic_active2);
260 /* May only be called from Active1 CB
261 * IRQs should not be expected until execution continues in the cb
 * Public wrapper: request thread_cb(ctx) be run from the threaded IRQ
 * handler for the Active1 phase.
 */
263 void rpivid_hw_irq_active1_thread(struct rpivid_dev *dev,
264 struct rpivid_hw_irq_ent *ient,
265 rpivid_irq_callback thread_cb, void *ctx)
267 pre_thread(dev, ient, thread_cb, ctx, &dev->ic_active1);
/* Public wrapper: adjust the Active1 claim-enable count (see
 * do_enable_claim for the semantics of n). */
270 void rpivid_hw_irq_active1_enable_claim(struct rpivid_dev *dev,
273 do_enable_claim(dev, n, &dev->ic_active1);
/* Public wrapper: claim the Active1 phase; ready_cb(ctx) runs when the
 * claim is scheduled. */
276 void rpivid_hw_irq_active1_claim(struct rpivid_dev *dev,
277 struct rpivid_hw_irq_ent *ient,
278 rpivid_irq_callback ready_cb, void *ctx)
280 do_claim(dev, ient, ready_cb, ctx, &dev->ic_active1);
/* Public wrapper: register irq_cb(ctx) to run when the Active1
 * interrupt fires. */
283 void rpivid_hw_irq_active1_irq(struct rpivid_dev *dev,
284 struct rpivid_hw_irq_ent *ient,
285 rpivid_irq_callback irq_cb, void *ctx)
287 pre_irq(dev, ient, irq_cb, ctx, &dev->ic_active1);
/* Public wrapper: claim the Active2 phase; ready_cb(ctx) runs when the
 * claim is scheduled. */
290 void rpivid_hw_irq_active2_claim(struct rpivid_dev *dev,
291 struct rpivid_hw_irq_ent *ient,
292 rpivid_irq_callback ready_cb, void *ctx)
294 do_claim(dev, ient, ready_cb, ctx, &dev->ic_active2);
/* Public wrapper: register irq_cb(ctx) to run when the Active2
 * interrupt fires. */
297 void rpivid_hw_irq_active2_irq(struct rpivid_dev *dev,
298 struct rpivid_hw_irq_ent *ient,
299 rpivid_irq_callback irq_cb, void *ctx)
301 pre_irq(dev, ient, irq_cb, ctx, &dev->ic_active2);
/*
 * Probe-time hardware setup: init both interrupt controls, map the
 * "intc" and "hevc" register regions, get the hevc clock, quiesce any
 * pending interrupts and (unless polling) request the threaded IRQ.
 * Returns 0 on success or a negative errno.
 * NOTE(review): extract is missing lines; comments cover visible code only.
 */
304 int rpivid_hw_probe(struct rpivid_dev *dev)
306 struct resource *res;
/* Active1 claims are bounded by the phase-2 buffer count; Active2 is
 * unlimited */
311 ictl_init(&dev->ic_active1, RPIVID_P2BUF_COUNT);
312 ictl_init(&dev->ic_active2, RPIVID_ICTL_ENABLE_UNLIMITED);
314 res = platform_get_resource_byname(dev->pdev, IORESOURCE_MEM, "intc");
/* NOTE(review): devm_ioremap returns NULL on failure, not ERR_PTR -
 * the IS_ERR checks below look wrong; confirm against full source */
318 dev->base_irq = devm_ioremap(dev->dev, res->start, resource_size(res));
319 if (IS_ERR(dev->base_irq))
320 return PTR_ERR(dev->base_irq);
322 res = platform_get_resource_byname(dev->pdev, IORESOURCE_MEM, "hevc");
326 dev->base_h265 = devm_ioremap(dev->dev, res->start, resource_size(res));
327 if (IS_ERR(dev->base_h265))
328 return PTR_ERR(dev->base_h265);
330 dev->clock = devm_clk_get(&dev->pdev->dev, "hevc");
331 if (IS_ERR(dev->clock))
332 return PTR_ERR(dev->clock);
334 dev->cache_align = dma_get_cache_alignment();
336 // Disable IRQs & reset anything pending
338 ARG_IC_ICTRL_ACTIVE1_EN_SET | ARG_IC_ICTRL_ACTIVE2_EN_SET);
/* Write back the current status to clear latched interrupt bits */
339 irq_stat = irq_read(dev, 0);
340 irq_write(dev, 0, irq_stat);
342 #if !OPT_DEBUG_POLL_IRQ
343 irq_dec = platform_get_irq(dev->pdev, 0);
/* Hard handler + threaded handler share dev as context */
346 ret = devm_request_threaded_irq(dev->dev, irq_dec,
349 0, dev_name(dev->dev), dev);
351 dev_err(dev->dev, "Failed to request IRQ - %d\n", ret);
/* Remove-time teardown; devm handles IRQ and mappings, so only the
 * interrupt controls need explicit uninit. */
359 void rpivid_hw_remove(struct rpivid_dev *dev)
361 // IRQ auto freed on unload so no need to do it here
362 // ioremap auto freed on unload
363 ictl_uninit(&dev->ic_active1)
364 ictl_uninit(&dev->ic_active2);