// SPDX-License-Identifier: GPL-2.0
/*
 * Raspberry Pi HEVC driver
 *
 * Copyright (C) 2020 Raspberry Pi (Trading) Ltd
 *
 * Based on the Cedrus VPU driver, that is:
 *
 * Copyright (C) 2016 Florent Revest <florent.revest@free-electrons.com>
 * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com>
 * Copyright (C) 2018 Bootlin
 */
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of_reserved_mem.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>

#include <media/videobuf2-core.h>
#include <media/v4l2-mem2mem.h>

#include "rpivid.h"
#include "rpivid_hw.h"

static void pre_irq(struct rpivid_dev *dev, struct rpivid_hw_irq_ent *ient,
                    rpivid_irq_callback cb, void *v,
                    struct rpivid_hw_irq_ctrl *ictl)
{
        unsigned long flags;

        if (ictl->irq) {
                v4l2_err(&dev->v4l2_dev, "Attempt to claim IRQ when already claimed\n");
                return;
        }

        ient->cb = cb;
        ient->v = v;

        spin_lock_irqsave(&ictl->lock, flags);
        ictl->irq = ient;
        ictl->no_sched++;
        spin_unlock_irqrestore(&ictl->lock, flags);
}

/* Should be called from inside ictl->lock */
static inline bool sched_enabled(const struct rpivid_hw_irq_ctrl * const ictl)
{
        return ictl->no_sched <= 0 && ictl->enable;
}

/* Should be called from inside ictl->lock & after checking sched_enabled() */
static inline void set_claimed(struct rpivid_hw_irq_ctrl * const ictl)
{
        if (ictl->enable > 0)
                --ictl->enable;
        ictl->no_sched = 1;
}

/* Should be called from inside ictl->lock */
static struct rpivid_hw_irq_ent *get_sched(struct rpivid_hw_irq_ctrl * const ictl)
{
        struct rpivid_hw_irq_ent *ient;

        if (!sched_enabled(ictl))
                return NULL;

        ient = ictl->claim;
        if (!ient)
                return NULL;
        ictl->claim = ient->next;

        set_claimed(ictl);
        return ient;
}

/* Run a callback & check to see if there is anything else to run.
 * Each callback executes with no_sched non-zero; once it returns we drop
 * that count under the lock and, if scheduling is enabled again, pull the
 * next queued claim, so queued claims drain in order without re-entering
 * the IRQ handler.
 */
static void sched_cb(struct rpivid_dev * const dev,
                     struct rpivid_hw_irq_ctrl * const ictl,
                     struct rpivid_hw_irq_ent *ient)
{
        while (ient) {
                unsigned long flags;

                ient->cb(dev, ient->v);

                spin_lock_irqsave(&ictl->lock, flags);

                /* Always dec no_sched after cb exec - must have been set
                 * on entry to cb
                 */
                --ictl->no_sched;
                ient = get_sched(ictl);

                spin_unlock_irqrestore(&ictl->lock, flags);
        }
}

/* Should only ever be called from its own IRQ cb so no lock required */
static void pre_thread(struct rpivid_dev *dev,
                       struct rpivid_hw_irq_ent *ient,
                       rpivid_irq_callback cb, void *v,
                       struct rpivid_hw_irq_ctrl *ictl)
{
        ient->cb = cb;
        ient->v = v;
        ictl->irq = ient;
        ictl->thread_reqed = true;
        ictl->no_sched++;       /* This is unwound in do_thread */
}
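
/*
 * pre_thread() runs from the hard IRQ callback, hence no locking here;
 * rpivid_irq_irq() then sees thread_reqed set and returns IRQ_WAKE_THREAD,
 * and do_thread() consumes the entry under the lock from the threaded
 * handler.
 */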

// Called in irq context
static void do_irq(struct rpivid_dev * const dev,
                   struct rpivid_hw_irq_ctrl * const ictl)
{
        struct rpivid_hw_irq_ent *ient;
        unsigned long flags;

        spin_lock_irqsave(&ictl->lock, flags);
        ient = ictl->irq;
        ictl->irq = NULL;
        spin_unlock_irqrestore(&ictl->lock, flags);

        sched_cb(dev, ictl, ient);
}

static void do_claim(struct rpivid_dev * const dev,
                     struct rpivid_hw_irq_ent *ient,
                     const rpivid_irq_callback cb, void * const v,
                     struct rpivid_hw_irq_ctrl * const ictl)
{
        unsigned long flags;

        ient->next = NULL;
        ient->cb = cb;
        ient->v = v;

        spin_lock_irqsave(&ictl->lock, flags);

        if (ictl->claim) {
                // Queue is not empty so add to the end
                ictl->tail->next = ient;
                ictl->tail = ient;
                ient = NULL;
        } else if (!sched_enabled(ictl)) {
                // Queue is empty but other activity is in progress so
                // queue this entry
                ictl->claim = ient;
                ictl->tail = ient;
                ient = NULL;
        } else {
                // Nothing else going on - schedule immediately and
                // prevent anything else scheduling claims
                set_claimed(ictl);
        }

        spin_unlock_irqrestore(&ictl->lock, flags);

        sched_cb(dev, ictl, ient);
}
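
/*
 * Illustrative sequence (a sketch, not driver code; e1, e2, cb1, cb2 and
 * ctx1, ctx2 are made-up names): with enables left unlimited, a claim made
 * while the controller is idle runs at once, while one made while a
 * callback is still in flight is queued and then dispatched from
 * sched_cb() when that callback returns:
 *
 *	do_claim(dev, &e1, cb1, ctx1, ictl);	// idle: cb1 runs immediately
 *	do_claim(dev, &e2, cb2, ctx2, ictl);	// busy: queued, runs after cb1
 */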

/* Enable n claims.
 * n < 0   set to unlimited (default on init)
 * n = 0   if previously unlimited then disable otherwise nop
 * n > 0   if previously unlimited then set to n enables
 *         otherwise add n enables
 * The enable count is automatically decremented every time a claim is run
 */
static void do_enable_claim(struct rpivid_dev * const dev,
                            int n,
                            struct rpivid_hw_irq_ctrl * const ictl)
{
        unsigned long flags;
        struct rpivid_hw_irq_ent *ient;

        spin_lock_irqsave(&ictl->lock, flags);
        ictl->enable = n < 0 ? -1 : ictl->enable <= 0 ? n : ictl->enable + n;
        ient = get_sched(ictl);
        spin_unlock_irqrestore(&ictl->lock, flags);

        sched_cb(dev, ictl, ient);
}
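
/*
 * Worked example of the rules above (a sketch; the call sequence is
 * illustrative, not taken from the driver). Starting from init with
 * unlimited enables (-1):
 *
 *	do_enable_claim(dev, 0, ictl);	// -1 -> 0: further claims blocked
 *	do_enable_claim(dev, 2, ictl);	// 0 -> 2: next two claims may run
 *	// each claim that runs decrements the count: 2 -> 1 -> 0
 *	do_enable_claim(dev, -1, ictl);	// back to unlimited
 */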

static void ictl_init(struct rpivid_hw_irq_ctrl * const ictl, int enables)
{
        spin_lock_init(&ictl->lock);
        ictl->claim = NULL;
        ictl->tail = NULL;
        ictl->irq = NULL;
        ictl->no_sched = 0;
        ictl->enable = enables;
        ictl->thread_reqed = false;
}

static void ictl_uninit(struct rpivid_hw_irq_ctrl * const ictl)
{
        // Nothing to do
}

#if !OPT_DEBUG_POLL_IRQ
static irqreturn_t rpivid_irq_irq(int irq, void *data)
{
        struct rpivid_dev * const dev = data;
        __u32 ictrl;

        ictrl = irq_read(dev, ARG_IC_ICTRL);
        if (!(ictrl & ARG_IC_ICTRL_ALL_IRQ_MASK)) {
                v4l2_warn(&dev->v4l2_dev, "IRQ but no IRQ bits set\n");
                return IRQ_NONE;
        }

        // Cancel any/all irqs
        irq_write(dev, ARG_IC_ICTRL, ictrl & ~ARG_IC_ICTRL_SET_ZERO_MASK);

        // Service Active2 before Active1 so Phase 1 can transition to Phase 2
        // without delay
        if (ictrl & ARG_IC_ICTRL_ACTIVE2_INT_SET)
                do_irq(dev, &dev->ic_active2);
        if (ictrl & ARG_IC_ICTRL_ACTIVE1_INT_SET)
                do_irq(dev, &dev->ic_active1);

        return dev->ic_active1.thread_reqed || dev->ic_active2.thread_reqed ?
                IRQ_WAKE_THREAD : IRQ_HANDLED;
}

static void do_thread(struct rpivid_dev * const dev,
                      struct rpivid_hw_irq_ctrl *const ictl)
{
        unsigned long flags;
        struct rpivid_hw_irq_ent *ient = NULL;

        spin_lock_irqsave(&ictl->lock, flags);

        if (ictl->thread_reqed) {
                ient = ictl->irq;
                ictl->thread_reqed = false;
                ictl->irq = NULL;
        }

        spin_unlock_irqrestore(&ictl->lock, flags);

        sched_cb(dev, ictl, ient);
}

static irqreturn_t rpivid_irq_thread(int irq, void *data)
{
        struct rpivid_dev * const dev = data;

        do_thread(dev, &dev->ic_active1);
        do_thread(dev, &dev->ic_active2);

        return IRQ_HANDLED;
}
#endif
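
/*
 * When OPT_DEBUG_POLL_IRQ is set the handlers above are compiled out and
 * rpivid_hw_probe() does not request the interrupt; completion is then
 * presumably detected by polling elsewhere in the driver.
 */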

/* May only be called from Active1 CB
 * IRQs should not be expected until execution continues in the cb
 */
void rpivid_hw_irq_active1_thread(struct rpivid_dev *dev,
                                  struct rpivid_hw_irq_ent *ient,
                                  rpivid_irq_callback thread_cb, void *ctx)
{
        pre_thread(dev, ient, thread_cb, ctx, &dev->ic_active1);
}
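
/*
 * Typical Active1 call sequence (a hypothetical sketch; phase1_ready,
 * phase1_irq, phase1_thread and ent are illustrative names, not symbols
 * from this driver):
 *
 *	static struct rpivid_hw_irq_ent ent; // must outlive the sequence
 *
 *	static void phase1_thread(struct rpivid_dev *dev, void *ctx)
 *	{
 *		// threaded handler context: sleepable work goes here
 *	}
 *
 *	static void phase1_irq(struct rpivid_dev *dev, void *ctx)
 *	{
 *		// hard IRQ context: defer anything that may sleep
 *		rpivid_hw_irq_active1_thread(dev, &ent, phase1_thread, ctx);
 *	}
 *
 *	static void phase1_ready(struct rpivid_dev *dev, void *ctx)
 *	{
 *		// claim granted: program the hw, then wait for its IRQ
 *		rpivid_hw_irq_active1_irq(dev, &ent, phase1_irq, ctx);
 *	}
 *
 *	rpivid_hw_irq_active1_claim(dev, &ent, phase1_ready, ctx);
 */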

void rpivid_hw_irq_active1_enable_claim(struct rpivid_dev *dev,
                                        int n)
{
        do_enable_claim(dev, n, &dev->ic_active1);
}

void rpivid_hw_irq_active1_claim(struct rpivid_dev *dev,
                                 struct rpivid_hw_irq_ent *ient,
                                 rpivid_irq_callback ready_cb, void *ctx)
{
        do_claim(dev, ient, ready_cb, ctx, &dev->ic_active1);
}

void rpivid_hw_irq_active1_irq(struct rpivid_dev *dev,
                               struct rpivid_hw_irq_ent *ient,
                               rpivid_irq_callback irq_cb, void *ctx)
{
        pre_irq(dev, ient, irq_cb, ctx, &dev->ic_active1);
}

void rpivid_hw_irq_active2_claim(struct rpivid_dev *dev,
                                 struct rpivid_hw_irq_ent *ient,
                                 rpivid_irq_callback ready_cb, void *ctx)
{
        do_claim(dev, ient, ready_cb, ctx, &dev->ic_active2);
}

void rpivid_hw_irq_active2_irq(struct rpivid_dev *dev,
                               struct rpivid_hw_irq_ent *ient,
                               rpivid_irq_callback irq_cb, void *ctx)
{
        pre_irq(dev, ient, irq_cb, ctx, &dev->ic_active2);
}
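
/*
 * The Active2 entry points mirror the Active1 claim/irq pair; no threaded
 * variant is exported here, although the underlying pre_thread()/do_thread()
 * machinery (which rpivid_irq_thread() already runs for ic_active2) would
 * support one.
 */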

int rpivid_hw_probe(struct rpivid_dev *dev)
{
        struct resource *res;
        __u32 irq_stat;
        int irq_dec;
        int ret = 0;

        ictl_init(&dev->ic_active1, RPIVID_P2BUF_COUNT);
        ictl_init(&dev->ic_active2, RPIVID_ICTL_ENABLE_UNLIMITED);

        res = platform_get_resource_byname(dev->pdev, IORESOURCE_MEM, "intc");
        if (!res)
                return -ENODEV;

        // devm_ioremap() returns NULL on failure, not an ERR_PTR
        dev->base_irq = devm_ioremap(dev->dev, res->start, resource_size(res));
        if (!dev->base_irq)
                return -ENOMEM;

        res = platform_get_resource_byname(dev->pdev, IORESOURCE_MEM, "hevc");
        if (!res)
                return -ENODEV;

        dev->base_h265 = devm_ioremap(dev->dev, res->start, resource_size(res));
        if (!dev->base_h265)
                return -ENOMEM;

        dev->clock = devm_clk_get(&dev->pdev->dev, "hevc");
        if (IS_ERR(dev->clock))
                return PTR_ERR(dev->clock);

        dev->cache_align = dma_get_cache_alignment();

        // Disable IRQs & reset anything pending
        irq_write(dev, 0,
                  ARG_IC_ICTRL_ACTIVE1_EN_SET | ARG_IC_ICTRL_ACTIVE2_EN_SET);
        irq_stat = irq_read(dev, 0);
        irq_write(dev, 0, irq_stat);

#if !OPT_DEBUG_POLL_IRQ
        irq_dec = platform_get_irq(dev->pdev, 0);
        if (irq_dec < 0)
                return irq_dec;
        ret = devm_request_threaded_irq(dev->dev, irq_dec,
                                        rpivid_irq_irq,
                                        rpivid_irq_thread,
                                        0, dev_name(dev->dev), dev);
        if (ret) {
                dev_err(dev->dev, "Failed to request IRQ - %d\n", ret);
                return ret;
        }
#endif
        return ret;
}

void rpivid_hw_remove(struct rpivid_dev *dev)
{
        // The devm-managed IRQ is freed automatically on unbind,
        // as are the devm_ioremap() mappings
        ictl_uninit(&dev->ic_active1);
        ictl_uninit(&dev->ic_active2);
}
366