// SPDX-License-Identifier: GPL-2.0
/*
 * c8sectpfe-core.c - C8SECTPFE STi DVB driver
 *
 * Copyright (c) STMicroelectronics 2015
 *
 *   Author: Peter Bennett <peter.bennett@st.com>
 *           Peter Griffin <peter.griffin@linaro.org>
 */
#include <linux/atomic.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dvb/dmx.h>
#include <linux/dvb/frontend.h>
#include <linux/errno.h>
#include <linux/firmware.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_gpio.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/usb.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/wait.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pinctrl/pinctrl.h>

#include "c8sectpfe-core.h"
#include "c8sectpfe-common.h"
#include "c8sectpfe-debugfs.h"
#include <media/dmxdev.h>
#include <media/dvb_demux.h>
#include <media/dvb_frontend.h>
#include <media/dvb_net.h>
42 #define FIRMWARE_MEMDMA "pti_memdma_h407.elf"
43 MODULE_FIRMWARE(FIRMWARE_MEMDMA);
45 #define PID_TABLE_SIZE 1024
48 static int load_c8sectpfe_fw(struct c8sectpfei *fei);
50 #define TS_PKT_SIZE 188
51 #define HEADER_SIZE (4)
52 #define PACKET_SIZE (TS_PKT_SIZE+HEADER_SIZE)
54 #define FEI_ALIGNMENT (32)
55 /* hw requires minimum of 8*PACKET_SIZE and padded to 8byte boundary */
56 #define FEI_BUFFER_SIZE (8*PACKET_SIZE*340)
60 static void c8sectpfe_timer_interrupt(struct timer_list *t)
62 struct c8sectpfei *fei = from_timer(fei, t, timer);
63 struct channel_info *channel;
66 /* iterate through input block channels */
67 for (chan_num = 0; chan_num < fei->tsin_count; chan_num++) {
68 channel = fei->channel_data[chan_num];
70 /* is this descriptor initialised and TP enabled */
71 if (channel->irec && readl(channel->irec + DMA_PRDS_TPENABLE))
72 tasklet_schedule(&channel->tsklet);
75 fei->timer.expires = jiffies + msecs_to_jiffies(POLL_MSECS);
76 add_timer(&fei->timer);
79 static void channel_swdemux_tsklet(struct tasklet_struct *t)
81 struct channel_info *channel = from_tasklet(channel, t, tsklet);
82 struct c8sectpfei *fei;
84 int pos, num_packets, n, size;
87 if (unlikely(!channel || !channel->irec))
92 wp = readl(channel->irec + DMA_PRDS_BUSWP_TP(0));
93 rp = readl(channel->irec + DMA_PRDS_BUSRP_TP(0));
95 pos = rp - channel->back_buffer_busaddr;
99 wp = channel->back_buffer_busaddr + FEI_BUFFER_SIZE;
102 num_packets = size / PACKET_SIZE;
104 /* manage cache so data is visible to CPU */
105 dma_sync_single_for_cpu(fei->dev,
110 buf = channel->back_buffer_aligned;
113 "chan=%d channel=%p num_packets = %d, buf = %p, pos = 0x%x\n\trp=0x%lx, wp=0x%lx\n",
114 channel->tsin_id, channel, num_packets, buf, pos, rp, wp);
116 for (n = 0; n < num_packets; n++) {
117 dvb_dmx_swfilter_packets(
119 demux[channel->demux_mapping].dvb_demux,
125 /* advance the read pointer */
126 if (wp == (channel->back_buffer_busaddr + FEI_BUFFER_SIZE))
127 writel(channel->back_buffer_busaddr, channel->irec +
128 DMA_PRDS_BUSRP_TP(0));
130 writel(wp, channel->irec + DMA_PRDS_BUSRP_TP(0));
133 static int c8sectpfe_start_feed(struct dvb_demux_feed *dvbdmxfeed)
135 struct dvb_demux *demux = dvbdmxfeed->demux;
136 struct stdemux *stdemux = (struct stdemux *)demux->priv;
137 struct c8sectpfei *fei = stdemux->c8sectpfei;
138 struct channel_info *channel;
140 unsigned long *bitmap;
143 switch (dvbdmxfeed->type) {
149 dev_err(fei->dev, "%s:%d Error bailing\n"
150 , __func__, __LINE__);
154 if (dvbdmxfeed->type == DMX_TYPE_TS) {
155 switch (dvbdmxfeed->pes_type) {
158 case DMX_PES_TELETEXT:
163 dev_err(fei->dev, "%s:%d Error bailing\n"
164 , __func__, __LINE__);
169 if (!atomic_read(&fei->fw_loaded)) {
170 ret = load_c8sectpfe_fw(fei);
175 mutex_lock(&fei->lock);
177 channel = fei->channel_data[stdemux->tsin_index];
179 bitmap = channel->pid_buffer_aligned;
181 /* 8192 is a special PID */
182 if (dvbdmxfeed->pid == 8192) {
183 tmp = readl(fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
184 tmp &= ~C8SECTPFE_PID_ENABLE;
185 writel(tmp, fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
188 bitmap_set(bitmap, dvbdmxfeed->pid, 1);
191 /* manage cache so PID bitmap is visible to HW */
192 dma_sync_single_for_device(fei->dev,
193 channel->pid_buffer_busaddr,
199 if (fei->global_feed_count == 0) {
200 fei->timer.expires = jiffies +
201 msecs_to_jiffies(msecs_to_jiffies(POLL_MSECS));
203 add_timer(&fei->timer);
206 if (stdemux->running_feed_count == 0) {
208 dev_dbg(fei->dev, "Starting channel=%p\n", channel);
210 tasklet_setup(&channel->tsklet, channel_swdemux_tsklet);
212 /* Reset the internal inputblock sram pointers */
213 writel(channel->fifo,
214 fei->io + C8SECTPFE_IB_BUFF_STRT(channel->tsin_id));
215 writel(channel->fifo + FIFO_LEN - 1,
216 fei->io + C8SECTPFE_IB_BUFF_END(channel->tsin_id));
218 writel(channel->fifo,
219 fei->io + C8SECTPFE_IB_READ_PNT(channel->tsin_id));
220 writel(channel->fifo,
221 fei->io + C8SECTPFE_IB_WRT_PNT(channel->tsin_id));
224 /* reset read / write memdma ptrs for this channel */
225 writel(channel->back_buffer_busaddr, channel->irec +
226 DMA_PRDS_BUSBASE_TP(0));
228 tmp = channel->back_buffer_busaddr + FEI_BUFFER_SIZE - 1;
229 writel(tmp, channel->irec + DMA_PRDS_BUSTOP_TP(0));
231 writel(channel->back_buffer_busaddr, channel->irec +
232 DMA_PRDS_BUSWP_TP(0));
234 /* Issue a reset and enable InputBlock */
235 writel(C8SECTPFE_SYS_ENABLE | C8SECTPFE_SYS_RESET
236 , fei->io + C8SECTPFE_IB_SYS(channel->tsin_id));
238 /* and enable the tp */
239 writel(0x1, channel->irec + DMA_PRDS_TPENABLE);
241 dev_dbg(fei->dev, "%s:%d Starting DMA feed on stdemux=%p\n"
242 , __func__, __LINE__, stdemux);
245 stdemux->running_feed_count++;
246 fei->global_feed_count++;
248 mutex_unlock(&fei->lock);
253 static int c8sectpfe_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
256 struct dvb_demux *demux = dvbdmxfeed->demux;
257 struct stdemux *stdemux = (struct stdemux *)demux->priv;
258 struct c8sectpfei *fei = stdemux->c8sectpfei;
259 struct channel_info *channel;
263 unsigned long *bitmap;
265 if (!atomic_read(&fei->fw_loaded)) {
266 ret = load_c8sectpfe_fw(fei);
271 mutex_lock(&fei->lock);
273 channel = fei->channel_data[stdemux->tsin_index];
275 bitmap = channel->pid_buffer_aligned;
277 if (dvbdmxfeed->pid == 8192) {
278 tmp = readl(fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
279 tmp |= C8SECTPFE_PID_ENABLE;
280 writel(tmp, fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
282 bitmap_clear(bitmap, dvbdmxfeed->pid, 1);
285 /* manage cache so data is visible to HW */
286 dma_sync_single_for_device(fei->dev,
287 channel->pid_buffer_busaddr,
291 if (--stdemux->running_feed_count == 0) {
293 channel = fei->channel_data[stdemux->tsin_index];
295 /* TP re-configuration on page 168 of functional spec */
297 /* disable IB (prevents more TS data going to memdma) */
298 writel(0, fei->io + C8SECTPFE_IB_SYS(channel->tsin_id));
300 /* disable this channels descriptor */
301 writel(0, channel->irec + DMA_PRDS_TPENABLE);
303 tasklet_disable(&channel->tsklet);
305 /* now request memdma channel goes idle */
306 idlereq = (1 << channel->tsin_id) | IDLEREQ;
307 writel(idlereq, fei->io + DMA_IDLE_REQ);
309 /* wait for idle irq handler to signal completion */
310 ret = wait_for_completion_timeout(&channel->idle_completion,
311 msecs_to_jiffies(100));
315 "Timeout waiting for idle irq on tsin%d\n",
318 reinit_completion(&channel->idle_completion);
320 /* reset read / write ptrs for this channel */
322 writel(channel->back_buffer_busaddr,
323 channel->irec + DMA_PRDS_BUSBASE_TP(0));
325 tmp = channel->back_buffer_busaddr + FEI_BUFFER_SIZE - 1;
326 writel(tmp, channel->irec + DMA_PRDS_BUSTOP_TP(0));
328 writel(channel->back_buffer_busaddr,
329 channel->irec + DMA_PRDS_BUSWP_TP(0));
332 "%s:%d stopping DMA feed on stdemux=%p channel=%d\n",
333 __func__, __LINE__, stdemux, channel->tsin_id);
335 /* turn off all PIDS in the bitmap */
336 memset(channel->pid_buffer_aligned, 0, PID_TABLE_SIZE);
338 /* manage cache so data is visible to HW */
339 dma_sync_single_for_device(fei->dev,
340 channel->pid_buffer_busaddr,
347 if (--fei->global_feed_count == 0) {
348 dev_dbg(fei->dev, "%s:%d global_feed_count=%d\n"
349 , __func__, __LINE__, fei->global_feed_count);
351 del_timer(&fei->timer);
354 mutex_unlock(&fei->lock);
359 static struct channel_info *find_channel(struct c8sectpfei *fei, int tsin_num)
363 for (i = 0; i < C8SECTPFE_MAX_TSIN_CHAN; i++) {
364 if (!fei->channel_data[i])
367 if (fei->channel_data[i]->tsin_id == tsin_num)
368 return fei->channel_data[i];
374 static void c8sectpfe_getconfig(struct c8sectpfei *fei)
376 struct c8sectpfe_hw *hw = &fei->hw_stats;
378 hw->num_ib = readl(fei->io + SYS_CFG_NUM_IB);
379 hw->num_mib = readl(fei->io + SYS_CFG_NUM_MIB);
380 hw->num_swts = readl(fei->io + SYS_CFG_NUM_SWTS);
381 hw->num_tsout = readl(fei->io + SYS_CFG_NUM_TSOUT);
382 hw->num_ccsc = readl(fei->io + SYS_CFG_NUM_CCSC);
383 hw->num_ram = readl(fei->io + SYS_CFG_NUM_RAM);
384 hw->num_tp = readl(fei->io + SYS_CFG_NUM_TP);
386 dev_info(fei->dev, "C8SECTPFE hw supports the following:\n");
387 dev_info(fei->dev, "Input Blocks: %d\n", hw->num_ib);
388 dev_info(fei->dev, "Merged Input Blocks: %d\n", hw->num_mib);
389 dev_info(fei->dev, "Software Transport Stream Inputs: %d\n"
391 dev_info(fei->dev, "Transport Stream Output: %d\n", hw->num_tsout);
392 dev_info(fei->dev, "Cable Card Converter: %d\n", hw->num_ccsc);
393 dev_info(fei->dev, "RAMs supported by C8SECTPFE: %d\n", hw->num_ram);
394 dev_info(fei->dev, "Tango TPs supported by C8SECTPFE: %d\n"
398 static irqreturn_t c8sectpfe_idle_irq_handler(int irq, void *priv)
400 struct c8sectpfei *fei = priv;
401 struct channel_info *chan;
403 unsigned long tmp = readl(fei->io + DMA_IDLE_REQ);
405 /* page 168 of functional spec: Clear the idle request
406 by writing 0 to the C8SECTPFE_DMA_IDLE_REQ register. */
408 /* signal idle completion */
409 for_each_set_bit(bit, &tmp, fei->hw_stats.num_ib) {
411 chan = find_channel(fei, bit);
414 complete(&chan->idle_completion);
417 writel(0, fei->io + DMA_IDLE_REQ);
423 static void free_input_block(struct c8sectpfei *fei, struct channel_info *tsin)
428 if (tsin->back_buffer_busaddr)
429 if (!dma_mapping_error(fei->dev, tsin->back_buffer_busaddr))
430 dma_unmap_single(fei->dev, tsin->back_buffer_busaddr,
431 FEI_BUFFER_SIZE, DMA_BIDIRECTIONAL);
433 kfree(tsin->back_buffer_start);
435 if (tsin->pid_buffer_busaddr)
436 if (!dma_mapping_error(fei->dev, tsin->pid_buffer_busaddr))
437 dma_unmap_single(fei->dev, tsin->pid_buffer_busaddr,
438 PID_TABLE_SIZE, DMA_BIDIRECTIONAL);
440 kfree(tsin->pid_buffer_start);
445 static int configure_memdma_and_inputblock(struct c8sectpfei *fei,
446 struct channel_info *tsin)
450 char tsin_pin_name[MAX_NAME];
455 dev_dbg(fei->dev, "%s:%d Configuring channel=%p tsin=%d\n"
456 , __func__, __LINE__, tsin, tsin->tsin_id);
458 init_completion(&tsin->idle_completion);
460 tsin->back_buffer_start = kzalloc(FEI_BUFFER_SIZE + FEI_ALIGNMENT, GFP_KERNEL);
461 if (!tsin->back_buffer_start) {
466 /* Ensure backbuffer is 32byte aligned */
467 tsin->back_buffer_aligned = tsin->back_buffer_start + FEI_ALIGNMENT;
469 tsin->back_buffer_aligned = PTR_ALIGN(tsin->back_buffer_aligned, FEI_ALIGNMENT);
471 tsin->back_buffer_busaddr = dma_map_single(fei->dev,
472 tsin->back_buffer_aligned,
476 if (dma_mapping_error(fei->dev, tsin->back_buffer_busaddr)) {
477 dev_err(fei->dev, "failed to map back_buffer\n");
483 * The pid buffer can be configured (in hw) for byte or bit
484 * per pid. By powers of deduction we conclude stih407 family
485 * is configured (at SoC design stage) for bit per pid.
487 tsin->pid_buffer_start = kzalloc(PID_TABLE_SIZE + PID_TABLE_SIZE, GFP_KERNEL);
488 if (!tsin->pid_buffer_start) {
494 * PID buffer needs to be aligned to size of the pid table
495 * which at bit per pid is 1024 bytes (8192 pids / 8).
496 * PIDF_BASE register enforces this alignment when writing
500 tsin->pid_buffer_aligned = tsin->pid_buffer_start + PID_TABLE_SIZE;
502 tsin->pid_buffer_aligned = PTR_ALIGN(tsin->pid_buffer_aligned, PID_TABLE_SIZE);
504 tsin->pid_buffer_busaddr = dma_map_single(fei->dev,
505 tsin->pid_buffer_aligned,
509 if (dma_mapping_error(fei->dev, tsin->pid_buffer_busaddr)) {
510 dev_err(fei->dev, "failed to map pid_bitmap\n");
515 /* manage cache so pid bitmap is visible to HW */
516 dma_sync_single_for_device(fei->dev,
517 tsin->pid_buffer_busaddr,
521 snprintf(tsin_pin_name, MAX_NAME, "tsin%d-%s", tsin->tsin_id,
522 (tsin->serial_not_parallel ? "serial" : "parallel"));
524 tsin->pstate = pinctrl_lookup_state(fei->pinctrl, tsin_pin_name);
525 if (IS_ERR(tsin->pstate)) {
526 dev_err(fei->dev, "%s: pinctrl_lookup_state couldn't find %s state\n"
527 , __func__, tsin_pin_name);
528 ret = PTR_ERR(tsin->pstate);
532 ret = pinctrl_select_state(fei->pinctrl, tsin->pstate);
535 dev_err(fei->dev, "%s: pinctrl_select_state failed\n"
540 /* Enable this input block */
541 tmp = readl(fei->io + SYS_INPUT_CLKEN);
542 tmp |= BIT(tsin->tsin_id);
543 writel(tmp, fei->io + SYS_INPUT_CLKEN);
545 if (tsin->serial_not_parallel)
546 tmp |= C8SECTPFE_SERIAL_NOT_PARALLEL;
548 if (tsin->invert_ts_clk)
549 tmp |= C8SECTPFE_INVERT_TSCLK;
551 if (tsin->async_not_sync)
552 tmp |= C8SECTPFE_ASYNC_NOT_SYNC;
554 tmp |= C8SECTPFE_ALIGN_BYTE_SOP | C8SECTPFE_BYTE_ENDIANNESS_MSB;
556 writel(tmp, fei->io + C8SECTPFE_IB_IP_FMT_CFG(tsin->tsin_id));
558 writel(C8SECTPFE_SYNC(0x9) |
559 C8SECTPFE_DROP(0x9) |
560 C8SECTPFE_TOKEN(0x47),
561 fei->io + C8SECTPFE_IB_SYNCLCKDRP_CFG(tsin->tsin_id));
563 writel(TS_PKT_SIZE, fei->io + C8SECTPFE_IB_PKT_LEN(tsin->tsin_id));
565 /* Place the FIFO's at the end of the irec descriptors */
567 tsin->fifo = (tsin->tsin_id * FIFO_LEN);
569 writel(tsin->fifo, fei->io + C8SECTPFE_IB_BUFF_STRT(tsin->tsin_id));
570 writel(tsin->fifo + FIFO_LEN - 1,
571 fei->io + C8SECTPFE_IB_BUFF_END(tsin->tsin_id));
573 writel(tsin->fifo, fei->io + C8SECTPFE_IB_READ_PNT(tsin->tsin_id));
574 writel(tsin->fifo, fei->io + C8SECTPFE_IB_WRT_PNT(tsin->tsin_id));
576 writel(tsin->pid_buffer_busaddr,
577 fei->io + PIDF_BASE(tsin->tsin_id));
579 dev_dbg(fei->dev, "chan=%d PIDF_BASE=0x%x pid_bus_addr=%pad\n",
580 tsin->tsin_id, readl(fei->io + PIDF_BASE(tsin->tsin_id)),
581 &tsin->pid_buffer_busaddr);
583 /* Configure and enable HW PID filtering */
586 * The PID value is created by assembling the first 8 bytes of
587 * the TS packet into a 64-bit word in big-endian format. A
588 * slice of that 64-bit word is taken from
589 * (PID_OFFSET+PID_NUM_BITS-1) to PID_OFFSET.
591 tmp = (C8SECTPFE_PID_ENABLE | C8SECTPFE_PID_NUMBITS(13)
592 | C8SECTPFE_PID_OFFSET(40));
594 writel(tmp, fei->io + C8SECTPFE_IB_PID_SET(tsin->tsin_id));
596 dev_dbg(fei->dev, "chan=%d setting wp: %d, rp: %d, buf: %d-%d\n",
598 readl(fei->io + C8SECTPFE_IB_WRT_PNT(tsin->tsin_id)),
599 readl(fei->io + C8SECTPFE_IB_READ_PNT(tsin->tsin_id)),
600 readl(fei->io + C8SECTPFE_IB_BUFF_STRT(tsin->tsin_id)),
601 readl(fei->io + C8SECTPFE_IB_BUFF_END(tsin->tsin_id)));
603 /* Get base addpress of pointer record block from DMEM */
604 tsin->irec = fei->io + DMA_MEMDMA_OFFSET + DMA_DMEM_OFFSET +
605 readl(fei->io + DMA_PTRREC_BASE);
607 /* fill out pointer record data structure */
609 /* advance pointer record block to our channel */
610 tsin->irec += (tsin->tsin_id * DMA_PRDS_SIZE);
612 writel(tsin->fifo, tsin->irec + DMA_PRDS_MEMBASE);
614 writel(tsin->fifo + FIFO_LEN - 1, tsin->irec + DMA_PRDS_MEMTOP);
616 writel((188 + 7)&~7, tsin->irec + DMA_PRDS_PKTSIZE);
618 writel(0x1, tsin->irec + DMA_PRDS_TPENABLE);
620 /* read/write pointers with physical bus address */
622 writel(tsin->back_buffer_busaddr, tsin->irec + DMA_PRDS_BUSBASE_TP(0));
624 tmp = tsin->back_buffer_busaddr + FEI_BUFFER_SIZE - 1;
625 writel(tmp, tsin->irec + DMA_PRDS_BUSTOP_TP(0));
627 writel(tsin->back_buffer_busaddr, tsin->irec + DMA_PRDS_BUSWP_TP(0));
628 writel(tsin->back_buffer_busaddr, tsin->irec + DMA_PRDS_BUSRP_TP(0));
630 /* initialize tasklet */
631 tasklet_setup(&tsin->tsklet, channel_swdemux_tsklet);
636 free_input_block(fei, tsin);
640 static irqreturn_t c8sectpfe_error_irq_handler(int irq, void *priv)
642 struct c8sectpfei *fei = priv;
644 dev_err(fei->dev, "%s: error handling not yet implemented\n"
648 * TODO FIXME we should detect some error conditions here
649 * and ideally do something about them!
655 static int c8sectpfe_probe(struct platform_device *pdev)
657 struct device *dev = &pdev->dev;
658 struct device_node *child, *np = dev->of_node;
659 struct c8sectpfei *fei;
660 struct resource *res;
662 struct channel_info *tsin;
664 /* Allocate the c8sectpfei structure */
665 fei = devm_kzalloc(dev, sizeof(struct c8sectpfei), GFP_KERNEL);
671 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "c8sectpfe");
672 fei->io = devm_ioremap_resource(dev, res);
674 return PTR_ERR(fei->io);
676 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
678 fei->sram = devm_ioremap_resource(dev, res);
679 if (IS_ERR(fei->sram))
680 return PTR_ERR(fei->sram);
682 fei->sram_size = resource_size(res);
684 fei->idle_irq = platform_get_irq_byname(pdev, "c8sectpfe-idle-irq");
685 if (fei->idle_irq < 0)
686 return fei->idle_irq;
688 fei->error_irq = platform_get_irq_byname(pdev, "c8sectpfe-error-irq");
689 if (fei->error_irq < 0)
690 return fei->error_irq;
692 platform_set_drvdata(pdev, fei);
694 fei->c8sectpfeclk = devm_clk_get(dev, "c8sectpfe");
695 if (IS_ERR(fei->c8sectpfeclk)) {
696 dev_err(dev, "c8sectpfe clk not found\n");
697 return PTR_ERR(fei->c8sectpfeclk);
700 ret = clk_prepare_enable(fei->c8sectpfeclk);
702 dev_err(dev, "Failed to enable c8sectpfe clock\n");
706 /* to save power disable all IP's (on by default) */
707 writel(0, fei->io + SYS_INPUT_CLKEN);
709 /* Enable memdma clock */
710 writel(MEMDMAENABLE, fei->io + SYS_OTHER_CLKEN);
712 /* clear internal sram */
713 memset_io(fei->sram, 0x0, fei->sram_size);
715 c8sectpfe_getconfig(fei);
717 ret = devm_request_irq(dev, fei->idle_irq, c8sectpfe_idle_irq_handler,
718 0, "c8sectpfe-idle-irq", fei);
720 dev_err(dev, "Can't register c8sectpfe-idle-irq IRQ.\n");
721 goto err_clk_disable;
724 ret = devm_request_irq(dev, fei->error_irq,
725 c8sectpfe_error_irq_handler, 0,
726 "c8sectpfe-error-irq", fei);
728 dev_err(dev, "Can't register c8sectpfe-error-irq IRQ.\n");
729 goto err_clk_disable;
732 fei->tsin_count = of_get_child_count(np);
734 if (fei->tsin_count > C8SECTPFE_MAX_TSIN_CHAN ||
735 fei->tsin_count > fei->hw_stats.num_ib) {
737 dev_err(dev, "More tsin declared than exist on SoC!\n");
739 goto err_clk_disable;
742 fei->pinctrl = devm_pinctrl_get(dev);
744 if (IS_ERR(fei->pinctrl)) {
745 dev_err(dev, "Error getting tsin pins\n");
746 ret = PTR_ERR(fei->pinctrl);
747 goto err_clk_disable;
750 for_each_child_of_node(np, child) {
751 struct device_node *i2c_bus;
753 fei->channel_data[index] = devm_kzalloc(dev,
754 sizeof(struct channel_info),
757 if (!fei->channel_data[index]) {
762 tsin = fei->channel_data[index];
766 ret = of_property_read_u32(child, "tsin-num", &tsin->tsin_id);
768 dev_err(&pdev->dev, "No tsin_num found\n");
772 /* sanity check value */
773 if (tsin->tsin_id > fei->hw_stats.num_ib) {
775 "tsin-num %d specified greater than number\n\tof input block hw in SoC! (%d)",
776 tsin->tsin_id, fei->hw_stats.num_ib);
781 tsin->invert_ts_clk = of_property_read_bool(child,
784 tsin->serial_not_parallel = of_property_read_bool(child,
785 "serial-not-parallel");
787 tsin->async_not_sync = of_property_read_bool(child,
790 ret = of_property_read_u32(child, "dvb-card",
793 dev_err(&pdev->dev, "No dvb-card found\n");
797 i2c_bus = of_parse_phandle(child, "i2c-bus", 0);
799 dev_err(&pdev->dev, "No i2c-bus found\n");
804 of_find_i2c_adapter_by_node(i2c_bus);
805 if (!tsin->i2c_adapter) {
806 dev_err(&pdev->dev, "No i2c adapter found\n");
807 of_node_put(i2c_bus);
811 of_node_put(i2c_bus);
813 tsin->rst_gpio = of_get_named_gpio(child, "reset-gpios", 0);
815 ret = gpio_is_valid(tsin->rst_gpio);
818 "reset gpio for tsin%d not valid (gpio=%d)\n",
819 tsin->tsin_id, tsin->rst_gpio);
824 ret = devm_gpio_request_one(dev, tsin->rst_gpio,
825 GPIOF_OUT_INIT_LOW, "NIM reset");
826 if (ret && ret != -EBUSY) {
827 dev_err(dev, "Can't request tsin%d reset gpio\n"
828 , fei->channel_data[index]->tsin_id);
833 /* toggle reset lines */
834 gpio_direction_output(tsin->rst_gpio, 0);
835 usleep_range(3500, 5000);
836 gpio_direction_output(tsin->rst_gpio, 1);
837 usleep_range(3000, 5000);
840 tsin->demux_mapping = index;
843 "channel=%p n=%d tsin_num=%d, invert-ts-clk=%d\n\tserial-not-parallel=%d pkt-clk-valid=%d dvb-card=%d\n",
844 fei->channel_data[index], index,
845 tsin->tsin_id, tsin->invert_ts_clk,
846 tsin->serial_not_parallel, tsin->async_not_sync,
852 /* Setup timer interrupt */
853 timer_setup(&fei->timer, c8sectpfe_timer_interrupt, 0);
855 mutex_init(&fei->lock);
857 /* Get the configuration information about the tuners */
858 ret = c8sectpfe_tuner_register_frontend(&fei->c8sectpfe[0],
860 c8sectpfe_start_feed,
861 c8sectpfe_stop_feed);
863 dev_err(dev, "c8sectpfe_tuner_register_frontend failed (%d)\n",
865 goto err_clk_disable;
868 c8sectpfe_debugfs_init(fei);
875 clk_disable_unprepare(fei->c8sectpfeclk);
879 static int c8sectpfe_remove(struct platform_device *pdev)
881 struct c8sectpfei *fei = platform_get_drvdata(pdev);
882 struct channel_info *channel;
885 wait_for_completion(&fei->fw_ack);
887 c8sectpfe_tuner_unregister_frontend(fei->c8sectpfe[0], fei);
890 * Now loop through and un-configure each of the InputBlock resources
892 for (i = 0; i < fei->tsin_count; i++) {
893 channel = fei->channel_data[i];
894 free_input_block(fei, channel);
897 c8sectpfe_debugfs_exit(fei);
899 dev_info(fei->dev, "Stopping memdma SLIM core\n");
900 if (readl(fei->io + DMA_CPU_RUN))
901 writel(0x0, fei->io + DMA_CPU_RUN);
903 /* unclock all internal IP's */
904 if (readl(fei->io + SYS_INPUT_CLKEN))
905 writel(0, fei->io + SYS_INPUT_CLKEN);
907 if (readl(fei->io + SYS_OTHER_CLKEN))
908 writel(0, fei->io + SYS_OTHER_CLKEN);
910 clk_disable_unprepare(fei->c8sectpfeclk);
916 static int configure_channels(struct c8sectpfei *fei)
919 struct device_node *child, *np = fei->dev->of_node;
921 /* iterate round each tsin and configure memdma descriptor and IB hw */
922 for_each_child_of_node(np, child) {
923 ret = configure_memdma_and_inputblock(fei,
924 fei->channel_data[index]);
927 "configure_memdma_and_inputblock failed\n");
938 free_input_block(fei, fei->channel_data[index]);
944 c8sectpfe_elf_sanity_check(struct c8sectpfei *fei, const struct firmware *fw)
946 struct elf32_hdr *ehdr;
950 dev_err(fei->dev, "failed to load %s\n", FIRMWARE_MEMDMA);
954 if (fw->size < sizeof(struct elf32_hdr)) {
955 dev_err(fei->dev, "Image is too small\n");
959 ehdr = (struct elf32_hdr *)fw->data;
961 /* We only support ELF32 at this point */
962 class = ehdr->e_ident[EI_CLASS];
963 if (class != ELFCLASS32) {
964 dev_err(fei->dev, "Unsupported class: %d\n", class);
968 if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB) {
969 dev_err(fei->dev, "Unsupported firmware endianness\n");
973 if (fw->size < ehdr->e_shoff + sizeof(struct elf32_shdr)) {
974 dev_err(fei->dev, "Image is too small\n");
978 if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
979 dev_err(fei->dev, "Image is corrupted (bad magic)\n");
983 /* Check ELF magic */
984 ehdr = (Elf32_Ehdr *)fw->data;
985 if (ehdr->e_ident[EI_MAG0] != ELFMAG0 ||
986 ehdr->e_ident[EI_MAG1] != ELFMAG1 ||
987 ehdr->e_ident[EI_MAG2] != ELFMAG2 ||
988 ehdr->e_ident[EI_MAG3] != ELFMAG3) {
989 dev_err(fei->dev, "Invalid ELF magic\n");
993 if (ehdr->e_type != ET_EXEC) {
994 dev_err(fei->dev, "Unsupported ELF header type\n");
998 if (ehdr->e_phoff > fw->size) {
999 dev_err(fei->dev, "Firmware size is too small\n");
1007 static void load_imem_segment(struct c8sectpfei *fei, Elf32_Phdr *phdr,
1008 const struct firmware *fw, u8 __iomem *dest,
1011 const u8 *imem_src = fw->data + phdr->p_offset;
1015 * For IMEM segments, the segment contains 24-bit
1016 * instructions which must be padded to 32-bit
1017 * instructions before being written. The written
1018 * segment is padded with NOP instructions.
1022 "Loading IMEM segment %d 0x%08x\n\t (0x%x bytes) -> 0x%p (0x%x bytes)\n",
1023 seg_num, phdr->p_paddr, phdr->p_filesz, dest,
1024 phdr->p_memsz + phdr->p_memsz / 3);
1026 for (i = 0; i < phdr->p_filesz; i++) {
1028 writeb(readb((void __iomem *)imem_src), (void __iomem *)dest);
1030 /* Every 3 bytes, add an additional
1031 * padding zero in destination */
1034 writeb(0x00, (void __iomem *)dest);
1042 static void load_dmem_segment(struct c8sectpfei *fei, Elf32_Phdr *phdr,
1043 const struct firmware *fw, u8 __iomem *dst, int seg_num)
1046 * For DMEM segments copy the segment data from the ELF
1047 * file and pad segment with zeroes
1051 "Loading DMEM segment %d 0x%08x\n\t(0x%x bytes) -> 0x%p (0x%x bytes)\n",
1052 seg_num, phdr->p_paddr, phdr->p_filesz,
1053 dst, phdr->p_memsz);
1055 memcpy((void __force *)dst, (void *)fw->data + phdr->p_offset,
1058 memset((void __force *)dst + phdr->p_filesz, 0,
1059 phdr->p_memsz - phdr->p_filesz);
1062 static int load_slim_core_fw(const struct firmware *fw, struct c8sectpfei *fei)
1072 ehdr = (Elf32_Ehdr *)fw->data;
1073 phdr = (Elf32_Phdr *)(fw->data + ehdr->e_phoff);
1075 /* go through the available ELF segments */
1076 for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
1078 /* Only consider LOAD segments */
1079 if (phdr->p_type != PT_LOAD)
1083 * Check segment is contained within the fw->data buffer
1085 if (phdr->p_offset + phdr->p_filesz > fw->size) {
1087 "Segment %d is outside of firmware file\n", i);
1093 * MEMDMA IMEM has executable flag set, otherwise load
1094 * this segment into DMEM.
1098 if (phdr->p_flags & PF_X) {
1099 dst = (u8 __iomem *) fei->io + DMA_MEMDMA_IMEM;
1101 * The Slim ELF file uses 32-bit word addressing for
1104 dst += (phdr->p_paddr & 0xFFFFF) * sizeof(unsigned int);
1105 load_imem_segment(fei, phdr, fw, dst, i);
1107 dst = (u8 __iomem *) fei->io + DMA_MEMDMA_DMEM;
1109 * The Slim ELF file uses 32-bit word addressing for
1112 dst += (phdr->p_paddr & 0xFFFFF) * sizeof(unsigned int);
1113 load_dmem_segment(fei, phdr, fw, dst, i);
1117 release_firmware(fw);
1121 static int load_c8sectpfe_fw(struct c8sectpfei *fei)
1123 const struct firmware *fw;
1126 dev_info(fei->dev, "Loading firmware: %s\n", FIRMWARE_MEMDMA);
1128 err = request_firmware(&fw, FIRMWARE_MEMDMA, fei->dev);
1132 err = c8sectpfe_elf_sanity_check(fei, fw);
1134 dev_err(fei->dev, "c8sectpfe_elf_sanity_check failed err=(%d)\n"
1136 release_firmware(fw);
1140 err = load_slim_core_fw(fw, fei);
1142 dev_err(fei->dev, "load_slim_core_fw failed err=(%d)\n", err);
1146 /* now the firmware is loaded configure the input blocks */
1147 err = configure_channels(fei);
1149 dev_err(fei->dev, "configure_channels failed err=(%d)\n", err);
1154 * STBus target port can access IMEM and DMEM ports
1155 * without waiting for CPU
1157 writel(0x1, fei->io + DMA_PER_STBUS_SYNC);
1159 dev_info(fei->dev, "Boot the memdma SLIM core\n");
1160 writel(0x1, fei->io + DMA_CPU_RUN);
1162 atomic_set(&fei->fw_loaded, 1);
1167 static const struct of_device_id c8sectpfe_match[] = {
1168 { .compatible = "st,stih407-c8sectpfe" },
1171 MODULE_DEVICE_TABLE(of, c8sectpfe_match);
1173 static struct platform_driver c8sectpfe_driver = {
1175 .name = "c8sectpfe",
1176 .of_match_table = of_match_ptr(c8sectpfe_match),
1178 .probe = c8sectpfe_probe,
1179 .remove = c8sectpfe_remove,
1182 module_platform_driver(c8sectpfe_driver);
1184 MODULE_AUTHOR("Peter Bennett <peter.bennett@st.com>");
1185 MODULE_AUTHOR("Peter Griffin <peter.griffin@linaro.org>");
1186 MODULE_DESCRIPTION("C8SECTPFE STi DVB Driver");
1187 MODULE_LICENSE("GPL");