net/wan/fsl_ucc_hdlc: fix out of bounds write on array utdm_info
drivers/net/wan/fsl_ucc_hdlc.c (platform/kernel/linux-rpi.git)
// SPDX-License-Identifier: GPL-2.0-or-later
/* Freescale QUICC Engine HDLC Device Driver
 *
 * Copyright 2016 Freescale Semiconductor Inc.
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/hdlc.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <soc/fsl/qe/qe_tdm.h>
#include <uapi/linux/if_arp.h>

#include "fsl_ucc_hdlc.h"

#define DRV_DESC "Freescale QE UCC HDLC Driver"
#define DRV_NAME "ucc_hdlc"

#define TDM_PPPOHT_SLIC_MAXIN
#define RX_BD_ERRORS (R_CD_S | R_OV_S | R_CR_S | R_AB_S | R_NO_S | R_LG_S)

static struct ucc_tdm_info utdm_primary_info = {
	.uf_info = {
		.tsa = 0,
		.cdp = 0,
		.cds = 1,
		.ctsp = 1,
		.ctss = 1,
		.revd = 0,
		.urfs = 256,
		.utfs = 256,
		.urfet = 128,
		.urfset = 192,
		.utfet = 128,
		.utftt = 0x40,
		.ufpt = 256,
		.mode = UCC_FAST_PROTOCOL_MODE_HDLC,
		.ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
		.tenc = UCC_FAST_TX_ENCODING_NRZ,
		.renc = UCC_FAST_RX_ENCODING_NRZ,
		.tcrc = UCC_FAST_16_BIT_CRC,
		.synl = UCC_FAST_SYNC_LEN_NOT_USED,
	},

	.si_info = {
#ifdef TDM_PPPOHT_SLIC_MAXIN
		.simr_rfsd = 1,
		.simr_tfsd = 2,
#else
		.simr_rfsd = 0,
		.simr_tfsd = 0,
#endif
		.simr_crt = 0,
		.simr_sl = 0,
		.simr_ce = 1,
		.simr_fe = 1,
		.simr_gm = 0,
	},
};

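/* One TDM info slot per UCC. This array must be UCC_MAX_NUM entries long:
 * ucc_hdlc_probe() indexes it by (cell-index - 1), which is only bounds
 * checked against UCC_MAX_NUM, so the previous smaller array allowed an
 * out-of-bounds write -- the bug fixed by this commit.
 */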
static struct ucc_tdm_info utdm_info[UCC_MAX_NUM];

static int uhdlc_init(struct ucc_hdlc_private *priv)
{
	struct ucc_tdm_info *ut_info;
	struct ucc_fast_info *uf_info;
	u32 cecr_subblock;
	u16 bd_status;
	int ret, i;
	void *bd_buffer;
	dma_addr_t bd_dma_addr;
	u32 riptr;
	u32 tiptr;
	u32 gumr;

	ut_info = priv->ut_info;
	uf_info = &ut_info->uf_info;

	if (priv->tsa) {
		uf_info->tsa = 1;
		uf_info->ctsp = 1;
		uf_info->cds = 1;
		uf_info->ctss = 1;
	} else {
		uf_info->cds = 0;
		uf_info->ctsp = 0;
		uf_info->ctss = 0;
	}

	/* This sets the HPM field in the CMXUCR register, which configures
	 * an open-drain-connected HDLC bus
	 */
	if (priv->hdlc_bus)
		uf_info->brkpt_support = 1;

	uf_info->uccm_mask = ((UCC_HDLC_UCCE_RXB | UCC_HDLC_UCCE_RXF |
				UCC_HDLC_UCCE_TXB) << 16);

	ret = ucc_fast_init(uf_info, &priv->uccf);
	if (ret) {
		dev_err(priv->dev, "Failed to init uccf\n");
		return ret;
	}

	priv->uf_regs = priv->uccf->uf_regs;
	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	/* Loopback mode */
	if (priv->loopback) {
		dev_info(priv->dev, "Loopback Mode\n");
		/* use the same clock when working in loopback */
		qe_setbrg(ut_info->uf_info.rx_clock, 20000000, 1);

		gumr = ioread32be(&priv->uf_regs->gumr);
		gumr |= (UCC_FAST_GUMR_LOOPBACK | UCC_FAST_GUMR_CDS |
			 UCC_FAST_GUMR_TCI);
		gumr &= ~(UCC_FAST_GUMR_CTSP | UCC_FAST_GUMR_RSYN);
		iowrite32be(gumr, &priv->uf_regs->gumr);
	}

	/* Initialize SI */
	if (priv->tsa)
		ucc_tdm_init(priv->utdm, priv->ut_info);

	/* Write to QE CECR, UCCx channel to Stop Transmission */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
			   QE_CR_PROTOCOL_UNSPECIFIED, 0);

	/* Set UPSMR to normal mode (this still needs a proper fix) */
	iowrite32be(0, &priv->uf_regs->upsmr);

	/* hdlc_bus mode */
	if (priv->hdlc_bus) {
		u32 upsmr;

		dev_info(priv->dev, "HDLC bus Mode\n");
		upsmr = ioread32be(&priv->uf_regs->upsmr);

		/* bus mode and retransmit enable, with collision window
		 * set to 8 bytes
		 */
		upsmr |= UCC_HDLC_UPSMR_RTE | UCC_HDLC_UPSMR_BUS |
				UCC_HDLC_UPSMR_CW8;
		iowrite32be(upsmr, &priv->uf_regs->upsmr);

		/* explicitly disable CDS & CTSP */
		gumr = ioread32be(&priv->uf_regs->gumr);
		gumr &= ~(UCC_FAST_GUMR_CDS | UCC_FAST_GUMR_CTSP);
		/* set automatic sync to explicitly ignore CD signal */
		gumr |= UCC_FAST_GUMR_SYNL_AUTO;
		iowrite32be(gumr, &priv->uf_regs->gumr);
	}

	priv->rx_ring_size = RX_BD_RING_LEN;
	priv->tx_ring_size = TX_BD_RING_LEN;
	/* Alloc Rx BD */
	priv->rx_bd_base = dma_alloc_coherent(priv->dev,
			RX_BD_RING_LEN * sizeof(struct qe_bd),
			&priv->dma_rx_bd, GFP_KERNEL);

	if (!priv->rx_bd_base) {
		dev_err(priv->dev, "Cannot allocate MURAM memory for RxBDs\n");
		ret = -ENOMEM;
		goto free_uccf;
	}

	/* Alloc Tx BD */
	priv->tx_bd_base = dma_alloc_coherent(priv->dev,
			TX_BD_RING_LEN * sizeof(struct qe_bd),
			&priv->dma_tx_bd, GFP_KERNEL);

	if (!priv->tx_bd_base) {
		dev_err(priv->dev, "Cannot allocate MURAM memory for TxBDs\n");
		ret = -ENOMEM;
		goto free_rx_bd;
	}

	/* Alloc parameter ram for ucc hdlc */
	priv->ucc_pram_offset = qe_muram_alloc(sizeof(struct ucc_hdlc_param),
				ALIGNMENT_OF_UCC_HDLC_PRAM);

	if (IS_ERR_VALUE(priv->ucc_pram_offset)) {
		dev_err(priv->dev, "Can not allocate MURAM for hdlc parameter.\n");
		ret = -ENOMEM;
		goto free_tx_bd;
	}

	priv->rx_skbuff = kcalloc(priv->rx_ring_size,
				  sizeof(*priv->rx_skbuff),
				  GFP_KERNEL);
	if (!priv->rx_skbuff) {
		ret = -ENOMEM;
		goto free_ucc_pram;
	}

	priv->tx_skbuff = kcalloc(priv->tx_ring_size,
				  sizeof(*priv->tx_skbuff),
				  GFP_KERNEL);
	if (!priv->tx_skbuff) {
		ret = -ENOMEM;
		goto free_rx_skbuff;
	}

	priv->skb_curtx = 0;
	priv->skb_dirtytx = 0;
	priv->curtx_bd = priv->tx_bd_base;
	priv->dirty_tx = priv->tx_bd_base;
	priv->currx_bd = priv->rx_bd_base;
	priv->currx_bdnum = 0;

	/* init parameter base */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
			   QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);

	priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
					qe_muram_addr(priv->ucc_pram_offset);

	/* Zero out parameter ram */
	memset_io(priv->ucc_pram, 0, sizeof(struct ucc_hdlc_param));

	/* Alloc riptr, tiptr */
	riptr = qe_muram_alloc(32, 32);
	if (IS_ERR_VALUE(riptr)) {
		dev_err(priv->dev, "Cannot allocate MURAM mem for Receive internal temp data pointer\n");
		ret = -ENOMEM;
		goto free_tx_skbuff;
	}

	tiptr = qe_muram_alloc(32, 32);
	if (IS_ERR_VALUE(tiptr)) {
		dev_err(priv->dev, "Cannot allocate MURAM mem for Transmit internal temp data pointer\n");
		ret = -ENOMEM;
		goto free_riptr;
	}

	/* Set RIPTR, TIPTR */
	iowrite16be(riptr, &priv->ucc_pram->riptr);
	iowrite16be(tiptr, &priv->ucc_pram->tiptr);

	/* Set MRBLR */
	iowrite16be(MAX_RX_BUF_LENGTH, &priv->ucc_pram->mrblr);

	/* Set RBASE, TBASE */
	iowrite32be(priv->dma_rx_bd, &priv->ucc_pram->rbase);
	iowrite32be(priv->dma_tx_bd, &priv->ucc_pram->tbase);

	/* Set RSTATE, TSTATE */
	iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->rstate);
	iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->tstate);

	/* Set C_MASK, C_PRES for 16bit CRC */
	iowrite32be(CRC_16BIT_MASK, &priv->ucc_pram->c_mask);
	iowrite32be(CRC_16BIT_PRES, &priv->ucc_pram->c_pres);

	iowrite16be(MAX_FRAME_LENGTH, &priv->ucc_pram->mflr);
	iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfthr);
	iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfcnt);
	iowrite16be(priv->hmask, &priv->ucc_pram->hmask);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr1);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr2);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr3);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4);

	/* Get BD buffer */
	bd_buffer = dma_alloc_coherent(priv->dev,
				       (RX_BD_RING_LEN + TX_BD_RING_LEN) * MAX_RX_BUF_LENGTH,
				       &bd_dma_addr, GFP_KERNEL);

	if (!bd_buffer) {
		dev_err(priv->dev, "Could not allocate buffer descriptors\n");
		ret = -ENOMEM;
		goto free_tiptr;
	}

	priv->rx_buffer = bd_buffer;
	priv->tx_buffer = bd_buffer + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;

	priv->dma_rx_addr = bd_dma_addr;
	priv->dma_tx_addr = bd_dma_addr + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;

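	/* Seed both rings: every BD is interrupt-enabled, Rx BDs start
	 * empty (R_E_S, owned by hardware), and the last BD in each ring
	 * carries the wrap bit (R_W_S/T_W_S) so the controller loops
	 * back to the ring base.
	 */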
	for (i = 0; i < RX_BD_RING_LEN; i++) {
		if (i < (RX_BD_RING_LEN - 1))
			bd_status = R_E_S | R_I_S;
		else
			bd_status = R_E_S | R_I_S | R_W_S;

		iowrite16be(bd_status, &priv->rx_bd_base[i].status);
		iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->rx_bd_base[i].buf);
	}

	for (i = 0; i < TX_BD_RING_LEN; i++) {
		if (i < (TX_BD_RING_LEN - 1))
			bd_status = T_I_S | T_TC_S;
		else
			bd_status = T_I_S | T_TC_S | T_W_S;

		iowrite16be(bd_status, &priv->tx_bd_base[i].status);
		iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->tx_bd_base[i].buf);
	}

	return 0;

free_tiptr:
	qe_muram_free(tiptr);
free_riptr:
	qe_muram_free(riptr);
free_tx_skbuff:
	kfree(priv->tx_skbuff);
free_rx_skbuff:
	kfree(priv->rx_skbuff);
free_ucc_pram:
	qe_muram_free(priv->ucc_pram_offset);
free_tx_bd:
	dma_free_coherent(priv->dev,
			  TX_BD_RING_LEN * sizeof(struct qe_bd),
			  priv->tx_bd_base, priv->dma_tx_bd);
free_rx_bd:
	dma_free_coherent(priv->dev,
			  RX_BD_RING_LEN * sizeof(struct qe_bd),
			  priv->rx_bd_base, priv->dma_rx_bd);
free_uccf:
	ucc_fast_free(priv->uccf);

	return ret;
}

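/* ndo_start_xmit handler (hooked up via hdlc->xmit): prepends or validates
 * the protocol header, copies the frame into the coherent Tx buffer that
 * backs the current BD, then sets T_R_S so the QE owns the descriptor.
 * The queue is stopped once the ring wraps around to dirty_tx (ring full).
 */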
static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)hdlc->priv;
	struct qe_bd __iomem *bd;
	u16 bd_status;
	unsigned long flags;
	u16 *proto_head;

	switch (dev->type) {
	case ARPHRD_RAWHDLC:
		if (skb_headroom(skb) < HDLC_HEAD_LEN) {
			dev->stats.tx_dropped++;
			dev_kfree_skb(skb);
			netdev_err(dev, "Not enough space for hdlc head\n");
			return NETDEV_TX_OK;
		}

		skb_push(skb, HDLC_HEAD_LEN);

		proto_head = (u16 *)skb->data;
		*proto_head = htons(DEFAULT_HDLC_HEAD);

		dev->stats.tx_bytes += skb->len;
		break;

	case ARPHRD_PPP:
		proto_head = (u16 *)skb->data;
		if (*proto_head != htons(DEFAULT_PPP_HEAD)) {
			dev->stats.tx_dropped++;
			dev_kfree_skb(skb);
			netdev_err(dev, "Wrong ppp header\n");
			return NETDEV_TX_OK;
		}

		dev->stats.tx_bytes += skb->len;
		break;

	case ARPHRD_ETHER:
		dev->stats.tx_bytes += skb->len;
		break;

	default:
		dev->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	netdev_sent_queue(dev, skb->len);
	spin_lock_irqsave(&priv->lock, flags);

	/* Start from the next BD that should be filled */
	bd = priv->curtx_bd;
	bd_status = ioread16be(&bd->status);
	/* Save the skb pointer so we can free it later */
	priv->tx_skbuff[priv->skb_curtx] = skb;

	/* Update the current skb pointer (wrapping if this was the last) */
	priv->skb_curtx =
	    (priv->skb_curtx + 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);

	/* copy skb data to tx buffer for sdma processing */
	memcpy(priv->tx_buffer + (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
	       skb->data, skb->len);

	/* set bd status and length */
	bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S;

	iowrite16be(skb->len, &bd->length);
	iowrite16be(bd_status, &bd->status);

	/* Move to next BD in the ring */
	if (!(bd_status & T_W_S))
		bd += 1;
	else
		bd = priv->tx_bd_base;

	if (bd == priv->dirty_tx) {
		if (!netif_queue_stopped(dev))
			netif_stop_queue(dev);
	}

	priv->curtx_bd = bd;

	spin_unlock_irqrestore(&priv->lock, flags);

	return NETDEV_TX_OK;
}

static int hdlc_tx_restart(struct ucc_hdlc_private *priv)
{
	u32 cecr_subblock;

	cecr_subblock =
		ucc_fast_get_qe_cr_subblock(priv->ut_info->uf_info.ucc_num);

	qe_issue_cmd(QE_RESTART_TX, cecr_subblock,
		     QE_CR_PROTOCOL_UNSPECIFIED, 0);
	return 0;
}

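/* Tx completion: walk from dirty_tx reclaiming every BD the hardware has
 * released (T_R_S clear), free the matching skb, and restart the QE
 * transmitter if an underrun or carrier loss was flagged on the way.
 */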
static int hdlc_tx_done(struct ucc_hdlc_private *priv)
{
	/* Start from the next BD that should be filled */
	struct net_device *dev = priv->ndev;
	unsigned int bytes_sent = 0;
	int howmany = 0;
	struct qe_bd *bd;		/* BD pointer */
	u16 bd_status;
	int tx_restart = 0;

	bd = priv->dirty_tx;
	bd_status = ioread16be(&bd->status);

	/* Normal processing. */
	while ((bd_status & T_R_S) == 0) {
		struct sk_buff *skb;

		if (bd_status & T_UN_S) { /* Underrun */
			dev->stats.tx_fifo_errors++;
			tx_restart = 1;
		}
		if (bd_status & T_CT_S) { /* Carrier lost */
			dev->stats.tx_carrier_errors++;
			tx_restart = 1;
		}

		/* BD contains already transmitted buffer.   */
		/* Handle the transmitted buffer and release */
		/* the BD to be used with the current frame  */

		skb = priv->tx_skbuff[priv->skb_dirtytx];
		if (!skb)
			break;
		howmany++;
		bytes_sent += skb->len;
		dev->stats.tx_packets++;
		memset(priv->tx_buffer +
		       (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
		       0, skb->len);
		dev_consume_skb_irq(skb);

		priv->tx_skbuff[priv->skb_dirtytx] = NULL;
		priv->skb_dirtytx =
		    (priv->skb_dirtytx +
		     1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);

		/* We freed a buffer, so now we can restart transmission */
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);

		/* Advance the confirmation BD pointer */
		if (!(bd_status & T_W_S))
			bd += 1;
		else
			bd = priv->tx_bd_base;
		bd_status = ioread16be(&bd->status);
	}
	priv->dirty_tx = bd;

	if (tx_restart)
		hdlc_tx_restart(priv);

	netdev_completed_queue(dev, howmany, bytes_sent);
	return 0;
}

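/* Rx path, called from NAPI poll: consume filled BDs (R_E_S clear) up to
 * rx_work_limit, copy each frame out of the coherent Rx buffer into a
 * fresh skb, then recycle the BD by setting R_E_S again. Error BDs only
 * bump the relevant stats counters and are recycled straight away.
 */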
static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
{
	struct net_device *dev = priv->ndev;
	struct sk_buff *skb = NULL;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct qe_bd *bd;
	u16 bd_status;
	u16 length, howmany = 0;
	u8 *bdbuffer;

	bd = priv->currx_bd;
	bd_status = ioread16be(&bd->status);

	/* while there are received buffers and BD is full (~R_E) */
	while (!((bd_status & (R_E_S)) || (--rx_work_limit < 0))) {
		if (bd_status & (RX_BD_ERRORS)) {
			dev->stats.rx_errors++;

			if (bd_status & R_CD_S)
				dev->stats.collisions++;
			if (bd_status & R_OV_S)
				dev->stats.rx_fifo_errors++;
			if (bd_status & R_CR_S)
				dev->stats.rx_crc_errors++;
			if (bd_status & R_AB_S)
				dev->stats.rx_over_errors++;
			if (bd_status & R_NO_S)
				dev->stats.rx_frame_errors++;
			if (bd_status & R_LG_S)
				dev->stats.rx_length_errors++;

			goto recycle;
		}
		bdbuffer = priv->rx_buffer +
			(priv->currx_bdnum * MAX_RX_BUF_LENGTH);
		length = ioread16be(&bd->length);

		switch (dev->type) {
		case ARPHRD_RAWHDLC:
			bdbuffer += HDLC_HEAD_LEN;
			length -= (HDLC_HEAD_LEN + HDLC_CRC_SIZE);

			skb = dev_alloc_skb(length);
			if (!skb) {
				dev->stats.rx_dropped++;
				return -ENOMEM;
			}

			skb_put(skb, length);
			skb->len = length;
			skb->dev = dev;
			memcpy(skb->data, bdbuffer, length);
			break;

		case ARPHRD_PPP:
		case ARPHRD_ETHER:
			length -= HDLC_CRC_SIZE;

			skb = dev_alloc_skb(length);
			if (!skb) {
				dev->stats.rx_dropped++;
				return -ENOMEM;
			}

			skb_put(skb, length);
			skb->len = length;
			skb->dev = dev;
			memcpy(skb->data, bdbuffer, length);
			break;
		}

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		howmany++;
		if (hdlc->proto)
			skb->protocol = hdlc_type_trans(skb, dev);
		netif_receive_skb(skb);

recycle:
		iowrite16be((bd_status & R_W_S) | R_E_S | R_I_S, &bd->status);

		/* update to point at the next bd */
		if (bd_status & R_W_S) {
			priv->currx_bdnum = 0;
			bd = priv->rx_bd_base;
		} else {
			if (priv->currx_bdnum < (RX_BD_RING_LEN - 1))
				priv->currx_bdnum += 1;
			else
				priv->currx_bdnum = RX_BD_RING_LEN - 1;

			bd += 1;
		}

		bd_status = ioread16be(&bd->status);
	}

	priv->currx_bd = bd;
	return howmany;
}

static int ucc_hdlc_poll(struct napi_struct *napi, int budget)
{
	struct ucc_hdlc_private *priv = container_of(napi,
						     struct ucc_hdlc_private,
						     napi);
	int howmany;

	/* Tx event processing */
	spin_lock(&priv->lock);
	hdlc_tx_done(priv);
	spin_unlock(&priv->lock);

	howmany = 0;
	howmany += hdlc_rx_done(priv, budget - howmany);

	if (howmany < budget) {
		napi_complete_done(napi, howmany);
		qe_setbits32(priv->uccf->p_uccm,
			     (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16);
	}

	return howmany;
}

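/* Interrupt handler: ack only the events that are currently unmasked,
 * then mask Rx/Tx events and hand the rest of the work to NAPI; the
 * poll routine re-enables them via qe_setbits32() once it is done.
 */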
static irqreturn_t ucc_hdlc_irq_handler(int irq, void *dev_id)
{
	struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)dev_id;
	struct net_device *dev = priv->ndev;
	struct ucc_fast_private *uccf;
	struct ucc_tdm_info *ut_info;
	u32 ucce;
	u32 uccm;

	ut_info = priv->ut_info;
	uccf = priv->uccf;

	ucce = ioread32be(uccf->p_ucce);
	uccm = ioread32be(uccf->p_uccm);
	ucce &= uccm;
	iowrite32be(ucce, uccf->p_ucce);
	if (!ucce)
		return IRQ_NONE;

	if ((ucce >> 16) & (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)) {
		if (napi_schedule_prep(&priv->napi)) {
			uccm &= ~((UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)
				  << 16);
			iowrite32be(uccm, uccf->p_uccm);
			__napi_schedule(&priv->napi);
		}
	}

	/* Errors and other events */
	if (ucce >> 16 & UCC_HDLC_UCCE_BSY)
		dev->stats.rx_missed_errors++;
	if (ucce >> 16 & UCC_HDLC_UCCE_TXE)
		dev->stats.tx_errors++;

	return IRQ_HANDLED;
}

static int uhdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	const size_t size = sizeof(te1_settings);
	te1_settings line;
	struct ucc_hdlc_private *priv = netdev_priv(dev);

	if (cmd != SIOCWANDEV)
		return hdlc_ioctl(dev, ifr, cmd);

	switch (ifr->ifr_settings.type) {
	case IF_GET_IFACE:
		ifr->ifr_settings.type = IF_IFACE_E1;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}
		memset(&line, 0, sizeof(line));
		line.clock_type = priv->clocking;

		if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size))
			return -EFAULT;
		return 0;

	default:
		return hdlc_ioctl(dev, ifr, cmd);
	}
}

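/* ndo_open: guarded by hdlc_busy, so a second open is a no-op. Bring-up
 * order is: request the UCC IRQ, issue QE_INIT_TX_RX, enable the fast
 * UCC in both directions, and (for TDM setups) enable the TDM port in
 * SIGLMR1_H before starting the queue and calling hdlc_open().
 */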
static int uhdlc_open(struct net_device *dev)
{
	u32 cecr_subblock;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct ucc_hdlc_private *priv = hdlc->priv;
	struct ucc_tdm *utdm = priv->utdm;

	if (priv->hdlc_busy != 1) {
		if (request_irq(priv->ut_info->uf_info.irq,
				ucc_hdlc_irq_handler, 0, "hdlc", priv))
			return -ENODEV;

		cecr_subblock = ucc_fast_get_qe_cr_subblock(
					priv->ut_info->uf_info.ucc_num);

		qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
			     QE_CR_PROTOCOL_UNSPECIFIED, 0);

		ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

		/* Enable the TDM port */
		if (priv->tsa)
			utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);

		priv->hdlc_busy = 1;
		netif_device_attach(priv->ndev);
		napi_enable(&priv->napi);
		netdev_reset_queue(dev);
		netif_start_queue(dev);
		hdlc_open(dev);
	}

	return 0;
}

static void uhdlc_memclean(struct ucc_hdlc_private *priv)
{
	/* riptr/tiptr live in big-endian MURAM, so read them via
	 * ioread16be() rather than dereferencing the __iomem pointer
	 */
	qe_muram_free(ioread16be(&priv->ucc_pram->riptr));
	qe_muram_free(ioread16be(&priv->ucc_pram->tiptr));

	if (priv->rx_bd_base) {
		dma_free_coherent(priv->dev,
				  RX_BD_RING_LEN * sizeof(struct qe_bd),
				  priv->rx_bd_base, priv->dma_rx_bd);

		priv->rx_bd_base = NULL;
		priv->dma_rx_bd = 0;
	}

	if (priv->tx_bd_base) {
		dma_free_coherent(priv->dev,
				  TX_BD_RING_LEN * sizeof(struct qe_bd),
				  priv->tx_bd_base, priv->dma_tx_bd);

		priv->tx_bd_base = NULL;
		priv->dma_tx_bd = 0;
	}

	if (priv->ucc_pram) {
		qe_muram_free(priv->ucc_pram_offset);
		priv->ucc_pram = NULL;
		priv->ucc_pram_offset = 0;
	}

	kfree(priv->rx_skbuff);
	priv->rx_skbuff = NULL;

	kfree(priv->tx_skbuff);
	priv->tx_skbuff = NULL;

	if (priv->uf_regs) {
		iounmap(priv->uf_regs);
		priv->uf_regs = NULL;
	}

	if (priv->uccf) {
		ucc_fast_free(priv->uccf);
		priv->uccf = NULL;
	}

	if (priv->rx_buffer) {
		dma_free_coherent(priv->dev,
				  RX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
				  priv->rx_buffer, priv->dma_rx_addr);
		priv->rx_buffer = NULL;
		priv->dma_rx_addr = 0;
	}

	if (priv->tx_buffer) {
		dma_free_coherent(priv->dev,
				  TX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
				  priv->tx_buffer, priv->dma_tx_addr);
		priv->tx_buffer = NULL;
		priv->dma_tx_addr = 0;
	}
}

static int uhdlc_close(struct net_device *dev)
{
	struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;
	struct ucc_tdm *utdm = priv->utdm;
	u32 cecr_subblock;

	napi_disable(&priv->napi);
	cecr_subblock = ucc_fast_get_qe_cr_subblock(
				priv->ut_info->uf_info.ucc_num);

	qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
	qe_issue_cmd(QE_CLOSE_RX_BD, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

	if (priv->tsa)
		utdm->si_regs->siglmr1_h &= ~(0x1 << utdm->tdm_port);

	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	free_irq(priv->ut_info->uf_info.irq, priv);
	netif_stop_queue(dev);
	netdev_reset_queue(dev);
	priv->hdlc_busy = 0;

	return 0;
}

static int ucc_hdlc_attach(struct net_device *dev, unsigned short encoding,
			   unsigned short parity)
{
	struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;

	if (encoding != ENCODING_NRZ &&
	    encoding != ENCODING_NRZI)
		return -EINVAL;

	if (parity != PARITY_NONE &&
	    parity != PARITY_CRC32_PR1_CCITT &&
	    parity != PARITY_CRC16_PR0_CCITT &&
	    parity != PARITY_CRC16_PR1_CCITT)
		return -EINVAL;

	priv->encoding = encoding;
	priv->parity = parity;

	return 0;
}

#ifdef CONFIG_PM
static void store_clk_config(struct ucc_hdlc_private *priv)
{
	struct qe_mux *qe_mux_reg = &qe_immr->qmx;

	/* store si clk */
	priv->cmxsi1cr_h = ioread32be(&qe_mux_reg->cmxsi1cr_h);
	priv->cmxsi1cr_l = ioread32be(&qe_mux_reg->cmxsi1cr_l);

	/* store si sync */
	priv->cmxsi1syr = ioread32be(&qe_mux_reg->cmxsi1syr);

	/* store ucc clk */
	memcpy_fromio(priv->cmxucr, qe_mux_reg->cmxucr, 4 * sizeof(u32));
}

static void resume_clk_config(struct ucc_hdlc_private *priv)
{
	struct qe_mux *qe_mux_reg = &qe_immr->qmx;

	memcpy_toio(qe_mux_reg->cmxucr, priv->cmxucr, 4 * sizeof(u32));

	iowrite32be(priv->cmxsi1cr_h, &qe_mux_reg->cmxsi1cr_h);
	iowrite32be(priv->cmxsi1cr_l, &qe_mux_reg->cmxsi1cr_l);

	iowrite32be(priv->cmxsi1syr, &qe_mux_reg->cmxsi1syr);
}

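/* Suspend saves everything the QE loses across a deep sleep: the GUMR
 * and GUEMR UCC registers, a RAM copy of the MURAM parameter page, and
 * the clock-mux routing. Resume rewrites them, rebuilds the SIRAM and
 * both BD rings, then re-enables the channel if it was up.
 */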
static int uhdlc_suspend(struct device *dev)
{
	struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
	struct ucc_tdm_info *ut_info;
	struct ucc_fast __iomem *uf_regs;

	if (!priv)
		return -EINVAL;

	if (!netif_running(priv->ndev))
		return 0;

	netif_device_detach(priv->ndev);
	napi_disable(&priv->napi);

	ut_info = priv->ut_info;
	uf_regs = priv->uf_regs;

	/* backup gumr and guemr */
	priv->gumr = ioread32be(&uf_regs->gumr);
	priv->guemr = ioread8(&uf_regs->guemr);

	priv->ucc_pram_bak = kmalloc(sizeof(*priv->ucc_pram_bak),
					GFP_KERNEL);
	if (!priv->ucc_pram_bak)
		return -ENOMEM;

	/* backup HDLC parameter */
	memcpy_fromio(priv->ucc_pram_bak, priv->ucc_pram,
		      sizeof(struct ucc_hdlc_param));

	/* store the clk configuration */
	store_clk_config(priv);

	/* save power */
	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	return 0;
}

static int uhdlc_resume(struct device *dev)
{
	struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
	struct ucc_tdm *utdm;
	struct ucc_tdm_info *ut_info;
	struct ucc_fast __iomem *uf_regs;
	struct ucc_fast_private *uccf;
	struct ucc_fast_info *uf_info;
	int ret, i;
	u32 cecr_subblock;
	u16 bd_status;

	if (!priv)
		return -EINVAL;

	if (!netif_running(priv->ndev))
		return 0;

	utdm = priv->utdm;
	ut_info = priv->ut_info;
	uf_info = &ut_info->uf_info;
	uf_regs = priv->uf_regs;
	uccf = priv->uccf;

	/* restore gumr guemr */
	iowrite8(priv->guemr, &uf_regs->guemr);
	iowrite32be(priv->gumr, &uf_regs->gumr);

	/* Set Virtual Fifo registers */
	iowrite16be(uf_info->urfs, &uf_regs->urfs);
	iowrite16be(uf_info->urfet, &uf_regs->urfet);
	iowrite16be(uf_info->urfset, &uf_regs->urfset);
	iowrite16be(uf_info->utfs, &uf_regs->utfs);
	iowrite16be(uf_info->utfet, &uf_regs->utfet);
	iowrite16be(uf_info->utftt, &uf_regs->utftt);
	/* utfb, urfb are offsets from MURAM base */
	iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset, &uf_regs->utfb);
	iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset, &uf_regs->urfb);

	/* Rx Tx and sync clock routing */
	resume_clk_config(priv);

	iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);
	iowrite32be(0xffffffff, &uf_regs->ucce);

	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	/* rebuild SIRAM */
	if (priv->tsa)
		ucc_tdm_init(priv->utdm, priv->ut_info);

	/* Write to QE CECR, UCCx channel to Stop Transmission */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
			   (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

	/* Set UPSMR normal mode */
	iowrite32be(0, &uf_regs->upsmr);

	/* init parameter base */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
			   QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);

	priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
				qe_muram_addr(priv->ucc_pram_offset);

	/* restore ucc parameter */
	memcpy_toio(priv->ucc_pram, priv->ucc_pram_bak,
		    sizeof(struct ucc_hdlc_param));
	kfree(priv->ucc_pram_bak);

	/* rebuild BD entry */
	for (i = 0; i < RX_BD_RING_LEN; i++) {
		if (i < (RX_BD_RING_LEN - 1))
			bd_status = R_E_S | R_I_S;
		else
			bd_status = R_E_S | R_I_S | R_W_S;

		iowrite16be(bd_status, &priv->rx_bd_base[i].status);
		iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->rx_bd_base[i].buf);
	}

	for (i = 0; i < TX_BD_RING_LEN; i++) {
		if (i < (TX_BD_RING_LEN - 1))
			bd_status = T_I_S | T_TC_S;
		else
			bd_status = T_I_S | T_TC_S | T_W_S;

		iowrite16be(bd_status, &priv->tx_bd_base[i].status);
		iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->tx_bd_base[i].buf);
	}

	/* if hdlc is busy enable TX and RX */
	if (priv->hdlc_busy == 1) {
		cecr_subblock = ucc_fast_get_qe_cr_subblock(
					priv->ut_info->uf_info.ucc_num);

		qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
			     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

		ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

		/* Enable the TDM port */
		if (priv->tsa)
			utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);
	}

	napi_enable(&priv->napi);
	netif_device_attach(priv->ndev);

	return 0;
}

static const struct dev_pm_ops uhdlc_pm_ops = {
	.suspend = uhdlc_suspend,
	.resume = uhdlc_resume,
	.freeze = uhdlc_suspend,
	.thaw = uhdlc_resume,
};

#define HDLC_PM_OPS (&uhdlc_pm_ops)

#else

#define HDLC_PM_OPS NULL

#endif

static void uhdlc_tx_timeout(struct net_device *ndev)
{
	netdev_err(ndev, "%s\n", __func__);
}

static const struct net_device_ops uhdlc_ops = {
	.ndo_open       = uhdlc_open,
	.ndo_stop       = uhdlc_close,
	.ndo_start_xmit = hdlc_start_xmit,
	.ndo_do_ioctl   = uhdlc_ioctl,
	.ndo_tx_timeout = uhdlc_tx_timeout,
};

static int hdlc_map_iomem(char *name, int init_flag, void __iomem **ptr)
{
	struct device_node *np;
	struct platform_device *pdev;
	struct resource *res;
	static int siram_init_flag;
	int ret = 0;

	np = of_find_compatible_node(NULL, NULL, name);
	if (!np)
		return -EINVAL;

	pdev = of_find_device_by_node(np);
	if (!pdev) {
		pr_err("%pOFn: failed to lookup pdev\n", np);
		of_node_put(np);
		return -EINVAL;
	}

	of_node_put(np);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ret = -EINVAL;
		goto error_put_device;
	}
	*ptr = ioremap(res->start, resource_size(res));
	if (!*ptr) {
		ret = -ENOMEM;
		goto error_put_device;
	}

	/* We've remapped the addresses, and we don't need the device any
	 * more, so we should release it.
	 */
	put_device(&pdev->dev);

	if (init_flag && siram_init_flag == 0) {
		memset_io(*ptr, 0, resource_size(res));
		siram_init_flag = 1;
	}
	return 0;

error_put_device:
	put_device(&pdev->dev);

	return ret;
}

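/* Probe: "cell-index" from the device tree selects which UCC this node
 * drives; (cell-index - 1) is range-checked against UCC_MAX_NUM before
 * it is used to index utdm_info[], so the per-UCC template copy below
 * stays in bounds.
 */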
static int ucc_hdlc_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct ucc_hdlc_private *uhdlc_priv = NULL;
	struct ucc_tdm_info *ut_info;
	struct ucc_tdm *utdm = NULL;
	struct resource res;
	struct net_device *dev;
	hdlc_device *hdlc;
	int ucc_num;
	const char *sprop;
	int ret;
	u32 val;

	ret = of_property_read_u32_index(np, "cell-index", 0, &val);
	if (ret) {
		dev_err(&pdev->dev, "Invalid ucc property\n");
		return -ENODEV;
	}

	ucc_num = val - 1;
	if (ucc_num > (UCC_MAX_NUM - 1) || ucc_num < 0) {
		dev_err(&pdev->dev, "Invalid UCC num\n");
		return -EINVAL;
	}

	memcpy(&utdm_info[ucc_num], &utdm_primary_info,
	       sizeof(utdm_primary_info));

	ut_info = &utdm_info[ucc_num];
	ut_info->uf_info.ucc_num = ucc_num;

	sprop = of_get_property(np, "rx-clock-name", NULL);
	if (sprop) {
		ut_info->uf_info.rx_clock = qe_clock_source(sprop);
		if ((ut_info->uf_info.rx_clock < QE_CLK_NONE) ||
		    (ut_info->uf_info.rx_clock > QE_CLK24)) {
			dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
			return -EINVAL;
		}
	} else {
		dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
		return -EINVAL;
	}

	sprop = of_get_property(np, "tx-clock-name", NULL);
	if (sprop) {
		ut_info->uf_info.tx_clock = qe_clock_source(sprop);
		if ((ut_info->uf_info.tx_clock < QE_CLK_NONE) ||
		    (ut_info->uf_info.tx_clock > QE_CLK24)) {
			dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
			return -EINVAL;
		}
	} else {
		dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
		return -EINVAL;
	}

	ret = of_address_to_resource(np, 0, &res);
	if (ret)
		return -EINVAL;

	ut_info->uf_info.regs = res.start;
	ut_info->uf_info.irq = irq_of_parse_and_map(np, 0);

	uhdlc_priv = kzalloc(sizeof(*uhdlc_priv), GFP_KERNEL);
	if (!uhdlc_priv)
		return -ENOMEM;

	dev_set_drvdata(&pdev->dev, uhdlc_priv);
	uhdlc_priv->dev = &pdev->dev;
	uhdlc_priv->ut_info = ut_info;

	if (of_get_property(np, "fsl,tdm-interface", NULL))
		uhdlc_priv->tsa = 1;

	if (of_get_property(np, "fsl,ucc-internal-loopback", NULL))
		uhdlc_priv->loopback = 1;

	if (of_get_property(np, "fsl,hdlc-bus", NULL))
		uhdlc_priv->hdlc_bus = 1;

	if (uhdlc_priv->tsa == 1) {
		utdm = kzalloc(sizeof(*utdm), GFP_KERNEL);
		if (!utdm) {
			ret = -ENOMEM;
			dev_err(&pdev->dev, "No mem to alloc ucc tdm data\n");
			goto free_uhdlc_priv;
		}
		uhdlc_priv->utdm = utdm;
		ret = ucc_of_parse_tdm(np, utdm, ut_info);
		if (ret)
			goto free_utdm;

		ret = hdlc_map_iomem("fsl,t1040-qe-si", 0,
				     (void __iomem **)&utdm->si_regs);
		if (ret)
			goto free_utdm;
		ret = hdlc_map_iomem("fsl,t1040-qe-siram", 1,
				     (void __iomem **)&utdm->siram);
		if (ret)
			goto unmap_si_regs;
	}

	if (of_property_read_u16(np, "fsl,hmask", &uhdlc_priv->hmask))
		uhdlc_priv->hmask = DEFAULT_ADDR_MASK;

	ret = uhdlc_init(uhdlc_priv);
	if (ret) {
		dev_err(&pdev->dev, "Failed to init uhdlc\n");
		goto undo_uhdlc_init;
	}

	dev = alloc_hdlcdev(uhdlc_priv);
	if (!dev) {
		ret = -ENOMEM;
		pr_err("ucc_hdlc: unable to allocate memory\n");
		goto undo_uhdlc_init;
	}

	uhdlc_priv->ndev = dev;
	hdlc = dev_to_hdlc(dev);
	dev->tx_queue_len = 16;
	dev->netdev_ops = &uhdlc_ops;
	dev->watchdog_timeo = 2 * HZ;
	hdlc->attach = ucc_hdlc_attach;
	hdlc->xmit = ucc_hdlc_tx;
	netif_napi_add(dev, &uhdlc_priv->napi, ucc_hdlc_poll, 32);
	if (register_hdlc_device(dev)) {
		ret = -ENOBUFS;
		pr_err("ucc_hdlc: unable to register hdlc device\n");
		goto free_dev;
	}

	return 0;

free_dev:
	free_netdev(dev);
undo_uhdlc_init:
	/* utdm is NULL unless fsl,tdm-interface was set; guard the unmaps */
	if (utdm)
		iounmap(utdm->siram);
unmap_si_regs:
	if (utdm)
		iounmap(utdm->si_regs);
free_utdm:
	if (uhdlc_priv->tsa)
		kfree(utdm);
free_uhdlc_priv:
	kfree(uhdlc_priv);
	return ret;
}

static int ucc_hdlc_remove(struct platform_device *pdev)
{
	struct ucc_hdlc_private *priv = dev_get_drvdata(&pdev->dev);

	uhdlc_memclean(priv);

	/* priv->utdm stays NULL on non-TDM devices, so check it first */
	if (priv->utdm && priv->utdm->si_regs) {
		iounmap(priv->utdm->si_regs);
		priv->utdm->si_regs = NULL;
	}

	if (priv->utdm && priv->utdm->siram) {
		iounmap(priv->utdm->siram);
		priv->utdm->siram = NULL;
	}
	kfree(priv);

	dev_info(&pdev->dev, "UCC based hdlc module removed\n");

	return 0;
}

static const struct of_device_id fsl_ucc_hdlc_of_match[] = {
	{
	.compatible = "fsl,ucc-hdlc",
	},
	{},
};

MODULE_DEVICE_TABLE(of, fsl_ucc_hdlc_of_match);

static struct platform_driver ucc_hdlc_driver = {
	.probe	= ucc_hdlc_probe,
	.remove	= ucc_hdlc_remove,
	.driver = {
		.name		= DRV_NAME,
		.pm		= HDLC_PM_OPS,
		.of_match_table = fsl_ucc_hdlc_of_match,
	},
};

module_platform_driver(ucc_hdlc_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(DRV_DESC);