1 /* /linux/drivers/misc/modem_if/modem_io_device.c
3 * Copyright (C) 2010 Google, Inc.
4 * Copyright (C) 2010 Samsung Electronics.
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
17 #include <linux/init.h>
18 #include <linux/sched.h>
20 #include <linux/poll.h>
21 #include <linux/irq.h>
22 #include <linux/gpio.h>
23 #include <linux/if_arp.h>
25 #include <linux/if_ether.h>
26 #include <linux/etherdevice.h>
27 #include <linux/device.h>
29 #include <linux/platform_data/modem_tizen.h>
30 #ifdef CONFIG_LINK_DEVICE_C2C
31 #include <linux/platform_data/c2c.h>
33 #include "modem_prj.h"
34 #include "modem_utils.h"
37 * MAX_RXDATA_SIZE is used at making skb, when it called with page size
38 * it need more bytes to allocate itself (Ex, cache byte, shared info,
40 * So, give restriction to allocation size below 1 page to prevent
/* Per-skb RX payload cap: 0x0E00 = 3584 = 4096 - 512, i.e. one page minus
 * skb/cache overhead (see the allocation note in the comment above). */
43 #define MAX_RXDATA_SIZE 0x0E00 /* 4 * 1024 - 512 */
/* Upper bound for one reassembled multi-frame FMT message (16 KiB). */
44 #define MAX_MULTI_FMT_SIZE 0x4000 /* 16 * 1024 */
/* One-byte HDLC frame delimiters, memcpy'd around each outgoing frame. */
46 static const char hdlc_start[1] = { HDLC_START };
47 static const char hdlc_end[1] = { HDLC_END };
/* Forward declaration: de-mux entry point used by the RX helpers below. */
49 static int rx_iodev_skb(struct sk_buff *skb);
/* sysfs 'waketime' show: prints iod->waketime (jiffies) as milliseconds. */
51 static ssize_t show_waketime(struct device *dev,
52 struct device_attribute *attr, char *buf)
56 struct miscdevice *miscdev = dev_get_drvdata(dev);
57 struct io_device *iod = container_of(miscdev, struct io_device,
60 msec = jiffies_to_msecs(iod->waketime);
62 p += sprintf(buf, "raw waketime : %ums\n", msec);
/* sysfs 'waketime' store: parses a decimal ms value and stores it as jiffies.
 * NOTE(review): strict_strtoul() is the deprecated predecessor of kstrtoul();
 * consider migrating when touching this code. */
67 static ssize_t store_waketime(struct device *dev,
68 struct device_attribute *attr, const char *buf, size_t count)
72 struct miscdevice *miscdev = dev_get_drvdata(dev);
73 struct io_device *iod = container_of(miscdev, struct io_device,
76 ret = strict_strtoul(buf, 10, &msec);
80 iod->waketime = msecs_to_jiffies(msec);
/* sysfs attribute: 'waketime', world-readable, owner-writable. */
85 static struct device_attribute attr_waketime =
86 __ATTR(waketime, S_IRUGO | S_IWUSR, show_waketime, store_waketime);
/* sysfs 'loopback' show: prints the stored loopback IPv4 address in
 * dotted-quad form by viewing msd->loopback_ipaddr byte-wise. */
88 static ssize_t show_loopback(struct device *dev,
89 struct device_attribute *attr, char *buf)
91 struct miscdevice *miscdev = dev_get_drvdata(dev);
92 struct modem_shared *msd =
93 container_of(miscdev, struct io_device, miscdev)->msd;
94 unsigned char *ip = (unsigned char *)&msd->loopback_ipaddr;
97 p += sprintf(buf, "%u.%u.%u.%u\n", ip[0], ip[1], ip[2], ip[3]);
/* sysfs 'loopback' store: parses a dotted-quad string into a big-endian
 * 32-bit address used by the data-loopback path (see rx_multipdp/vnet_xmit). */
102 static ssize_t store_loopback(struct device *dev,
103 struct device_attribute *attr, const char *buf, size_t count)
105 struct miscdevice *miscdev = dev_get_drvdata(dev);
106 struct modem_shared *msd =
107 container_of(miscdev, struct io_device, miscdev)->msd;
109 msd->loopback_ipaddr = ipv4str_to_be32(buf, count);
/* sysfs attribute: 'loopback', world-readable, owner-writable. */
114 static struct device_attribute attr_loopback =
115 __ATTR(loopback, S_IRUGO | S_IWUSR, show_loopback, store_loopback);
/* Returns the IPC header size for this io_device's format.
 * Visible case returns sizeof(struct raw_hdr); other format cases are
 * compiled out (#if 0 "for tizen modem") or not visible in this listing. */
117 static int get_header_size(struct io_device *iod)
119 switch (iod->format) {
120 #if 0 /*for tizen modem*/
123 return sizeof(struct raw_hdr);
126 /* minimum size for transaction align */
/* Extracts the frame length from a stored IPC header.
 * @buf: points at the raw header bytes (fragdata h_data.hdr).
 * Visible path casts to struct raw_hdr and returns its ->len field. */
137 static int get_hdlc_size(struct io_device *iod, char *buf)
139 struct fmt_hdr *fmt_header;
140 struct raw_hdr *raw_header;
141 struct rfs_hdr *rfs_header;
143 mif_debug("buf : %02x %02x %02x (%d)\n", *buf, *(buf + 1),
144 *(buf + 2), __LINE__);
146 switch (iod->format) {
147 #if 0 /*for tizen modem*/
150 raw_header = (struct raw_hdr *)buf;
151 return raw_header->len;
/* Builds the TX IPC header into caller-provided frame_header_buf and
 * returns it. Visible (raw) path: len = payload + header size; channel is
 * the low 5 bits of the io_device id. */
159 static void *get_header(struct io_device *iod, size_t count,
160 char *frame_header_buf)
162 struct fmt_hdr *fmt_h;
163 struct raw_hdr *raw_h;
164 struct rfs_hdr *rfs_h;
166 switch (iod->format) {
167 #if 0 /*for tizen modem*/
170 raw_h = (struct raw_hdr *)frame_header_buf;
172 raw_h->len = count + sizeof(struct raw_hdr);
173 raw_h->channel = iod->id & 0x1F;
176 return (void *)frame_header_buf;
/* Bytes of padding needed to round len up to a 4-byte boundary.
 * NOTE(review): an enclosing condition (link device requiring aligned
 * access, per the comment at rx_hdlc_packet) is not visible here. */
183 static inline int calc_padding_size(struct io_device *iod,
184 struct link_device *ld, unsigned len)
187 return (4 - (len & 0x3)) & 0x3;
/* Returns SIZE_OF_HDLC_START if buf begins with HDLC_START, else -EBADMSG. */
192 static inline int rx_hdlc_head_start_check(char *buf)
194 /* check hdlc head and return size of start byte */
195 return (buf[0] == HDLC_START) ? SIZE_OF_HDLC_START : -EBADMSG;
/* Returns SIZE_OF_HDLC_END if buf begins with HDLC_END, else -EBADMSG. */
198 static inline int rx_hdlc_tail_check(char *buf)
200 /* check hdlc tail and return size of tail byte */
201 return (buf[0] == HDLC_END) ? SIZE_OF_HDLC_END : -EBADMSG;
204 /* remove hdlc header and store IPC header */
/* Consumes the HDLC start byte and accumulates up to head_size bytes of the
 * IPC header into fragdata(iod, ld)->h_data (may span multiple link-layer
 * chunks; hdr->len tracks how much of the header has arrived so far).
 * Returns bytes consumed on success or a negative errno (e.g. -EBADMSG). */
205 static int rx_hdlc_head_check(struct io_device *iod, struct link_device *ld,
206 char *buf, unsigned rest)
208 struct header_data *hdr = &fragdata(iod, ld)->h_data;
209 int head_size = get_header_size(iod);
213 /* first frame, remove start header 7F */
215 len = rx_hdlc_head_start_check(buf);
217 mif_err("Wrong HDLC start: 0x%x\n", *buf);
218 return len; /*Wrong hdlc start*/
221 mif_debug("check len : %d, rest : %d (%d)\n", len,
224 /* set the start flag of current packet */
225 hdr->start = HDLC_START;
229 switch (iod->format) {
234 /* TODO: print buf... */
245 rest -= len; /* rest, call by value */
248 mif_debug("check len : %d, rest : %d (%d)\n",
249 len, rest, __LINE__);
251 /* store the HDLC header to iod priv */
252 if (hdr->len < head_size) {
/* copy only as much header as is available in this chunk */
253 len = min(rest, head_size - hdr->len);
254 memcpy(hdr->hdr + hdr->len, buf, len);
259 mif_debug("check done_len : %d, rest : %d (%d)\n", done_len,
264 /* alloc skb and copy data to skb */
/* Copies HDLC payload bytes from buf into fragdata skb(s).
 * data_size is the payload length derived from the stored IPC header;
 * hdr->frag_len tracks bytes already received for this frame. Large frames
 * are split across multiple skbs capped at MAX_RXDATA_SIZE; full skbs are
 * handed to rx_iodev_skb() and a fresh one is allocated for the remainder.
 * On allocation failure, realloc_offset records how far into buf we got so
 * the caller can retry. Returns bytes consumed or negative errno. */
265 static int rx_hdlc_data_check(struct io_device *iod, struct link_device *ld,
266 char *buf, unsigned rest)
268 struct header_data *hdr = &fragdata(iod, ld)->h_data;
269 struct sk_buff *skb = fragdata(iod, ld)->skb_recv;
270 int head_size = get_header_size(iod);
271 int data_size = get_hdlc_size(iod, hdr->hdr) - head_size;
275 int rest_len = data_size - hdr->frag_len;
276 int continue_len = fragdata(iod, ld)->realloc_offset;
278 mif_debug("head_size : %d, data_size : %d (%d)\n", head_size,
279 data_size, __LINE__);
282 /* check the HDLC header*/
/* retry path: skip over the start byte + header already consumed before
 * the previous allocation failure */
283 if (rx_hdlc_head_start_check(buf) == SIZE_OF_HDLC_START) {
284 rest_len -= (head_size + SIZE_OF_HDLC_START);
285 continue_len += (head_size + SIZE_OF_HDLC_START);
289 rest -= continue_len;
290 done_len += continue_len;
291 fragdata(iod, ld)->realloc_offset = 0;
293 mif_debug("realloc_offset = %d\n", continue_len);
296 /* first payload data - alloc skb */
298 /* make skb data size under MAX_RXDATA_SIZE */
299 alloc_size = min(data_size, MAX_RXDATA_SIZE);
300 alloc_size = min(alloc_size, rest_len);
302 /* allocate first packet for data, when its size exceed
303 * MAX_RXDATA_SIZE, this packet will split to
306 skb = rx_alloc_skb(alloc_size, iod, ld);
307 if (unlikely(!skb)) {
/* remember progress so a retry can resume from here */
308 fragdata(iod, ld)->realloc_offset = continue_len;
311 fragdata(iod, ld)->skb_recv = skb;
315 /* copy length cannot exceed rest_len */
316 len = min_t(int, rest_len, rest);
317 /* copy length should be under skb tailroom size */
318 len = min(len, skb_tailroom(skb));
319 /* when skb tailroom is bigger than MAX_RXDATA_SIZE
320 * restrict its size to MAX_RXDATA_SIZE just for convinience */
321 len = min(len, MAX_RXDATA_SIZE);
323 /* copy bytes to skb */
324 memcpy(skb_put(skb, len), buf, len);
326 /* adjusting variables */
331 hdr->frag_len += len;
333 /* check if it is final for this packet sequence */
334 if (!rest_len || !rest)
337 /* more bytes are remain for this packet sequence
338 * pass fully loaded skb to rx queue
339 * and allocate another skb for continues data recv chain
342 fragdata(iod, ld)->skb_recv = NULL;
344 alloc_size = min(rest_len, MAX_RXDATA_SIZE);
346 skb = rx_alloc_skb(alloc_size, iod, ld);
347 if (unlikely(!skb)) {
348 fragdata(iod, ld)->realloc_offset = done_len;
351 fragdata(iod, ld)->skb_recv = skb;
354 mif_debug("rest : %d, alloc_size : %d , len : %d (%d)\n",
355 rest, alloc_size, skb->len, __LINE__);
/* Reassembles multi-frame FMT messages. fh->control: bit 7 = "more frames
 * follow", bits 0-6 = frame ID used to index iod->skb[] for the in-progress
 * reassembly buffer (MAX_MULTI_FMT_SIZE). Single frames (more bit clear and
 * no pending buffer) are queued directly; otherwise payload is appended and
 * the completed message is queued when the last frame arrives. */
360 static int rx_multi_fmt_frame(struct sk_buff *rx_skb)
362 struct io_device *iod = skbpriv(rx_skb)->iod;
363 struct link_device *ld = skbpriv(rx_skb)->ld;
365 (struct fmt_hdr *)fragdata(iod, ld)->h_data.hdr;
366 unsigned int id = fh->control & 0x7F;
367 struct sk_buff *skb = iod->skb[id];
368 unsigned char *data = fragdata(iod, ld)->skb_recv->data;
369 unsigned int rcvd = fragdata(iod, ld)->skb_recv->len;
372 /* If there has been no multiple frame with this ID */
373 if (!(fh->control & 0x80)) {
374 /* It is a single frame because the "more" bit is 0. */
376 mif_err("\n<%s> Rx FMT frame (len %d)\n",
378 print_sipc4_fmt_frame(data);
381 skb_queue_tail(&iod->sk_rx_q,
382 fragdata(iod, ld)->skb_recv);
383 mif_debug("wake up wq of %s\n", iod->name);
/* first frame of a multi-frame sequence: allocate the reassembly skb */
387 struct fmt_hdr *fh = NULL;
388 skb = rx_alloc_skb(MAX_MULTI_FMT_SIZE, iod, ld);
390 mif_err("<%d> alloc_skb fail\n",
396 fh = (struct fmt_hdr *)data;
397 mif_info("Start multi-frame (ID %d, len %d)",
402 /* Start multi-frame processing */
/* append this fragment's payload and release the link-layer skb */
404 memcpy(skb_put(skb, rcvd), data, rcvd);
405 dev_kfree_skb_any(fragdata(iod, ld)->skb_recv);
407 if (fh->control & 0x80) {
408 /* The last frame has not arrived yet. */
409 mif_info("Receiving (ID %d, %d bytes)\n",
412 /* It is the last frame because the "more" bit is 0. */
413 mif_info("The Last (ID %d, %d bytes received)\n",
416 mif_err("\n<%s> Rx FMT frame (len %d)\n",
417 iod->name, skb->len);
418 print_sipc4_fmt_frame(skb->data);
421 skb_queue_tail(&iod->sk_rx_q, skb);
423 mif_info("wake up wq of %s\n", iod->name);
/* SIPC 4.2 variant of rx_multi_fmt_frame(): the top 2 bits of fh->len
 * select the FMT sub-channel (ld->fmt_iods[ch] -> real_iod); frames are
 * queued to real_iod's rx queue instead of iod's.
 * NOTE(review): line 486 frees fragdata(real_iod, ld)->skb_recv while the
 * sibling rx_multi_fmt_frame() frees fragdata(iod, ld)->skb_recv — confirm
 * which iod owns the link-layer skb here. */
430 static int rx_multi_fmt_frame_sipc42(struct sk_buff *rx_skb)
432 struct io_device *iod = skbpriv(rx_skb)->iod;
433 struct link_device *ld = skbpriv(rx_skb)->ld;
435 (struct fmt_hdr *)fragdata(iod, ld)->h_data.hdr;
436 unsigned int id = fh->control & 0x7F;
437 struct sk_buff *skb = iod->skb[id];
438 unsigned char *data = fragdata(iod, ld)->skb_recv->data;
439 unsigned int rcvd = fragdata(iod, ld)->skb_recv->len;
442 struct io_device *real_iod = NULL;
/* channel in len[15:14]; actual length in len[13:0] */
444 ch = (fh->len & 0xC000) >> 14;
445 fh->len = fh->len & 0x3FFF;
446 real_iod = ld->fmt_iods[ch];
448 mif_err("wrong channel %d\n", ch);
451 skbpriv(rx_skb)->real_iod = real_iod;
454 /* If there has been no multiple frame with this ID */
455 if (!(fh->control & 0x80)) {
456 /* It is a single frame because the "more" bit is 0. */
458 mif_err("\n<%s> Rx FMT frame (len %d)\n",
460 print_sipc4_fmt_frame(data);
463 skb_queue_tail(&real_iod->sk_rx_q,
464 fragdata(iod, ld)->skb_recv);
465 mif_debug("wake up wq of %s\n", iod->name);
466 wake_up(&real_iod->wq);
469 struct fmt_hdr *fh = NULL;
470 skb = rx_alloc_skb(MAX_MULTI_FMT_SIZE, real_iod, ld);
472 mif_err("alloc_skb fail\n");
475 real_iod->skb[id] = skb;
477 fh = (struct fmt_hdr *)data;
478 mif_err("Start multi-frame (ID %d, len %d)",
483 /* Start multi-frame processing */
485 memcpy(skb_put(skb, rcvd), data, rcvd);
486 dev_kfree_skb_any(fragdata(real_iod, ld)->skb_recv);
488 if (fh->control & 0x80) {
489 /* The last frame has not arrived yet. */
490 mif_err("Receiving (ID %d, %d bytes)\n",
493 /* It is the last frame because the "more" bit is 0. */
494 mif_err("The Last (ID %d, %d bytes received)\n",
497 mif_err("\n<%s> Rx FMT frame (len %d)\n",
498 iod->name, skb->len);
499 print_sipc4_fmt_frame(skb->data);
502 skb_queue_tail(&real_iod->sk_rx_q, skb);
503 real_iod->skb[id] = NULL;
504 mif_info("wake up wq of %s\n", real_iod->name);
505 wake_up(&real_iod->wq);
/* Delivers one RAW skb to its destination io_device: misc-type devices get
 * it queued on sk_rx_q; net-type devices get IP-version detection, optional
 * fake ethernet header (handover mode), stats update and netif_rx_ni().
 * Drops the packet if the device is not open. */
511 static int rx_iodev_skb_raw(struct sk_buff *skb)
514 struct io_device *iod = skbpriv(skb)->iod;
515 struct net_device *ndev = NULL;
516 struct iphdr *ip_header = NULL;
517 struct ethhdr *ehdr = NULL;
518 const char source[ETH_ALEN] = SOURCE_MAC_ADDR;
520 /* check the real_iod is open? */
522 if (atomic_read(&iod->opened) == 0) {
523 mif_err("<%s> is not opened.\n",
525 pr_skb("drop packet", skb);
530 switch (iod->io_typ) {
532 mif_debug("<%s> sk_rx_q.qlen = %d\n",
533 iod->name, iod->sk_rx_q.qlen);
534 skb_queue_tail(&iod->sk_rx_q, skb);
539 pr_skb("rx_iodev_skb_raw", skb);
542 mif_err("<%s> ndev == NULL",
548 ndev->stats.rx_packets++;
549 ndev->stats.rx_bytes += skb->len;
551 /* check the version of IP */
552 ip_header = (struct iphdr *)skb->data;
553 if (ip_header->version == IP6VERSION)
554 skb->protocol = htons(ETH_P_IPV6);
556 skb->protocol = htons(ETH_P_IP);
/* handover mode: prepend a synthetic ethernet header so the bridge
 * stack accepts the frame, then strip it again before netif_rx */
558 if (iod->use_handover) {
559 skb_push(skb, sizeof(struct ethhdr));
560 ehdr = (void *)skb->data;
561 memcpy(ehdr->h_dest, ndev->dev_addr, ETH_ALEN);
562 memcpy(ehdr->h_source, source, ETH_ALEN);
563 ehdr->h_proto = skb->protocol;
564 skb->ip_summed = CHECKSUM_UNNECESSARY;
565 skb_reset_mac_header(skb);
567 skb_pull(skb, sizeof(struct ethhdr));
573 err = netif_rx_ni(skb);
575 if (err != NET_RX_SUCCESS)
576 dev_err(&ndev->dev, "rx error: %d\n", err);
581 mif_err("wrong io_type : %d\n", iod->io_typ);
/* Delayed-work handler: drains iod->sk_rx_q through rx_iodev_skb_raw().
 * On NET_RX_DROP the work is re-scheduled 100 ms later (backpressure);
 * other errors free the skb. */
586 static void rx_iodev_work(struct work_struct *work)
589 struct sk_buff *skb = NULL;
590 struct io_device *iod = container_of(work, struct io_device,
593 while ((skb = skb_dequeue(&iod->sk_rx_q)) != NULL) {
594 ret = rx_iodev_skb_raw(skb);
596 mif_err("<%s> rx_iodev_skb_raw err = %d",
598 dev_kfree_skb_any(skb);
599 } else if (ret == NET_RX_DROP) {
600 mif_err("<%s> ret == NET_RX_DROP\n",
602 schedule_delayed_work(&iod->rx_work,
603 msecs_to_jiffies(100));
/* De-mux for multi-PDP RAW frames: reads the channel from the stored raw
 * header, resolves the destination io_device (0x20 | ch), tags the skb with
 * it, and defers delivery to rx_iodev_work. Loopback channel is special-
 * cased when a loopback address is configured. */
609 static int rx_multipdp(struct sk_buff *skb)
612 struct io_device *iod = skbpriv(skb)->iod;
613 struct link_device *ld = skbpriv(skb)->ld;
614 struct raw_hdr *raw_header =
615 (struct raw_hdr *)fragdata(iod, ld)->h_data.hdr;
616 struct io_device *real_iod = NULL;
618 ch = raw_header->channel;
619 if (ch == DATA_LOOPBACK_CHANNEL && ld->msd->loopback_ipaddr)
622 real_iod = link_get_iod_with_channel(ld, 0x20 | ch);
624 mif_err("wrong channel %d\n", ch);
628 skbpriv(skb)->real_iod = real_iod;
629 skb_queue_tail(&iod->sk_rx_q, skb);
630 mif_debug("sk_rx_qlen:%d\n", iod->sk_rx_q.qlen);
632 schedule_delayed_work(&iod->rx_work, 0);
636 /* de-mux function draft */
/* Dispatches a received skb by io_device format: one path queues and kicks
 * the delayed rx work, the other queues and relies on the reader's waitqueue
 * (case labels are not visible in this listing). */
637 static int rx_iodev_skb(struct sk_buff *skb)
639 struct io_device *iod = skbpriv(skb)->iod;
641 switch (iod->format) {
644 skb_queue_tail(&iod->sk_rx_q, skb);
645 mif_debug("sk_rx_qlen:%d\n", iod->sk_rx_q.qlen);
647 schedule_delayed_work(&iod->rx_work, 0);
651 skb_queue_tail(&iod->sk_rx_q, skb);
652 mif_debug("wake up wq of %s\n", iod->name);
/* Main HDLC RX state machine. Loops over the received buffer:
 *   head check (skipped while a fragment is pending) -> data copy ->
 *   tail check -> padding skip -> hand complete frame to rx_iodev_skb()
 * and then resets the per-(iod,ld) fragment state. -ENOMEM paths preserve
 * fragment state for retry; other errors drop the pending skb. */
658 static int rx_hdlc_packet(struct io_device *iod, struct link_device *ld,
659 const char *data, unsigned recv_size)
661 int rest = (int)recv_size;
662 char *buf = (char *)data;
670 mif_debug("RX_SIZE = %d, ld: %s\n", rest, ld->name);
672 if (fragdata(iod, ld)->h_data.frag_len) {
674 If the fragdata(iod, ld)->h_data.frag_len field is
675 not zero, there is a HDLC frame that is waiting for more data
676 or HDLC_END in the skb (fragdata(iod, ld)->skb_recv).
677 In this case, rx_hdlc_head_check() must be skipped.
683 err = len = rx_hdlc_head_check(iod, ld, buf, rest);
686 mif_debug("check len : %d, rest : %d (%d)\n", len, rest,
696 If the return value of rx_hdlc_data_check() is zero, there remains
697 only HDLC_END that will be received.
699 err = len = rx_hdlc_data_check(iod, ld, buf, rest);
702 mif_debug("check len : %d, rest : %d (%d)\n", len, rest,
708 if (!rest && fragdata(iod, ld)->h_data.frag_len) {
710 Data is being received and more data or HDLC_END does not
711 arrive yet, but there is no more data in the buffer. More
712 data may come within the next frame from the link device.
715 } else if (rest <= 0)
718 /* At this point, one HDLC frame except HDLC_END has been received. */
720 err = len = rx_hdlc_tail_check(buf);
722 mif_err("Wrong HDLC end: 0x%02X\n", *buf);
725 mif_debug("check len : %d, rest : %d (%d)\n", len, rest,
730 /* At this point, one complete HDLC frame has been received. */
733 The padding size is applied for the next HDLC frame. Zero will be
734 returned by calc_padding_size() if the link device does not require
735 4-byte aligned access.
737 rcvd = get_hdlc_size(iod, fragdata(iod, ld)->h_data.hdr) +
738 (SIZE_OF_HDLC_START + SIZE_OF_HDLC_END);
739 len = calc_padding_size(iod, ld, rcvd);
745 err = rx_iodev_skb(fragdata(iod, ld)->skb_recv);
749 /* initialize header & skb */
750 fragdata(iod, ld)->skb_recv = NULL;
751 memset(&fragdata(iod, ld)->h_data, 0x00,
752 sizeof(struct header_data));
753 fragdata(iod, ld)->realloc_offset = 0;
759 /* free buffers. mipi-hsi re-use recv buf */
/* -ENOMEM with no partial fragment: safe to clear header state only */
764 if (err == -ENOMEM) {
765 if (!(fragdata(iod, ld)->h_data.frag_len))
766 memset(&fragdata(iod, ld)->h_data, 0x00,
767 sizeof(struct header_data));
/* any other error: drop the half-built skb and reset all state */
771 if (err < 0 && fragdata(iod, ld)->skb_recv) {
772 dev_kfree_skb_any(fragdata(iod, ld)->skb_recv);
773 fragdata(iod, ld)->skb_recv = NULL;
776 memset(&fragdata(iod, ld)->h_data, 0x00,
777 sizeof(struct header_data));
778 fragdata(iod, ld)->realloc_offset = 0;
/* RFS RX path: expects one whole frame in `data`. Validates HDLC_START at
 * the front, then locates HDLC_END at the tail allowing up to 3 trailing
 * padding bytes. Copies the interior (size minus delimiters minus padding)
 * into a fresh skb and forwards it via rx_iodev_skb(). */
784 static int rx_rfs_packet(struct io_device *iod, struct link_device *ld,
785 const char *data, unsigned size)
792 if (data[0] != HDLC_START) {
793 mif_err("Dropping RFS packet ... "
794 "size = %d, start = %02X %02X %02X %02X\n",
796 data[0], data[1], data[2], data[3]);
/* END flag may be displaced by up to 3 link-padding bytes */
800 if (data[size-1] != HDLC_END) {
801 for (pad = 1; pad < 4; pad++)
802 if (data[(size-1)-pad] == HDLC_END)
806 char *b = (char *)data;
808 mif_err("size %d, No END_FLAG!!!\n", size);
809 mif_err("end = %02X %02X %02X %02X\n",
810 b[sz-4], b[sz-3], b[sz-2], b[sz-1]);
813 mif_info("padding = %d\n", pad);
817 skb = rx_alloc_skb(size, iod, ld);
818 if (unlikely(!skb)) {
819 mif_err("alloc_skb fail\n");
823 /* copy the RFS haeder to skb->data */
824 rcvd = size - sizeof(hdlc_start) - sizeof(hdlc_end) - pad;
825 memcpy(skb_put(skb, rcvd), ((char *)data + sizeof(hdlc_start)), rcvd);
827 fragdata(iod, ld)->skb_recv = skb;
828 err = rx_iodev_skb(fragdata(iod, ld)->skb_recv);
833 /* called from link device when a packet arrives for this io device */
/* Link-device RX callback, dispatched on iod->format:
 *  - one path treats `data` as an already-built skb and de-muxes it;
 *  - bypass/RFS paths chop `len` bytes into MAX_RXDATA_SIZE skbs;
 *  - boot path tries a single skb of `len`, falling back to fragmented
 *    MAX_RXDATA_SIZE skbs if the large allocation fails.
 * Case labels for the switch are not visible in this listing. */
834 static int io_dev_recv_data_from_link_dev(struct io_device *iod,
835 struct link_device *ld, const char *data, unsigned int len)
839 unsigned int alloc_size, rest_len;
842 switch (iod->format) {
846 wake_lock_timeout(&iod->wakelock, iod->waketime);
847 err = rx_iodev_skb((struct sk_buff *)data);
849 mif_err("fail process RX ether packet\n");
854 /* alloc 3.5K a page.. in case of BYPASS,RFS */
855 /* should be smaller than user alloc size */
856 if (len >= MAX_RXDATA_SIZE)
857 mif_info("(%d)more than 3.5K, alloc 3.5K pages\n", len);
861 alloc_size = min_t(unsigned int, MAX_RXDATA_SIZE,
863 skb = rx_alloc_skb(alloc_size, iod, ld);
865 mif_err("fail alloc skb (%d)\n", __LINE__);
868 mif_debug("bypass/rfs len : %d\n", alloc_size);
870 memcpy(skb_put(skb, alloc_size), cur, alloc_size);
871 skb_queue_tail(&iod->sk_rx_q, skb);
872 mif_debug("skb len : %d\n", skb->len);
874 rest_len -= alloc_size;
881 /* save packet to sk_buff */
882 skb = rx_alloc_skb(len, iod, ld);
884 mif_debug("boot len : %d\n", len);
886 memcpy(skb_put(skb, len), data, len);
887 skb_queue_tail(&iod->sk_rx_q, skb);
888 mif_debug("skb len : %d\n", skb->len);
893 /* 32KB page alloc fail case, alloc 3.5K a page.. */
894 mif_info("(%d)page fail, alloc fragment pages\n", len);
899 alloc_size = min_t(unsigned int, MAX_RXDATA_SIZE,
901 skb = rx_alloc_skb(alloc_size, iod, ld);
903 mif_err("fail alloc skb (%d)\n", __LINE__);
906 mif_debug("boot len : %d\n", alloc_size);
908 memcpy(skb_put(skb, alloc_size), cur, alloc_size);
909 skb_queue_tail(&iod->sk_rx_q, skb);
910 mif_debug("skb len : %d\n", skb->len);
912 rest_len -= alloc_size;
923 /* inform the IO device that the modem is now online or offline or
924 * crashing or whatever...
/* Records the new phone_state on the modem controller; crash/NV-rebuild
 * states additionally trigger the (not visible here) wake-up path. */
926 static void io_dev_modem_state_changed(struct io_device *iod,
927 enum modem_state state)
929 iod->mc->phone_state = state;
930 mif_err("modem state changed. (iod: %s, state: %d)\n",
933 if ((state == STATE_CRASH_RESET) || (state == STATE_CRASH_EXIT)
934 || (state == STATE_NV_REBUILDING))
939 * io_dev_sim_state_changed
940 * @iod: IPC's io_device
941 * @sim_online: SIM is online?
/* Updates mc->sim_state when the SIM hot-plug state actually changes and
 * the device is open; holds the boot device's wakelock so userspace can
 * observe the change (misc_poll reports sim_state.changed). */
943 static void io_dev_sim_state_changed(struct io_device *iod, bool sim_online)
945 if (atomic_read(&iod->opened) == 0) {
946 mif_err("iod is not opened: %s\n",
948 } else if (iod->mc->sim_state.online == sim_online) {
949 mif_err("sim state not changed.\n");
951 iod->mc->sim_state.online = sim_online;
952 iod->mc->sim_state.changed = true;
953 wake_lock_timeout(&iod->mc->bootd->wakelock,
954 iod->mc->bootd->waketime);
955 mif_err("sim state changed. (iod: %s, state: "
956 "[online=%d, changed=%d])\n",
957 iod->name, iod->mc->sim_state.online,
958 iod->mc->sim_state.changed);
/* misc-device open: bumps the open count and runs init_comm() on every
 * connected link device for this io_device. */
963 static int misc_open(struct inode *inode, struct file *filp)
965 struct io_device *iod = to_io_device(filp->private_data);
966 struct modem_shared *msd = iod->msd;
967 struct link_device *ld;
969 filp->private_data = (void *)iod;
971 mif_err("iod = %s\n", iod->name);
972 atomic_inc(&iod->opened);
974 list_for_each_entry(ld, &msd->link_dev_list, list) {
975 if (IS_CONNECTED(iod, ld) && ld->init_comm) {
976 ret = ld->init_comm(ld, iod);
978 mif_err("%s: init_comm error: %d\n",
/* misc-device release: drops the open count, purges any queued RX skbs,
 * and runs terminate_comm() on every connected link device. */
988 static int misc_release(struct inode *inode, struct file *filp)
990 struct io_device *iod = (struct io_device *)filp->private_data;
991 struct modem_shared *msd = iod->msd;
992 struct link_device *ld;
994 mif_err("iod = %s\n", iod->name);
995 atomic_dec(&iod->opened);
996 skb_queue_purge(&iod->sk_rx_q);
998 list_for_each_entry(ld, &msd->link_dev_list, list) {
999 if (IS_CONNECTED(iod, ld) && ld->terminate_comm)
1000 ld->terminate_comm(ld, iod);
/* poll(): readable when RX data is queued and the phone is not offline;
 * crash / NV-rebuild / SIM-change states are also reported (for non-bypass
 * formats) so userspace can react to modem events. */
1006 static unsigned int misc_poll(struct file *filp, struct poll_table_struct *wait)
1008 struct io_device *iod = (struct io_device *)filp->private_data;
1010 poll_wait(filp, &iod->wq, wait);
1012 if ((!skb_queue_empty(&iod->sk_rx_q)) &&
1013 (iod->mc->phone_state != STATE_OFFLINE)) {
1014 return POLLIN | POLLRDNORM;
1015 } else if ((iod->mc->phone_state == STATE_CRASH_RESET) ||
1016 (iod->mc->phone_state == STATE_CRASH_EXIT) ||
1017 (iod->mc->phone_state == STATE_NV_REBUILDING) ||
1018 (iod->mc->sim_state.changed)) {
1019 if (iod->format != IPC_BYPASS) {
/* ioctl dispatcher for the misc device. Power/boot commands delegate to
 * the modem controller ops; status reports the phone/SIM state; dump and
 * log commands copy sizes/data to userspace; unknown commands fall through
 * to the link device's own ld->ioctl handler when present.
 * NOTE(review): line 1158 prints `arg` (unsigned long) with %d — format
 * mismatch; should be %lu. Left untouched in this doc-only pass. */
1029 static long misc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1032 struct io_device *iod = (struct io_device *)filp->private_data;
1033 struct link_device *ld = get_current_link(iod);
1034 char cpinfo_buf[530] = "CP Crash ";
1038 mif_debug("cmd = 0x%x\n", cmd);
1041 case IOCTL_MODEM_ON:
1042 mif_debug("misc_ioctl : IOCTL_MODEM_ON\n");
1043 return iod->mc->ops.modem_on(iod->mc);
1045 case IOCTL_MODEM_OFF:
1046 mif_debug("misc_ioctl : IOCTL_MODEM_OFF\n");
1047 return iod->mc->ops.modem_off(iod->mc);
1049 case IOCTL_MODEM_RESET:
1050 mif_debug("misc_ioctl : IOCTL_MODEM_RESET\n");
1051 return iod->mc->ops.modem_reset(iod->mc);
1053 case IOCTL_MODEM_BOOT_ON:
1054 mif_debug("misc_ioctl : IOCTL_MODEM_BOOT_ON\n");
1055 return iod->mc->ops.modem_boot_on(iod->mc);
1057 case IOCTL_MODEM_BOOT_OFF:
1058 mif_debug("misc_ioctl : IOCTL_MODEM_BOOT_OFF\n");
1059 return iod->mc->ops.modem_boot_off(iod->mc);
1061 /* TODO - will remove this command after ril updated */
1062 case IOCTL_MODEM_BOOT_DONE:
1063 mif_debug("misc_ioctl : IOCTL_MODEM_BOOT_DONE\n");
1066 case IOCTL_MODEM_STATUS:
1067 mif_debug("misc_ioctl : IOCTL_MODEM_STATUS\n");
1069 p_state = iod->mc->phone_state;
1070 if ((p_state == STATE_CRASH_RESET) ||
1071 (p_state == STATE_CRASH_EXIT)) {
1072 mif_err("<%s> send err state : %d\n",
1073 iod->name, p_state);
/* a pending SIM change is reported once, then cleared */
1074 } else if (iod->mc->sim_state.changed) {
1075 int s_state = iod->mc->sim_state.online ?
1076 STATE_SIM_ATTACH : STATE_SIM_DETACH;
1077 iod->mc->sim_state.changed = false;
1079 } else if (p_state == STATE_NV_REBUILDING) {
1080 mif_info("send nv rebuild state : %d\n",
/* NV rebuild is a transient state; report it once then go online */
1082 iod->mc->phone_state = STATE_ONLINE;
1086 #if 0 /*for tizen modem*/
1087 case IOCTL_MODEM_PROTOCOL_SUSPEND:
1088 mif_info("misc_ioctl : IOCTL_MODEM_PROTOCOL_SUSPEND\n");
1090 if (iod->format != IPC_MULTI_RAW)
1093 iodevs_for_each(iod->msd, iodev_netif_stop, 0);
1096 case IOCTL_MODEM_PROTOCOL_RESUME:
1097 mif_info("misc_ioctl : IOCTL_MODEM_PROTOCOL_RESUME\n");
1099 if (iod->format != IPC_MULTI_RAW)
1102 iodevs_for_each(iod->msd, iodev_netif_wake, 0);
1106 case IOCTL_MODEM_DUMP_START:
1107 mif_err("misc_ioctl : IOCTL_MODEM_DUMP_START\n");
1108 return ld->dump_start(ld, iod);
1110 case IOCTL_MODEM_DUMP_UPDATE:
1111 mif_debug("misc_ioctl : IOCTL_MODEM_DUMP_UPDATE\n");
1112 return ld->dump_update(ld, iod, arg);
1114 case IOCTL_MODEM_FORCE_CRASH_EXIT:
1115 mif_debug("misc_ioctl : IOCTL_MODEM_FORCE_CRASH_EXIT\n");
1116 if (iod->mc->ops.modem_force_crash_exit)
1117 return iod->mc->ops.modem_force_crash_exit(iod->mc);
1120 case IOCTL_MODEM_CP_UPLOAD:
1121 mif_err("misc_ioctl : IOCTL_MODEM_CP_UPLOAD\n");
1122 if (copy_from_user(cpinfo_buf + strlen(cpinfo_buf),
1123 (void __user *)arg, MAX_CPINFO_SIZE) != 0)
1129 case IOCTL_MODEM_DUMP_RESET:
1130 mif_err("misc_ioctl : IOCTL_MODEM_DUMP_RESET\n");
1131 return iod->mc->ops.modem_dump_reset(iod->mc);
1133 case IOCTL_MIF_LOG_DUMP:
1134 size = MAX_MIF_BUFF_SIZE;
1135 ret = copy_to_user((void __user *)arg, &size,
1136 sizeof(unsigned long));
1140 mif_dump_log(iod->mc->msd, iod);
1143 case IOCTL_MIF_DPRAM_DUMP:
1144 #ifdef CONFIG_LINK_DEVICE_DPRAM
1145 if (iod->mc->mdm_data->link_types & LINKTYPE(LINKDEV_DPRAM)) {
1146 size = iod->mc->mdm_data->dpram_ctl->dp_size;
1147 ret = copy_to_user((void __user *)arg, &size,
1148 sizeof(unsigned long));
1151 mif_dump_dpram(iod);
1157 case IOCTL_CG_DATA_SEND:
1158 mif_info("misc_ioctl : IOCTL_CG_DATA_SEND, arg = %d\n", arg);
1159 send_cg_data(ld, iod, arg);
1163 /* If you need to handle the ioctl for specific link device,
1164 * then assign the link ioctl handler to ld->ioctl
1165 * It will be call for specific link ioctl */
1167 return ld->ioctl(ld, iod, cmd, arg);
1169 mif_err("misc_ioctl : ioctl 0x%X is not defined.\n", cmd);
/* write(): frames userspace data for TX. Allocates an skb sized for
 * HDLC_START + IPC header + payload + HDLC_END (+ link padding); one
 * format path copies the payload bare, the other wraps it with the full
 * HDLC envelope; then tags the skb and hands it to ld->send().
 * NOTE(review): line 1245 prints size_t `count` with %d — prefer %zu. */
1175 static ssize_t misc_write(struct file *filp, const char __user *buf,
1176 size_t count, loff_t *ppos)
1178 struct io_device *iod = (struct io_device *)filp->private_data;
1179 struct link_device *ld = get_current_link(iod);
1181 char frame_header_buf[sizeof(struct raw_hdr)];
1182 struct sk_buff *skb;
1186 /* TODO - check here flow control for only raw data */
1188 frame_len = SIZE_OF_HDLC_START +
1189 get_header_size(iod) +
1193 frame_len += MAX_LINK_PADDING_SIZE;
1195 skb = alloc_skb(frame_len, GFP_KERNEL);
1197 mif_err("fail alloc skb (%d)\n", __LINE__);
1201 switch (iod->format) {
/* raw/bypass path: payload only, no HDLC envelope */
1206 if (copy_from_user(skb_put(skb, count), buf, count) != 0) {
1207 dev_kfree_skb_any(skb);
/* framed path: START byte + IPC header + payload + END byte */
1213 memcpy(skb_put(skb, SIZE_OF_HDLC_START), hdlc_start,
1214 SIZE_OF_HDLC_START);
1215 memcpy(skb_put(skb, get_header_size(iod)),
1216 get_header(iod, count, frame_header_buf),
1217 get_header_size(iod));
1218 if (copy_from_user(skb_put(skb, count), buf, count) != 0) {
1219 dev_kfree_skb_any(skb);
1222 memcpy(skb_put(skb, SIZE_OF_HDLC_END), hdlc_end,
1227 skb_put(skb, calc_padding_size(iod, ld, skb->len));
1229 /* send data with sk_buff, link device will put sk_buff
1230 * into the specific sk_buff_q and run work-q to send data
1234 skbpriv(skb)->iod = iod;
1235 skbpriv(skb)->ld = ld;
1237 err = ld->send(ld, iod, skb);
1239 dev_kfree_skb_any(skb);
1244 mif_err("WARNNING: wrong tx size: %s, format=%d "
1245 "count=%d, tx_size=%d, return_size=%d",
1246 iod->name, iod->format, count, tx_size, err);
/* read(): dequeues skbs from sk_rx_q into the user buffer.
 * IPC_BOOT behaves like a byte stream: partial skbs are re-queued at the
 * head and multiple skbs may be consumed to satisfy `count`. Other formats
 * are packet-oriented: an skb larger than `count` is an error and dropped. */
1251 static ssize_t misc_read(struct file *filp, char *buf, size_t count,
1254 struct io_device *iod = (struct io_device *)filp->private_data;
1255 struct sk_buff *skb = NULL;
1257 unsigned int rest_len, copy_len;
1260 skb = skb_dequeue(&iod->sk_rx_q);
1262 mif_err("<%s> no data from sk_rx_q\n", iod->name);
1265 mif_debug("<%s> skb->len : %d\n", iod->name, skb->len);
1267 if (iod->format == IPC_BOOT) {
1268 pktsize = rest_len = count;
1270 if (skb->len > rest_len) {
1271 /* BOOT device receviced rx data as serial
1272 stream, return data by User requested size */
1273 mif_err("skb->len %d > count %d\n", skb->len,
1275 pr_skb("BOOT-wRX", skb);
1276 if (copy_to_user(cur, skb->data, rest_len)
1278 dev_kfree_skb_any(skb);
/* keep the unread remainder for the next read() */
1282 skb_pull(skb, rest_len);
1284 mif_info("queue-head, skb->len = %d\n",
1286 skb_queue_head(&iod->sk_rx_q, skb);
1288 mif_debug("return %u\n", rest_len);
1292 copy_len = min(rest_len, skb->len);
1293 if (copy_to_user(cur, skb->data, copy_len) != 0) {
1294 dev_kfree_skb_any(skb);
1298 dev_kfree_skb_any(skb);
1299 rest_len -= copy_len;
/* try to fill the remaining user buffer from further queued skbs */
1304 skb = skb_dequeue(&iod->sk_rx_q);
1306 mif_err("<%s> %d / %d sk_rx_q\n", iod->name,
1307 (count - rest_len), count);
1308 return count - rest_len;
1312 if (skb->len > count) {
1313 mif_err("<%s> skb->len %d > count %d\n", iod->name,
1315 dev_kfree_skb_any(skb);
1319 if (copy_to_user(buf, skb->data, pktsize) != 0) {
1320 dev_kfree_skb_any(skb);
1323 if (iod->format == IPC_BYPASS)
1324 mif_debug("copied %d bytes to user\n", pktsize);
1326 dev_kfree_skb_any(skb);
1331 #ifdef CONFIG_LINK_DEVICE_C2C
/* C2C-only mmap: maps the CP+shared memory region (starting at
 * C2C_CP_RGN_ADDR) into userspace, non-cached, after bounds-checking the
 * requested offset+size against the total region size. */
1332 static int misc_mmap(struct file *filp, struct vm_area_struct *vma)
1335 unsigned long size = 0;
1336 unsigned long pfn = 0;
1337 unsigned long offset = 0;
1338 struct io_device *iod = (struct io_device *)filp->private_data;
1343 size = vma->vm_end - vma->vm_start;
1344 offset = vma->vm_pgoff << PAGE_SHIFT;
1345 if (offset + size > (C2C_CP_RGN_SIZE + C2C_SH_RGN_SIZE)) {
1346 mif_err("offset + size > C2C_CP_RGN_SIZE\n");
1350 /* Set the noncacheable property to the region */
1351 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1352 vma->vm_flags |= VM_RESERVED | VM_IO;
1354 pfn = __phys_to_pfn(C2C_CP_RGN_ADDR + offset);
1355 r = remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot);
1357 mif_err("Failed in remap_pfn_range()!!!\n");
1361 mif_err("VA = 0x%08lx, offset = 0x%lx, size = %lu\n",
1362 vma->vm_start, offset, size);
/* file_operations for the misc character device; mmap is only wired in
 * when CONFIG_LINK_DEVICE_C2C is enabled. */
1368 static const struct file_operations misc_io_fops = {
1369 .owner = THIS_MODULE,
1371 .release = misc_release,
1373 .unlocked_ioctl = misc_ioctl,
1374 .write = misc_write,
1376 #ifdef CONFIG_LINK_DEVICE_C2C
/* net_device open: start the TX queue and count the iod as opened. */
1381 static int vnet_open(struct net_device *ndev)
1383 struct vnet *vnet = netdev_priv(ndev);
1384 netif_start_queue(ndev);
1385 atomic_inc(&vnet->iod->opened);
/* net_device stop: drop the open count and stop the TX queue. */
1389 static int vnet_stop(struct net_device *ndev)
1391 struct vnet *vnet = netdev_priv(ndev);
1392 atomic_dec(&vnet->iod->opened);
1393 netif_stop_queue(ndev);
/* net_device TX: wraps an outgoing IP packet in raw_hdr + HDLC delimiters
 * (+ padding) and hands it to ld->send(). Handles the loopback address by
 * swapping src/dst and routing to DATA_LOOPBACK_CHANNEL; in handover mode
 * the extra ethernet header added by the bridge is stripped first.
 * NOTE(review): line 1470 reads skb->len after the original skb may have
 * been freed on the skb_copy_expand path (line 1446) — confirm against the
 * full source whether `skb` can be stale here. */
1397 static int vnet_xmit(struct sk_buff *skb, struct net_device *ndev)
1402 struct sk_buff *skb_new = NULL;
1403 struct vnet *vnet = netdev_priv(ndev);
1404 struct io_device *iod = vnet->iod;
1405 struct link_device *ld = get_current_link(iod);
1407 struct iphdr *ip_header = NULL;
1410 if (iod->io_typ == IODEV_NET) {
1415 /* When use `handover' with Network Bridge,
1416 * user -> TCP/IP(kernel) -> bridge device -> TCP/IP(kernel) -> this.
1418 * We remove the one ethernet header of skb before using skb->len,
1419 * because the skb has two ethernet headers.
1421 if (iod->use_handover) {
1422 if (iod->id >= PSD_DATA_CHID_BEGIN &&
1423 iod->id <= PSD_DATA_CHID_END)
1424 skb_pull(skb, sizeof(struct ethhdr));
1428 ip_header = (struct iphdr *)skb->data;
/* loopback: reflect the packet by swapping addresses */
1429 if (iod->msd->loopback_ipaddr &&
1430 ip_header->daddr == iod->msd->loopback_ipaddr) {
1431 swap(ip_header->saddr, ip_header->daddr);
1432 hd.channel = DATA_LOOPBACK_CHANNEL;
1434 hd.channel = iod->id & 0x1F;
1436 hd.len = skb->len + sizeof(hd);
1439 headroom = sizeof(hd) + sizeof(hdlc_start);
1440 tailroom = sizeof(hdlc_end);
1442 tailroom += MAX_LINK_PADDING_SIZE;
/* ensure room for the envelope; re-allocate if the skb is too tight */
1443 if (skb_headroom(skb) < headroom || skb_tailroom(skb) < tailroom) {
1444 skb_new = skb_copy_expand(skb, headroom, tailroom, GFP_ATOMIC);
1445 /* skb_copy_expand success or not, free old skb from caller */
1446 dev_kfree_skb_any(skb);
1452 memcpy(skb_push(skb_new, sizeof(hd)), &hd, sizeof(hd));
1453 memcpy(skb_push(skb_new, sizeof(hdlc_start)), hdlc_start,
1454 sizeof(hdlc_start));
1455 memcpy(skb_put(skb_new, sizeof(hdlc_end)), hdlc_end, sizeof(hdlc_end));
1456 skb_put(skb_new, calc_padding_size(iod, ld, skb_new->len));
1459 skbpriv(skb_new)->iod = iod;
1460 skbpriv(skb_new)->ld = ld;
1462 ret = ld->send(ld, iod, skb_new);
1464 netif_stop_queue(ndev);
1465 dev_kfree_skb_any(skb_new);
1466 return NETDEV_TX_BUSY;
1469 ndev->stats.tx_packets++;
1470 ndev->stats.tx_bytes += skb->len;
1472 return NETDEV_TX_OK;
/* net_device_ops shared by both vnet setups below. */
1475 static struct net_device_ops vnet_ops = {
1476 .ndo_open = vnet_open,
1477 .ndo_stop = vnet_stop,
1478 .ndo_start_xmit = vnet_xmit,
/* Default netdev setup: point-to-point PPP-type device with no link-layer
 * header (raw IP over the modem link). */
1481 static void vnet_setup(struct net_device *ndev)
1483 ndev->netdev_ops = &vnet_ops;
1484 ndev->type = ARPHRD_PPP;
1485 ndev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
1487 ndev->hard_header_len = 0;
1488 ndev->tx_queue_len = 1000;
1489 ndev->mtu = ETH_DATA_LEN;
1490 ndev->watchdog_timeo = 5 * HZ;
/* Handover-mode netdev setup: ethernet-type slave device with a random MAC
 * so it can participate in a network bridge (see vnet_xmit's handover path). */
1493 static void vnet_setup_ether(struct net_device *ndev)
1495 ndev->netdev_ops = &vnet_ops;
1496 ndev->type = ARPHRD_ETHER;
1497 ndev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST | IFF_SLAVE;
1498 ndev->addr_len = ETH_ALEN;
1499 random_ether_addr(ndev->dev_addr);
1500 ndev->hard_header_len = 0;
1501 ndev->tx_queue_len = 1000;
1502 ndev->mtu = ETH_DATA_LEN;
1503 ndev->watchdog_timeo = 5 * HZ;
/* Public init entry point for one io_device: wires the state-change and RX
 * callbacks, then registers either a misc char device or a net device
 * depending on iod->io_typ. The misc paths also create the 'waketime' and
 * 'loopback' sysfs attributes; the net path picks the ether-style setup
 * when handover is enabled. Returns 0 on success, negative errno otherwise. */
1506 int sipc4_init_io_device(struct io_device *iod)
1511 /* Get modem state from modem control device */
1512 iod->modem_state_changed = io_dev_modem_state_changed;
1514 iod->sim_state_changed = io_dev_sim_state_changed;
1516 /* Get data from link device */
1517 iod->recv = io_dev_recv_data_from_link_dev;
1519 /* Register misc or net device */
1520 switch (iod->io_typ) {
1522 init_waitqueue_head(&iod->wq);
1523 skb_queue_head_init(&iod->sk_rx_q);
1524 INIT_DELAYED_WORK(&iod->rx_work, rx_iodev_work);
1526 iod->miscdev.minor = MISC_DYNAMIC_MINOR;
1527 iod->miscdev.name = iod->name;
1528 iod->miscdev.fops = &misc_io_fops;
1530 ret = misc_register(&iod->miscdev);
1532 mif_err("failed to register misc io device : %s\n",
1538 skb_queue_head_init(&iod->sk_rx_q);
1539 INIT_DELAYED_WORK(&iod->rx_work, rx_iodev_work);
1540 if (iod->use_handover)
1541 iod->ndev = alloc_netdev(0, iod->name,
1544 iod->ndev = alloc_netdev(0, iod->name, vnet_setup);
1547 mif_err("failed to alloc netdev\n");
1551 ret = register_netdev(iod->ndev);
1553 free_netdev(iod->ndev);
1555 mif_debug("(iod:0x%p)\n", iod);
1556 vnet = netdev_priv(iod->ndev);
1557 mif_debug("(vnet:0x%p)\n", vnet);
1562 skb_queue_head_init(&iod->sk_rx_q);
1563 INIT_DELAYED_WORK(&iod->rx_work, rx_iodev_work);
1565 iod->miscdev.minor = MISC_DYNAMIC_MINOR;
1566 iod->miscdev.name = iod->name;
1567 iod->miscdev.fops = &misc_io_fops;
1569 ret = misc_register(&iod->miscdev);
1571 mif_err("failed to register misc io device : %s\n",
1573 ret = device_create_file(iod->miscdev.this_device,
1576 mif_err("failed to create `waketime' file : %s\n",
1578 ret = device_create_file(iod->miscdev.this_device,
1581 mif_err("failed to create `loopback file' : %s\n",
1586 mif_err("wrong io_type : %d\n", iod->io_typ);
1590 mif_debug("%s(%d) : init_io_device() done : %d\n",
1591 iod->name, iod->io_typ, ret);