1 /* /linux/drivers/misc/modem_if/modem_io_device.c
3 * Copyright (C) 2010 Google, Inc.
4 * Copyright (C) 2010 Samsung Electronics.
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
17 #include <linux/init.h>
18 #include <linux/sched.h>
20 #include <linux/poll.h>
21 #include <linux/irq.h>
22 #include <linux/gpio.h>
23 #include <linux/if_arp.h>
25 #include <linux/if_ether.h>
26 #include <linux/etherdevice.h>
27 #include <linux/device.h>
29 #include <linux/platform_data/modem_tizen.h>
30 #ifdef CONFIG_LINK_DEVICE_C2C
31 #include <linux/platform_data/c2c.h>
33 #include "modem_prj.h"
34 #include "modem_utils.h"
 37  * MAX_RXDATA_SIZE is used at making skb, when it called with page size
 38  * it need more bytes to allocate itself (Ex, cache byte, shared info,
 40  * So, give restriction to allocation size below 1 page to prevent
/* 0x0E00 = 3584 bytes: one 4 KiB page minus 512 bytes reserved for skb
 * bookkeeping overhead, so one RX skb allocation never exceeds a page. */
43 #define MAX_RXDATA_SIZE 0x0E00 /* 4 * 1024 - 512 */
/* Upper bound for a fully reassembled multi-frame FMT message (16 KiB). */
44 #define MAX_MULTI_FMT_SIZE 0x4000 /* 16 * 1024 */
/* One-byte HDLC frame delimiters used when building/stripping TX/RX frames.
 * The actual HDLC_START/HDLC_END values are defined in modem_prj.h (not
 * visible in this excerpt). */
46 static const char hdlc_start[1] = { HDLC_START };
47 static const char hdlc_end[1] = { HDLC_END };
/* Forward declaration: de-mux entry point used by the RX path below. */
49 static int rx_iodev_skb(struct sk_buff *skb);
/* sysfs 'waketime' show handler: reports the io_device's RX wake-lock hold
 * time converted from jiffies to milliseconds.
 * NOTE(review): several lines of this function are missing from this
 * excerpt (locals, container_of() tail, return). */
51 static ssize_t show_waketime(struct device *dev,
52 struct device_attribute *attr, char *buf)
56 struct miscdevice *miscdev = dev_get_drvdata(dev);
57 struct io_device *iod = container_of(miscdev, struct io_device,
60 msec = jiffies_to_msecs(iod->waketime);
62 p += sprintf(buf, "raw waketime : %ums\n", msec);
/* sysfs 'waketime' store handler: parses a decimal millisecond value from
 * userspace and stores it on the io_device as jiffies.
 * strict_strtoul() is the legacy kernel string-to-ulong API (pre-kstrtoul).
 * NOTE(review): error-check of 'ret' and the return are not visible here. */
67 static ssize_t store_waketime(struct device *dev,
68 struct device_attribute *attr, const char *buf, size_t count)
72 struct miscdevice *miscdev = dev_get_drvdata(dev);
73 struct io_device *iod = container_of(miscdev, struct io_device,
76 ret = strict_strtoul(buf, 10, &msec);
80 iod->waketime = msecs_to_jiffies(msec);
/* sysfs attribute: /sys/.../waketime, world-readable, owner-writable. */
85 static struct device_attribute attr_waketime =
86 __ATTR(waketime, S_IRUGO | S_IWUSR, show_waketime, store_waketime);
/* sysfs 'loopback' show handler: prints the shared loopback IPv4 address
 * in dotted-quad form. The address is stored big-endian, so byte-wise
 * printing of ip[0..3] yields the conventional ordering. */
88 static ssize_t show_loopback(struct device *dev,
89 struct device_attribute *attr, char *buf)
91 struct miscdevice *miscdev = dev_get_drvdata(dev);
92 struct modem_shared *msd =
93 container_of(miscdev, struct io_device, miscdev)->msd;
94 unsigned char *ip = (unsigned char *)&msd->loopback_ipaddr;
97 p += sprintf(buf, "%u.%u.%u.%u\n", ip[0], ip[1], ip[2], ip[3]);
/* sysfs 'loopback' store handler: parses a dotted-quad IPv4 string from
 * userspace and stores it big-endian on the shared modem data. A non-zero
 * value enables the data-loopback path in rx_multipdp()/vnet_xmit(). */
102 static ssize_t store_loopback(struct device *dev,
103 struct device_attribute *attr, const char *buf, size_t count)
105 struct miscdevice *miscdev = dev_get_drvdata(dev);
106 struct modem_shared *msd =
107 container_of(miscdev, struct io_device, miscdev)->msd;
109 msd->loopback_ipaddr = ipv4str_to_be32(buf, count);
/* sysfs attribute: /sys/.../loopback, world-readable, owner-writable. */
114 static struct device_attribute attr_loopback =
115 __ATTR(loopback, S_IRUGO | S_IWUSR, show_loopback, store_loopback);
/* Return the IPC header size for this io_device's format.
 * Only the RAW case is visible here; other format cases (FMT/RFS/...) are
 * missing from this excerpt, as is the #if 0 block's content. */
117 static int get_header_size(struct io_device *iod)
119 switch (iod->format) {
120 #if 0 /*for tizen modem*/
123 return sizeof(struct raw_hdr);
126 /* minimum size for transaction align */
/* Extract the frame length from a stored IPC header according to the
 * io_device's format. 'buf' points at the saved header bytes
 * (fragdata h_data.hdr). Only the RAW case is visible in this excerpt;
 * fmt_header/rfs_header are used by cases not shown here. */
137 static int get_hdlc_size(struct io_device *iod, char *buf)
139 struct fmt_hdr *fmt_header;
140 struct raw_hdr *raw_header;
141 struct rfs_hdr *rfs_header;
143 mif_debug("buf : %02x %02x %02x (%d)\n", *buf, *(buf + 1),
144 *(buf + 2), __LINE__);
146 switch (iod->format) {
147 #if 0 /*for tizen modem*/
150 raw_header = (struct raw_hdr *)buf;
151 return raw_header->len;
/* Build an IPC header for a TX payload of 'count' bytes into the caller's
 * frame_header_buf and return a pointer to it. For RAW frames: total length
 * includes the raw header itself, and the channel is the iod id masked to
 * 5 bits (0x1F). Other format cases are not visible in this excerpt. */
159 static void *get_header(struct io_device *iod, size_t count,
160 char *frame_header_buf)
162 struct fmt_hdr *fmt_h;
163 struct raw_hdr *raw_h;
164 struct rfs_hdr *rfs_h;
166 switch (iod->format) {
167 #if 0 /*for tizen modem*/
170 raw_h = (struct raw_hdr *)frame_header_buf;
172 raw_h->len = count + sizeof(struct raw_hdr);
173 raw_h->channel = iod->id & 0x1F;
176 return (void *)frame_header_buf;
/* Number of pad bytes needed to round 'len' up to a 4-byte boundary.
 * (4 - (len & 3)) & 3 yields 0..3; presumably gated on a link-device
 * "needs aligned access" check on a line not visible here. */
183 static inline int calc_padding_size(struct io_device *iod,
184 struct link_device *ld, unsigned len)
187 return (4 - (len & 0x3)) & 0x3;
/* Validate the HDLC start flag at buf[0]; returns the number of start
 * bytes consumed on success, -EBADMSG on mismatch. */
192 static inline int rx_hdlc_head_start_check(char *buf)
194 /* check hdlc head and return size of start byte */
195 return (buf[0] == HDLC_START) ? SIZE_OF_HDLC_START : -EBADMSG;
/* Validate the HDLC end flag at buf[0]; returns the number of tail bytes
 * consumed on success, -EBADMSG on mismatch. */
198 static inline int rx_hdlc_tail_check(char *buf)
200 /* check hdlc tail and return size of tail byte */
201 return (buf[0] == HDLC_END) ? SIZE_OF_HDLC_END : -EBADMSG;
204 /* remove hdlc header and store IPC header */
/* Consume the HDLC start byte from 'buf' and accumulate up to head_size
 * bytes of the IPC header into the per-(iod,ld) fragment state (h_data),
 * so a header split across link transfers is reassembled. Returns a
 * negative errno on a bad start flag; the consumed-length bookkeeping
 * (done_len) return path is not fully visible in this excerpt. */
205 static int rx_hdlc_head_check(struct io_device *iod, struct link_device *ld,
206 char *buf, unsigned rest)
208 struct header_data *hdr = &fragdata(iod, ld)->h_data;
209 int head_size = get_header_size(iod);
213 /* first frame, remove start header 7F */
215 len = rx_hdlc_head_start_check(buf);
217 mif_err("Wrong HDLC start: 0x%x\n", *buf);
218 return len; /*Wrong hdlc start*/
221 mif_debug("check len : %d, rest : %d (%d)\n", len,
224 /* set the start flag of current packet */
225 hdr->start = HDLC_START;
229 switch (iod->format) {
234 /* TODO: print buf... */
245 rest -= len; /* rest, call by value */
248 mif_debug("check len : %d, rest : %d (%d)\n",
249 len, rest, __LINE__);
251 /* store the HDLC header to iod priv */
/* Copy only as much header as is available in this chunk. */
252 if (hdr->len < head_size) {
253 len = min(rest, head_size - hdr->len);
254 memcpy(hdr->hdr + hdr->len, buf, len);
259 mif_debug("check done_len : %d, rest : %d (%d)\n", done_len,
264 /* alloc skb and copy data to skb */
/* Copy payload bytes of the current HDLC frame from 'buf' into the
 * per-(iod,ld) receive skb, allocating it on first use and re-allocating
 * continuation skbs when a frame exceeds MAX_RXDATA_SIZE. realloc_offset
 * records progress so an -ENOMEM retry can resume where it left off.
 * NOTE(review): numerous lines (returns, done_len updates, loop/branch
 * structure) are missing from this excerpt. */
265 static int rx_hdlc_data_check(struct io_device *iod, struct link_device *ld,
266 char *buf, unsigned rest)
268 struct header_data *hdr = &fragdata(iod, ld)->h_data;
269 struct sk_buff *skb = fragdata(iod, ld)->skb_recv;
270 int head_size = get_header_size(iod);
271 int data_size = get_hdlc_size(iod, hdr->hdr) - head_size;
275 int rest_len = data_size - hdr->frag_len;
276 int continue_len = fragdata(iod, ld)->realloc_offset;
278 mif_debug("head_size : %d, data_size : %d (%d)\n", head_size,
279 data_size, __LINE__);
282 /* check the HDLC header*/
/* A retry that still sees the start flag must skip start + header again. */
283 if (rx_hdlc_head_start_check(buf) == SIZE_OF_HDLC_START) {
284 rest_len -= (head_size + SIZE_OF_HDLC_START);
285 continue_len += (head_size + SIZE_OF_HDLC_START);
289 rest -= continue_len;
290 done_len += continue_len;
291 fragdata(iod, ld)->realloc_offset = 0;
293 mif_debug("realloc_offset = %d\n", continue_len);
296 /* first payload data - alloc skb */
298 /* make skb data size under MAX_RXDATA_SIZE */
299 alloc_size = min(data_size, MAX_RXDATA_SIZE);
300 alloc_size = min(alloc_size, rest_len);
302 /* allocate first packet for data, when its size exceed
303 * MAX_RXDATA_SIZE, this packet will split to
306 skb = rx_alloc_skb(alloc_size, iod, ld);
/* On allocation failure remember the offset so the caller can retry. */
307 if (unlikely(!skb)) {
308 fragdata(iod, ld)->realloc_offset = continue_len;
311 fragdata(iod, ld)->skb_recv = skb;
315 /* copy length cannot exceed rest_len */
316 len = min_t(int, rest_len, rest);
317 /* copy length should be under skb tailroom size */
318 len = min(len, skb_tailroom(skb));
319 /* when skb tailroom is bigger than MAX_RXDATA_SIZE
320 * restrict its size to MAX_RXDATA_SIZE just for convinience */
321 len = min(len, MAX_RXDATA_SIZE);
323 /* copy bytes to skb */
324 memcpy(skb_put(skb, len), buf, len);
326 /* adjusting variables */
331 hdr->frag_len += len;
333 /* check if it is final for this packet sequence */
334 if (!rest_len || !rest)
337 /* more bytes are remain for this packet sequence
338 * pass fully loaded skb to rx queue
339 * and allocate another skb for continues data recv chain
342 fragdata(iod, ld)->skb_recv = NULL;
344 alloc_size = min(rest_len, MAX_RXDATA_SIZE);
346 skb = rx_alloc_skb(alloc_size, iod, ld);
347 if (unlikely(!skb)) {
348 fragdata(iod, ld)->realloc_offset = done_len;
351 fragdata(iod, ld)->skb_recv = skb;
354 mif_debug("rest : %d, alloc_size : %d , len : %d (%d)\n",
355 rest, alloc_size, skb->len, __LINE__);
/* Reassemble a (possibly multi-part) FMT frame. The frame ID is the low
 * 7 bits of fh->control; bit 0x80 is the "more frames follow" flag.
 * Single frames are queued directly; multi-part frames are accumulated
 * into a MAX_MULTI_FMT_SIZE skb held in iod->skb[id] until the last part
 * arrives. NOTE(review): many lines (braces, else branches, returns,
 * skb[id] bookkeeping) are missing from this excerpt. */
360 static int rx_multi_fmt_frame(struct sk_buff *rx_skb)
362 struct io_device *iod = skbpriv(rx_skb)->iod;
363 struct link_device *ld = skbpriv(rx_skb)->ld;
365 (struct fmt_hdr *)fragdata(iod, ld)->h_data.hdr;
366 unsigned int id = fh->control & 0x7F;
367 struct sk_buff *skb = iod->skb[id];
368 unsigned char *data = fragdata(iod, ld)->skb_recv->data;
369 unsigned int rcvd = fragdata(iod, ld)->skb_recv->len;
372 /* If there has been no multiple frame with this ID */
373 if (!(fh->control & 0x80)) {
374 /* It is a single frame because the "more" bit is 0. */
376 mif_err("\n<%s> Rx FMT frame (len %d)\n",
378 print_sipc4_fmt_frame(data);
381 skb_queue_tail(&iod->sk_rx_q,
382 fragdata(iod, ld)->skb_recv);
383 mif_debug("wake up wq of %s\n", iod->name);
/* First part of a new multi-frame sequence: allocate the accumulator. */
387 struct fmt_hdr *fh = NULL;
388 skb = rx_alloc_skb(MAX_MULTI_FMT_SIZE, iod, ld);
390 mif_err("<%d> alloc_skb fail\n",
396 fh = (struct fmt_hdr *)data;
397 mif_info("Start multi-frame (ID %d, len %d)",
402 /* Start multi-frame processing */
404 memcpy(skb_put(skb, rcvd), data, rcvd);
405 dev_kfree_skb_any(fragdata(iod, ld)->skb_recv);
407 if (fh->control & 0x80) {
408 /* The last frame has not arrived yet. */
409 mif_info("Receiving (ID %d, %d bytes)\n",
412 /* It is the last frame because the "more" bit is 0. */
413 mif_info("The Last (ID %d, %d bytes received)\n",
416 mif_err("\n<%s> Rx FMT frame (len %d)\n",
417 iod->name, skb->len);
418 print_sipc4_fmt_frame(skb->data);
421 skb_queue_tail(&iod->sk_rx_q, skb);
423 mif_info("wake up wq of %s\n", iod->name);
/* SIPC 4.2 variant of rx_multi_fmt_frame(): the top 2 bits of fh->len
 * select one of four FMT channels (ld->fmt_iods[ch]); the remaining 14
 * bits are the real length. Frames are routed to the per-channel
 * 'real_iod' queue rather than the demux iod's.
 * NOTE(review): braces, else branches and returns are missing here. */
430 static int rx_multi_fmt_frame_sipc42(struct sk_buff *rx_skb)
432 struct io_device *iod = skbpriv(rx_skb)->iod;
433 struct link_device *ld = skbpriv(rx_skb)->ld;
435 (struct fmt_hdr *)fragdata(iod, ld)->h_data.hdr;
436 unsigned int id = fh->control & 0x7F;
437 struct sk_buff *skb = iod->skb[id];
438 unsigned char *data = fragdata(iod, ld)->skb_recv->data;
439 unsigned int rcvd = fragdata(iod, ld)->skb_recv->len;
442 struct io_device *real_iod = NULL;
/* Channel is encoded in the top 2 bits of the 16-bit length field. */
444 ch = (fh->len & 0xC000) >> 14;
445 fh->len = fh->len & 0x3FFF;
446 real_iod = ld->fmt_iods[ch];
448 mif_err("wrong channel %d\n", ch);
451 skbpriv(rx_skb)->real_iod = real_iod;
454 /* If there has been no multiple frame with this ID */
455 if (!(fh->control & 0x80)) {
456 /* It is a single frame because the "more" bit is 0. */
458 mif_err("\n<%s> Rx FMT frame (len %d)\n",
460 print_sipc4_fmt_frame(data);
463 skb_queue_tail(&real_iod->sk_rx_q,
464 fragdata(iod, ld)->skb_recv);
465 mif_debug("wake up wq of %s\n", iod->name);
466 wake_up(&real_iod->wq);
/* First part of a multi-frame sequence: allocate the accumulator. */
469 struct fmt_hdr *fh = NULL;
470 skb = rx_alloc_skb(MAX_MULTI_FMT_SIZE, real_iod, ld);
472 mif_err("alloc_skb fail\n");
475 real_iod->skb[id] = skb;
477 fh = (struct fmt_hdr *)data;
478 mif_err("Start multi-frame (ID %d, len %d)",
483 /* Start multi-frame processing */
485 memcpy(skb_put(skb, rcvd), data, rcvd);
486 dev_kfree_skb_any(fragdata(real_iod, ld)->skb_recv);
488 if (fh->control & 0x80) {
489 /* The last frame has not arrived yet. */
490 mif_err("Receiving (ID %d, %d bytes)\n",
493 /* It is the last frame because the "more" bit is 0. */
494 mif_err("The Last (ID %d, %d bytes received)\n",
497 mif_err("\n<%s> Rx FMT frame (len %d)\n",
498 iod->name, skb->len);
499 print_sipc4_fmt_frame(skb->data);
502 skb_queue_tail(&real_iod->sk_rx_q, skb);
503 real_iod->skb[id] = NULL;
504 mif_info("wake up wq of %s\n", real_iod->name);
505 wake_up(&real_iod->wq);
/* Deliver a demultiplexed RAW skb to its destination io_device: either a
 * misc-device RX queue (userspace read path) or a virtual net device
 * (netif_rx_ni). Drops the packet if the device is not open.
 * NOTE(review): switch-case labels, wake_up, returns and several braces
 * are missing from this excerpt. */
511 static int rx_iodev_skb_raw(struct sk_buff *skb)
514 struct io_device *iod = skbpriv(skb)->iod;
515 struct net_device *ndev = NULL;
516 struct iphdr *ip_header = NULL;
517 struct ethhdr *ehdr = NULL;
518 const char source[ETH_ALEN] = SOURCE_MAC_ADDR;
520 /* check the real_iod is open? */
522 if (atomic_read(&iod->opened) == 0) {
523 mif_err("<%s> is not opened.\n",
525 pr_skb("drop packet", skb);
530 switch (iod->io_typ) {
532 mif_debug("<%s> sk_rx_q.qlen = %d\n",
533 iod->name, iod->sk_rx_q.qlen);
534 skb_queue_tail(&iod->sk_rx_q, skb);
539 pr_skb("rx_iodev_skb_raw", skb);
542 mif_err("<%s> ndev == NULL",
548 ndev->stats.rx_packets++;
549 ndev->stats.rx_bytes += skb->len;
551 /* check the version of IP */
552 ip_header = (struct iphdr *)skb->data;
553 if (ip_header->version == IP6VERSION)
554 skb->protocol = htons(ETH_P_IPV6);
556 skb->protocol = htons(ETH_P_IP);
/* Handover mode: synthesize an Ethernet header so the bridge accepts it. */
558 if (iod->use_handover) {
559 skb_push(skb, sizeof(struct ethhdr));
560 ehdr = (void *)skb->data;
561 memcpy(ehdr->h_dest, ndev->dev_addr, ETH_ALEN);
562 memcpy(ehdr->h_source, source, ETH_ALEN);
563 ehdr->h_proto = skb->protocol;
564 skb->ip_summed = CHECKSUM_UNNECESSARY;
565 skb_reset_mac_header(skb);
567 skb_pull(skb, sizeof(struct ethhdr));
573 err = netif_rx_ni(skb);
575 if (err != NET_RX_SUCCESS)
576 dev_err(&ndev->dev, "rx error: %d\n", err);
581 mif_err("wrong io_type : %d\n", iod->io_typ);
/* Delayed-work handler draining iod->sk_rx_q through rx_iodev_skb_raw().
 * On NET_RX_DROP the work is rescheduled 100 ms later (backpressure);
 * other errors free the skb. NOTE(review): the requeue of the skb before
 * rescheduling, and loop braces, are not visible in this excerpt. */
586 static void rx_iodev_work(struct work_struct *work)
589 struct sk_buff *skb = NULL;
590 struct io_device *iod = container_of(work, struct io_device,
593 while ((skb = skb_dequeue(&iod->sk_rx_q)) != NULL) {
594 ret = rx_iodev_skb_raw(skb);
596 mif_err("<%s> rx_iodev_skb_raw err = %d",
598 dev_kfree_skb_any(skb);
599 } else if (ret == NET_RX_DROP) {
600 mif_err("<%s> ret == NET_RX_DROP\n",
602 schedule_delayed_work(&iod->rx_work,
603 msecs_to_jiffies(100));
/* Route a completed RAW (multi-PDP) frame: read the channel from the saved
 * raw header, look up the destination iod (channel | 0x20), stash it in
 * skbpriv->real_iod, queue on the demux iod's RX queue and kick the
 * delayed work. Loopback channel handling is gated on a configured
 * loopback address. */
609 static int rx_multipdp(struct sk_buff *skb)
612 struct io_device *iod = skbpriv(skb)->iod;
613 struct link_device *ld = skbpriv(skb)->ld;
614 struct raw_hdr *raw_header =
615 (struct raw_hdr *)fragdata(iod, ld)->h_data.hdr;
616 struct io_device *real_iod = NULL;
618 ch = raw_header->channel;
619 if (ch == DATA_LOOPBACK_CHANNEL && ld->msd->loopback_ipaddr)
/* 0x20 | ch maps the 5-bit RAW channel into the PS-data iod id space. */
622 real_iod = link_get_iod_with_channel(ld, 0x20 | ch);
624 mif_err("wrong channel %d\n", ch);
628 skbpriv(skb)->real_iod = real_iod;
629 skb_queue_tail(&iod->sk_rx_q, skb);
630 mif_debug("sk_rx_qlen:%d\n", iod->sk_rx_q.qlen);
632 schedule_delayed_work(&iod->rx_work, 0);
636 /* de-mux function draft */
/* Dispatch a received skb by the owning io_device's format: RAW frames go
 * through the delayed-work path, other formats are queued directly and the
 * reader is woken. Case labels and the wake_up call are missing from this
 * excerpt. */
637 static int rx_iodev_skb(struct sk_buff *skb)
639 struct io_device *iod = skbpriv(skb)->iod;
641 switch (iod->format) {
644 skb_queue_tail(&iod->sk_rx_q, skb);
645 mif_debug("sk_rx_qlen:%d\n", iod->sk_rx_q.qlen);
647 schedule_delayed_work(&iod->rx_work, 0);
651 skb_queue_tail(&iod->sk_rx_q, skb);
652 mif_debug("wake up wq of %s\n", iod->name);
/* Top-level HDLC RX state machine: walk the received buffer, consuming
 * start flag + header (rx_hdlc_head_check), payload (rx_hdlc_data_check)
 * and end flag (rx_hdlc_tail_check) per frame, handing each completed
 * frame to rx_iodev_skb() and resetting the per-(iod,ld) fragment state.
 * On -ENOMEM the fragment state is preserved so the caller can retry.
 * NOTE(review): the enclosing loop, goto labels and several returns are
 * missing from this excerpt. */
658 static int rx_hdlc_packet(struct io_device *iod, struct link_device *ld,
659 const char *data, unsigned recv_size)
661 int rest = (int)recv_size;
662 char *buf = (char *)data;
670 mif_debug("RX_SIZE = %d, ld: %s\n", rest, ld->name);
672 if (fragdata(iod, ld)->h_data.frag_len) {
674 If the fragdata(iod, ld)->h_data.frag_len field is
675 not zero, there is a HDLC frame that is waiting for more data
676 or HDLC_END in the skb (fragdata(iod, ld)->skb_recv).
677 In this case, rx_hdlc_head_check() must be skipped.
683 err = len = rx_hdlc_head_check(iod, ld, buf, rest);
686 mif_debug("check len : %d, rest : %d (%d)\n", len, rest,
696 If the return value of rx_hdlc_data_check() is zero, there remains
697 only HDLC_END that will be received.
699 err = len = rx_hdlc_data_check(iod, ld, buf, rest);
702 mif_debug("check len : %d, rest : %d (%d)\n", len, rest,
708 if (!rest && fragdata(iod, ld)->h_data.frag_len) {
710 Data is being received and more data or HDLC_END does not
711 arrive yet, but there is no more data in the buffer. More
712 data may come within the next frame from the link device.
715 } else if (rest <= 0)
718 /* At this point, one HDLC frame except HDLC_END has been received. */
720 err = len = rx_hdlc_tail_check(buf);
722 mif_err("Wrong HDLC end: 0x%02X\n", *buf);
725 mif_debug("check len : %d, rest : %d (%d)\n", len, rest,
730 /* At this point, one complete HDLC frame has been received. */
733 The padding size is applied for the next HDLC frame. Zero will be
734 returned by calc_padding_size() if the link device does not require
735 4-byte aligned access.
737 rcvd = get_hdlc_size(iod, fragdata(iod, ld)->h_data.hdr) +
738 (SIZE_OF_HDLC_START + SIZE_OF_HDLC_END);
739 len = calc_padding_size(iod, ld, rcvd);
745 err = rx_iodev_skb(fragdata(iod, ld)->skb_recv);
749 /* initialize header & skb */
750 fragdata(iod, ld)->skb_recv = NULL;
751 memset(&fragdata(iod, ld)->h_data, 0x00,
752 sizeof(struct header_data));
753 fragdata(iod, ld)->realloc_offset = 0;
759 /* free buffers. mipi-hsi re-use recv buf */
/* -ENOMEM: keep fragment state intact (unless no frame was in flight)
 * so the link layer can redeliver and we resume via realloc_offset. */
764 if (err == -ENOMEM) {
765 if (!(fragdata(iod, ld)->h_data.frag_len))
766 memset(&fragdata(iod, ld)->h_data, 0x00,
767 sizeof(struct header_data));
771 if (err < 0 && fragdata(iod, ld)->skb_recv) {
772 dev_kfree_skb_any(fragdata(iod, ld)->skb_recv);
773 fragdata(iod, ld)->skb_recv = NULL;
776 memset(&fragdata(iod, ld)->h_data, 0x00,
777 sizeof(struct header_data));
778 fragdata(iod, ld)->realloc_offset = 0;
/* Receive one complete RFS frame delivered in a single buffer: verify the
 * HDLC start/end flags (scanning up to 3 trailing pad bytes for the end
 * flag), strip delimiters and padding, copy the remainder into a fresh skb
 * and pass it up via rx_iodev_skb(). NOTE(review): braces, early returns
 * and the final return are missing from this excerpt. */
784 static int rx_rfs_packet(struct io_device *iod, struct link_device *ld,
785 const char *data, unsigned size)
792 if (data[0] != HDLC_START) {
793 mif_err("Dropping RFS packet ... "
794 "size = %d, start = %02X %02X %02X %02X\n",
796 data[0], data[1], data[2], data[3]);
/* End flag may be displaced by up to 3 link-padding bytes. */
800 if (data[size-1] != HDLC_END) {
801 for (pad = 1; pad < 4; pad++)
802 if (data[(size-1)-pad] == HDLC_END)
806 char *b = (char *)data;
808 mif_err("size %d, No END_FLAG!!!\n", size);
809 mif_err("end = %02X %02X %02X %02X\n",
810 b[sz-4], b[sz-3], b[sz-2], b[sz-1]);
813 mif_info("padding = %d\n", pad);
817 skb = rx_alloc_skb(size, iod, ld);
818 if (unlikely(!skb)) {
819 mif_err("alloc_skb fail\n");
823 /* copy the RFS haeder to skb->data */
824 rcvd = size - sizeof(hdlc_start) - sizeof(hdlc_end) - pad;
825 memcpy(skb_put(skb, rcvd), ((char *)data + sizeof(hdlc_start)), rcvd);
827 fragdata(iod, ld)->skb_recv = skb;
828 err = rx_iodev_skb(fragdata(iod, ld)->skb_recv);
833 /* called from link device when a packet arrives for this io device */
/* Link-device RX entry point. Per format: some formats take a wake-lock
 * and hand an already-built skb to rx_iodev_skb(); BOOT-style data is
 * copied into one skb (or, on large-allocation failure, split into
 * MAX_RXDATA_SIZE fragments) and queued for the reader.
 * NOTE(review): case labels, wake_up calls, the fragment loop header and
 * returns are missing from this excerpt. */
834 static int io_dev_recv_data_from_link_dev(struct io_device *iod,
835 struct link_device *ld, const char *data, unsigned int len)
839 unsigned int alloc_size, rest_len;
842 switch (iod->format) {
846 wake_lock_timeout(&iod->wakelock, iod->waketime);
847 err = rx_iodev_skb((struct sk_buff *)data);
849 mif_err("fail process RX ether packet\n");
856 /* save packet to sk_buff */
857 skb = rx_alloc_skb(len, iod, ld);
859 mif_debug("boot len : %d\n", len);
861 memcpy(skb_put(skb, len), data, len);
862 skb_queue_tail(&iod->sk_rx_q, skb);
863 mif_debug("skb len : %d\n", skb->len);
868 /* 32KB page alloc fail case, alloc 3.5K a page.. */
869 mif_info("(%d)page fail, alloc fragment pages\n", len);
874 alloc_size = min_t(unsigned int, MAX_RXDATA_SIZE,
876 skb = rx_alloc_skb(alloc_size, iod, ld);
878 mif_err("fail alloc skb (%d)\n", __LINE__);
881 mif_debug("boot len : %d\n", alloc_size);
883 memcpy(skb_put(skb, alloc_size), cur, alloc_size);
884 skb_queue_tail(&iod->sk_rx_q, skb);
885 mif_debug("skb len : %d\n", skb->len);
887 rest_len -= alloc_size;
898 /* inform the IO device that the modem is now online or offline or
899 * crashing or whatever...
/* Record the new modem state on the modem controller and (for crash/NV
 * states) notify waiters -- the wake_up call itself is on a line not
 * visible in this excerpt. */
901 static void io_dev_modem_state_changed(struct io_device *iod,
902 enum modem_state state)
904 iod->mc->phone_state = state;
905 mif_err("modem state changed. (iod: %s, state: %d)\n",
908 if ((state == STATE_CRASH_RESET) || (state == STATE_CRASH_EXIT)
909 || (state == STATE_NV_REBUILDING))
914  * io_dev_sim_state_changed
915  * @iod: IPC's io_device
916  * @sim_online: SIM is online?
/* Update SIM hot-swap state: no-op if the device is closed or the state is
 * unchanged; otherwise record online/changed, hold the boot device's
 * wake-lock briefly, and (presumably on a missing line) wake the poller. */
918 static void io_dev_sim_state_changed(struct io_device *iod, bool sim_online)
920 if (atomic_read(&iod->opened) == 0) {
921 mif_err("iod is not opened: %s\n",
923 } else if (iod->mc->sim_state.online == sim_online) {
924 mif_err("sim state not changed.\n");
926 iod->mc->sim_state.online = sim_online;
927 iod->mc->sim_state.changed = true;
928 wake_lock_timeout(&iod->mc->bootd->wakelock,
929 iod->mc->bootd->waketime);
930 mif_err("sim state changed. (iod: %s, state: "
931 "[online=%d, changed=%d])\n",
932 iod->name, iod->mc->sim_state.online,
933 iod->mc->sim_state.changed);
/* misc device open(): bind filp->private_data to the io_device, bump the
 * open count, and run init_comm on every connected link device.
 * NOTE(review): error unwinding after a failed init_comm and the return
 * are not visible in this excerpt. */
938 static int misc_open(struct inode *inode, struct file *filp)
940 struct io_device *iod = to_io_device(filp->private_data);
941 struct modem_shared *msd = iod->msd;
942 struct link_device *ld;
944 filp->private_data = (void *)iod;
946 mif_err("iod = %s\n", iod->name);
947 atomic_inc(&iod->opened);
949 list_for_each_entry(ld, &msd->link_dev_list, list) {
950 if (IS_CONNECTED(iod, ld) && ld->init_comm) {
951 ret = ld->init_comm(ld, iod);
953 mif_err("%s: init_comm error: %d\n",
/* misc device release(): drop the open count, discard any queued RX skbs,
 * and run terminate_comm on every connected link device. */
963 static int misc_release(struct inode *inode, struct file *filp)
965 struct io_device *iod = (struct io_device *)filp->private_data;
966 struct modem_shared *msd = iod->msd;
967 struct link_device *ld;
969 mif_err("iod = %s\n", iod->name);
970 atomic_dec(&iod->opened);
971 skb_queue_purge(&iod->sk_rx_q);
973 list_for_each_entry(ld, &msd->link_dev_list, list) {
974 if (IS_CONNECTED(iod, ld) && ld->terminate_comm)
975 ld->terminate_comm(ld, iod);
/* poll(): readable when RX data is queued and the phone is not offline;
 * crash/NV-rebuild/SIM-change states also report readiness for non-BYPASS
 * devices so userspace can learn of the state change. The value returned
 * inside the IPC_BYPASS guard is on a line not visible here. */
981 static unsigned int misc_poll(struct file *filp, struct poll_table_struct *wait)
983 struct io_device *iod = (struct io_device *)filp->private_data;
985 poll_wait(filp, &iod->wq, wait);
987 if ((!skb_queue_empty(&iod->sk_rx_q)) &&
988 (iod->mc->phone_state != STATE_OFFLINE)) {
989 return POLLIN | POLLRDNORM;
990 } else if ((iod->mc->phone_state == STATE_CRASH_RESET) ||
991 (iod->mc->phone_state == STATE_CRASH_EXIT) ||
992 (iod->mc->phone_state == STATE_NV_REBUILDING) ||
993 (iod->mc->sim_state.changed)) {
994 if (iod->format != IPC_BYPASS) {
/* unlocked_ioctl handler: dispatches modem power/boot control, status
 * queries, crash/dump handling, log dumps, and link-specific commands
 * (forwarded to ld->ioctl when set). Most cases delegate directly to the
 * modem-controller ops. NOTE(review): numerous lines (returns, braces,
 * copy_to_user of status values, error paths) are missing from this
 * excerpt. */
1004 static long misc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1007 struct io_device *iod = (struct io_device *)filp->private_data;
1008 struct link_device *ld = get_current_link(iod);
1009 char cpinfo_buf[530] = "CP Crash ";
1013 mif_debug("cmd = 0x%x\n", cmd);
1016 case IOCTL_MODEM_ON:
1017 mif_debug("misc_ioctl : IOCTL_MODEM_ON\n");
1018 return iod->mc->ops.modem_on(iod->mc);
1020 case IOCTL_MODEM_OFF:
1021 mif_debug("misc_ioctl : IOCTL_MODEM_OFF\n");
1022 return iod->mc->ops.modem_off(iod->mc);
1024 case IOCTL_MODEM_RESET:
1025 mif_debug("misc_ioctl : IOCTL_MODEM_RESET\n");
1026 return iod->mc->ops.modem_reset(iod->mc);
1028 case IOCTL_MODEM_BOOT_ON:
1029 mif_debug("misc_ioctl : IOCTL_MODEM_BOOT_ON\n");
1030 return iod->mc->ops.modem_boot_on(iod->mc);
1032 case IOCTL_MODEM_BOOT_OFF:
1033 mif_debug("misc_ioctl : IOCTL_MODEM_BOOT_OFF\n");
1034 return iod->mc->ops.modem_boot_off(iod->mc);
1036 /* TODO - will remove this command after ril updated */
1037 case IOCTL_MODEM_BOOT_DONE:
1038 mif_debug("misc_ioctl : IOCTL_MODEM_BOOT_DONE\n");
1041 case IOCTL_MODEM_STATUS:
1042 mif_debug("misc_ioctl : IOCTL_MODEM_STATUS\n");
/* SIM state change is reported once, then the 'changed' latch clears. */
1044 p_state = iod->mc->phone_state;
1045 if ((p_state == STATE_CRASH_RESET) ||
1046 (p_state == STATE_CRASH_EXIT)) {
1047 mif_err("<%s> send err state : %d\n",
1048 iod->name, p_state);
1049 } else if (iod->mc->sim_state.changed) {
1050 int s_state = iod->mc->sim_state.online ?
1051 STATE_SIM_ATTACH : STATE_SIM_DETACH;
1052 iod->mc->sim_state.changed = false;
1054 } else if (p_state == STATE_NV_REBUILDING) {
1055 mif_info("send nv rebuild state : %d\n",
1057 iod->mc->phone_state = STATE_ONLINE;
1061 #if 0 /*for tizen modem*/
1062 case IOCTL_MODEM_PROTOCOL_SUSPEND:
1063 mif_info("misc_ioctl : IOCTL_MODEM_PROTOCOL_SUSPEND\n");
1065 if (iod->format != IPC_MULTI_RAW)
1068 iodevs_for_each(iod->msd, iodev_netif_stop, 0);
1071 case IOCTL_MODEM_PROTOCOL_RESUME:
1072 mif_info("misc_ioctl : IOCTL_MODEM_PROTOCOL_RESUME\n");
1074 if (iod->format != IPC_MULTI_RAW)
1077 iodevs_for_each(iod->msd, iodev_netif_wake, 0);
1081 case IOCTL_MODEM_DUMP_START:
1082 mif_err("misc_ioctl : IOCTL_MODEM_DUMP_START\n");
1083 return ld->dump_start(ld, iod);
1085 case IOCTL_MODEM_DUMP_UPDATE:
1086 mif_debug("misc_ioctl : IOCTL_MODEM_DUMP_UPDATE\n");
1087 return ld->dump_update(ld, iod, arg);
1089 case IOCTL_MODEM_FORCE_CRASH_EXIT:
1090 mif_debug("misc_ioctl : IOCTL_MODEM_FORCE_CRASH_EXIT\n");
1091 if (iod->mc->ops.modem_force_crash_exit)
1092 return iod->mc->ops.modem_force_crash_exit(iod->mc);
1095 case IOCTL_MODEM_CP_UPLOAD:
1096 mif_err("misc_ioctl : IOCTL_MODEM_CP_UPLOAD\n");
/* Append user-supplied crash info after the "CP Crash " prefix. */
1097 if (copy_from_user(cpinfo_buf + strlen(cpinfo_buf),
1098 (void __user *)arg, MAX_CPINFO_SIZE) != 0)
1104 case IOCTL_MODEM_DUMP_RESET:
1105 mif_err("misc_ioctl : IOCTL_MODEM_DUMP_RESET\n");
1106 return iod->mc->ops.modem_dump_reset(iod->mc);
1108 case IOCTL_MIF_LOG_DUMP:
1109 size = MAX_MIF_BUFF_SIZE;
1110 ret = copy_to_user((void __user *)arg, &size,
1111 sizeof(unsigned long));
1115 mif_dump_log(iod->mc->msd, iod);
1118 case IOCTL_MIF_DPRAM_DUMP:
1119 #ifdef CONFIG_LINK_DEVICE_DPRAM
1120 if (iod->mc->mdm_data->link_types & LINKTYPE(LINKDEV_DPRAM)) {
1121 size = iod->mc->mdm_data->dpram_ctl->dp_size;
1122 ret = copy_to_user((void __user *)arg, &size,
1123 sizeof(unsigned long));
1126 mif_dump_dpram(iod);
1132 case IOCTL_CG_DATA_SEND:
1133 mif_info("misc_ioctl : IOCTL_CG_DATA_SEND, arg = %d\n", arg);
1134 send_cg_data(ld, iod, arg);
1138 /* If you need to handle the ioctl for specific link device,
1139 * then assign the link ioctl handler to ld->ioctl
1140 * It will be call for specific link ioctl */
1142 return ld->ioctl(ld, iod, cmd, arg);
1144 mif_err("misc_ioctl : ioctl 0x%X is not defined.\n", cmd);
/* write(): build a TX frame from user data. Non-RAW-ish formats (exact
 * case labels not visible) get a plain copy; the HDLC path wraps the
 * payload as START + header + payload + END, then pads for link alignment.
 * The skb is tagged with iod/ld and handed to ld->send(); a short send is
 * logged as a warning. NOTE(review): case labels, tx_size computation and
 * the final return are missing from this excerpt. */
1150 static ssize_t misc_write(struct file *filp, const char __user *buf,
1151 size_t count, loff_t *ppos)
1153 struct io_device *iod = (struct io_device *)filp->private_data;
1154 struct link_device *ld = get_current_link(iod);
1156 char frame_header_buf[sizeof(struct raw_hdr)];
1157 struct sk_buff *skb;
1161 /* TODO - check here flow control for only raw data */
/* Worst-case frame: start flag + header + payload (+ end + padding). */
1163 frame_len = SIZE_OF_HDLC_START +
1164 get_header_size(iod) +
1168 frame_len += MAX_LINK_PADDING_SIZE;
1170 skb = alloc_skb(frame_len, GFP_KERNEL);
1172 mif_err("fail alloc skb (%d)\n", __LINE__);
1176 switch (iod->format) {
1181 if (copy_from_user(skb_put(skb, count), buf, count) != 0) {
1182 dev_kfree_skb_any(skb);
1188 memcpy(skb_put(skb, SIZE_OF_HDLC_START), hdlc_start,
1189 SIZE_OF_HDLC_START);
1190 memcpy(skb_put(skb, get_header_size(iod)),
1191 get_header(iod, count, frame_header_buf),
1192 get_header_size(iod));
1193 if (copy_from_user(skb_put(skb, count), buf, count) != 0) {
1194 dev_kfree_skb_any(skb);
1197 memcpy(skb_put(skb, SIZE_OF_HDLC_END), hdlc_end,
1202 skb_put(skb, calc_padding_size(iod, ld, skb->len));
1204 /* send data with sk_buff, link device will put sk_buff
1205 * into the specific sk_buff_q and run work-q to send data
1209 skbpriv(skb)->iod = iod;
1210 skbpriv(skb)->ld = ld;
1212 err = ld->send(ld, iod, skb);
1214 dev_kfree_skb_any(skb);
1219 mif_err("WARNNING: wrong tx size: %s, format=%d "
1220 "count=%d, tx_size=%d, return_size=%d",
1221 iod->name, iod->format, count, tx_size, err);
/* read(): pop skbs from the RX queue and copy to userspace. IPC_BOOT
 * devices behave like a byte stream -- an oversized skb is partially
 * consumed (skb_pull) and requeued at the head; multiple skbs are drained
 * until 'count' is satisfied. Other formats deliver exactly one packet and
 * fail if it exceeds the user buffer. NOTE(review): returns, loop
 * structure and several braces are missing from this excerpt. */
1226 static ssize_t misc_read(struct file *filp, char *buf, size_t count,
1229 struct io_device *iod = (struct io_device *)filp->private_data;
1230 struct sk_buff *skb = NULL;
1232 unsigned int rest_len, copy_len;
1235 skb = skb_dequeue(&iod->sk_rx_q);
1237 mif_err("<%s> no data from sk_rx_q\n", iod->name);
1240 mif_debug("<%s> skb->len : %d\n", iod->name, skb->len);
1242 if (iod->format == IPC_BOOT) {
1243 pktsize = rest_len = count;
1245 if (skb->len > rest_len) {
1246 /* BOOT device receviced rx data as serial
1247 stream, return data by User requested size */
1248 mif_err("skb->len %d > count %d\n", skb->len,
1250 pr_skb("BOOT-wRX", skb);
1251 if (copy_to_user(cur, skb->data, rest_len)
1253 dev_kfree_skb_any(skb);
/* Keep the unread remainder at the queue head for the next read(). */
1257 skb_pull(skb, rest_len);
1259 mif_info("queue-head, skb->len = %d\n",
1261 skb_queue_head(&iod->sk_rx_q, skb);
1263 mif_debug("return %u\n", rest_len);
1267 copy_len = min(rest_len, skb->len);
1268 if (copy_to_user(cur, skb->data, copy_len) != 0) {
1269 dev_kfree_skb_any(skb);
1273 dev_kfree_skb_any(skb);
1274 rest_len -= copy_len;
1279 skb = skb_dequeue(&iod->sk_rx_q);
1281 mif_err("<%s> %d / %d sk_rx_q\n", iod->name,
1282 (count - rest_len), count);
1283 return count - rest_len;
/* Non-BOOT formats: one whole packet per read, or error. */
1287 if (skb->len > count) {
1288 mif_err("<%s> skb->len %d > count %d\n", iod->name,
1290 dev_kfree_skb_any(skb);
1294 if (copy_to_user(buf, skb->data, pktsize) != 0) {
1295 dev_kfree_skb_any(skb);
1298 if (iod->format == IPC_BYPASS)
1299 mif_debug("copied %d bytes to user\n", pktsize);
1301 dev_kfree_skb_any(skb);
1306 #ifdef CONFIG_LINK_DEVICE_C2C
/* mmap() for C2C link devices: map the CP + shared memory region
 * (physical base C2C_CP_RGN_ADDR) into userspace, non-cached, after
 * bounds-checking the requested offset+size against the region size. */
1307 static int misc_mmap(struct file *filp, struct vm_area_struct *vma)
1310 unsigned long size = 0;
1311 unsigned long pfn = 0;
1312 unsigned long offset = 0;
1313 struct io_device *iod = (struct io_device *)filp->private_data;
1318 size = vma->vm_end - vma->vm_start;
1319 offset = vma->vm_pgoff << PAGE_SHIFT;
1320 if (offset + size > (C2C_CP_RGN_SIZE + C2C_SH_RGN_SIZE)) {
1321 mif_err("offset + size > C2C_CP_RGN_SIZE\n");
1325 /* Set the noncacheable property to the region */
1326 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1327 vma->vm_flags |= VM_RESERVED | VM_IO;
1329 pfn = __phys_to_pfn(C2C_CP_RGN_ADDR + offset);
1330 r = remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot);
1332 mif_err("Failed in remap_pfn_range()!!!\n");
1336 mif_err("VA = 0x%08lx, offset = 0x%lx, size = %lu\n",
1337 vma->vm_start, offset, size);
/* File operations for the misc (character) interface. Entries for open,
 * poll, read and (under C2C) mmap are on lines not visible here. */
1343 static const struct file_operations misc_io_fops = {
1344 .owner = THIS_MODULE,
1346 .release = misc_release,
1348 .unlocked_ioctl = misc_ioctl,
1349 .write = misc_write,
1351 #ifdef CONFIG_LINK_DEVICE_C2C
/* Net device open: start the TX queue and mark the backing iod opened. */
1356 static int vnet_open(struct net_device *ndev)
1358 struct vnet *vnet = netdev_priv(ndev);
1359 netif_start_queue(ndev);
1360 atomic_inc(&vnet->iod->opened);
/* Net device stop: mark the backing iod closed and stop the TX queue. */
1364 static int vnet_stop(struct net_device *ndev)
1366 struct vnet *vnet = netdev_priv(ndev);
1367 atomic_dec(&vnet->iod->opened);
1368 netif_stop_queue(ndev);
/* ndo_start_xmit: wrap an outgoing IP packet as an HDLC RAW frame
 * (START + raw_hdr + payload + END + padding) and hand it to ld->send().
 * Handover mode strips the duplicate Ethernet header added by the bridge;
 * packets addressed to the configured loopback IP get their src/dst
 * swapped and go out on DATA_LOOPBACK_CHANNEL. Returns NETDEV_TX_BUSY on
 * link failure. NOTE(review): the skb_new==NULL fallback assignment and
 * the old-skb free on the fast path are on lines not visible here. */
1372 static int vnet_xmit(struct sk_buff *skb, struct net_device *ndev)
1377 struct sk_buff *skb_new = NULL;
1378 struct vnet *vnet = netdev_priv(ndev);
1379 struct io_device *iod = vnet->iod;
1380 struct link_device *ld = get_current_link(iod);
1382 struct iphdr *ip_header = NULL;
1385 if (iod->io_typ == IODEV_NET) {
1390 /* When use `handover' with Network Bridge,
1391 * user -> TCP/IP(kernel) -> bridge device -> TCP/IP(kernel) -> this.
1393 * We remove the one ethernet header of skb before using skb->len,
1394 * because the skb has two ethernet headers.
1396 if (iod->use_handover) {
1397 if (iod->id >= PSD_DATA_CHID_BEGIN &&
1398 iod->id <= PSD_DATA_CHID_END)
1399 skb_pull(skb, sizeof(struct ethhdr));
1403 ip_header = (struct iphdr *)skb->data;
1404 if (iod->msd->loopback_ipaddr &&
1405 ip_header->daddr == iod->msd->loopback_ipaddr) {
1406 swap(ip_header->saddr, ip_header->daddr);
1407 hd.channel = DATA_LOOPBACK_CHANNEL;
1409 hd.channel = iod->id & 0x1F;
1411 hd.len = skb->len + sizeof(hd);
1414 headroom = sizeof(hd) + sizeof(hdlc_start);
1415 tailroom = sizeof(hdlc_end);
1417 tailroom += MAX_LINK_PADDING_SIZE;
/* Reallocate only when the existing skb lacks room for the framing. */
1418 if (skb_headroom(skb) < headroom || skb_tailroom(skb) < tailroom) {
1419 skb_new = skb_copy_expand(skb, headroom, tailroom, GFP_ATOMIC);
1420 /* skb_copy_expand success or not, free old skb from caller */
1421 dev_kfree_skb_any(skb);
1427 memcpy(skb_push(skb_new, sizeof(hd)), &hd, sizeof(hd));
1428 memcpy(skb_push(skb_new, sizeof(hdlc_start)), hdlc_start,
1429 sizeof(hdlc_start));
1430 memcpy(skb_put(skb_new, sizeof(hdlc_end)), hdlc_end, sizeof(hdlc_end));
1431 skb_put(skb_new, calc_padding_size(iod, ld, skb_new->len));
1434 skbpriv(skb_new)->iod = iod;
1435 skbpriv(skb_new)->ld = ld;
1437 ret = ld->send(ld, iod, skb_new);
1439 netif_stop_queue(ndev);
1440 dev_kfree_skb_any(skb_new);
1441 return NETDEV_TX_BUSY;
1444 ndev->stats.tx_packets++;
1445 ndev->stats.tx_bytes += skb->len;
1447 return NETDEV_TX_OK;
/* netdev ops shared by both PPP-style and Ethernet-style virtual nets. */
1450 static struct net_device_ops vnet_ops = {
1451 .ndo_open = vnet_open,
1452 .ndo_stop = vnet_stop,
1453 .ndo_start_xmit = vnet_xmit,
/* Netdev setup for the default (non-handover) case: a point-to-point,
 * ARP-less PPP-type device with no hardware header. */
1456 static void vnet_setup(struct net_device *ndev)
1458 ndev->netdev_ops = &vnet_ops;
1459 ndev->type = ARPHRD_PPP;
1460 ndev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
1462 ndev->hard_header_len = 0;
1463 ndev->tx_queue_len = 1000;
1464 ndev->mtu = ETH_DATA_LEN;
1465 ndev->watchdog_timeo = 5 * HZ;
/* Netdev setup for handover mode: Ethernet type with a random MAC so the
 * device can join a network bridge (IFF_SLAVE), still ARP-less. */
1468 static void vnet_setup_ether(struct net_device *ndev)
1470 ndev->netdev_ops = &vnet_ops;
1471 ndev->type = ARPHRD_ETHER;
1472 ndev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST | IFF_SLAVE;
1473 ndev->addr_len = ETH_ALEN;
1474 random_ether_addr(ndev->dev_addr);
1475 ndev->hard_header_len = 0;
1476 ndev->tx_queue_len = 1000;
1477 ndev->mtu = ETH_DATA_LEN;
1478 ndev->watchdog_timeo = 5 * HZ;
/* Public init entry: wire the io_device's callbacks (state change, SIM
 * change, RX receive) and register it as a misc char device or a net
 * device according to io_typ. Misc devices also get the 'waketime' and
 * 'loopback' sysfs attributes. NOTE(review): this function continues past
 * the end of this excerpt; case labels, error paths and the final return
 * are not visible. */
1481 int sipc4_init_io_device(struct io_device *iod)
1486 /* Get modem state from modem control device */
1487 iod->modem_state_changed = io_dev_modem_state_changed;
1489 iod->sim_state_changed = io_dev_sim_state_changed;
1491 /* Get data from link device */
1492 iod->recv = io_dev_recv_data_from_link_dev;
1494 /* Register misc or net device */
1495 switch (iod->io_typ) {
1497 init_waitqueue_head(&iod->wq);
1498 skb_queue_head_init(&iod->sk_rx_q);
1499 INIT_DELAYED_WORK(&iod->rx_work, rx_iodev_work);
1501 iod->miscdev.minor = MISC_DYNAMIC_MINOR;
1502 iod->miscdev.name = iod->name;
1503 iod->miscdev.fops = &misc_io_fops;
1505 ret = misc_register(&iod->miscdev);
1507 mif_err("failed to register misc io device : %s\n",
/* Net-device branch: handover uses an Ethernet-style setup. */
1513 skb_queue_head_init(&iod->sk_rx_q);
1514 INIT_DELAYED_WORK(&iod->rx_work, rx_iodev_work);
1515 if (iod->use_handover)
1516 iod->ndev = alloc_netdev(0, iod->name,
1519 iod->ndev = alloc_netdev(0, iod->name, vnet_setup);
1522 mif_err("failed to alloc netdev\n");
1526 ret = register_netdev(iod->ndev);
1528 free_netdev(iod->ndev);
1530 mif_debug("(iod:0x%p)\n", iod);
1531 vnet = netdev_priv(iod->ndev);
1532 mif_debug("(vnet:0x%p)\n", vnet);
/* Second misc-device branch (different io_typ) with sysfs attributes. */
1537 skb_queue_head_init(&iod->sk_rx_q);
1538 INIT_DELAYED_WORK(&iod->rx_work, rx_iodev_work);
1540 iod->miscdev.minor = MISC_DYNAMIC_MINOR;
1541 iod->miscdev.name = iod->name;
1542 iod->miscdev.fops = &misc_io_fops;
1544 ret = misc_register(&iod->miscdev);
1546 mif_err("failed to register misc io device : %s\n",
1548 ret = device_create_file(iod->miscdev.this_device,
1551 mif_err("failed to create `waketime' file : %s\n",
1553 ret = device_create_file(iod->miscdev.this_device,
1556 mif_err("failed to create `loopback file' : %s\n",
1561 mif_err("wrong io_type : %d\n", iod->io_typ);
1565 mif_debug("%s(%d) : init_io_device() done : %d\n",
1566 iod->name, iod->io_typ, ret);