2 * Copyright (C) ST-Ericsson AB 2010
3 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
4 * License terms: GNU General Public License (GPL) version 2
7 #define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
9 #include <linux/stddef.h>
10 #include <linux/spinlock.h>
11 #include <linux/slab.h>
12 #include <asm/unaligned.h>
13 #include <net/caif/caif_layer.h>
14 #include <net/caif/cfsrvl.h>
15 #include <net/caif/cfpkt.h>
17 #define container_obj(layr) container_of(layr, struct cfrfml, serv.layer)
18 #define RFM_SEGMENTATION_BIT 0x01
19 #define RFM_HEAD_SIZE 7
21 static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt);
22 static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt);
	/* Partially reassembled PDU; NULL when no reassembly is in progress. */
	struct cfpkt *incomplete_frm;
	/* Protects serialized processing of packets */
/*
 * Release callback for the RFM service layer: drop any half-assembled
 * frame so it is not leaked when the layer is torn down.
 * NOTE(review): the kfree of the layer itself is presumably in elided
 * lines below — confirm against the full source.
 */
static void cfrfml_release(struct cflayer *layer)
	struct cfsrvl *srvl = container_of(layer, struct cfsrvl, layer);
	struct cfrfml *rfml = container_obj(&srvl->layer);
	/* Free a pending partially reassembled PDU, if any. */
	if (rfml->incomplete_frm)
		cfpkt_destroy(rfml->incomplete_frm);
/*
 * Allocate and initialise an RFM service layer instance.
 *
 * channel_id: CAIF channel this layer serves.
 * dev_info:   link-layer device info stored in the service layer.
 * Returns a pointer to the embedded generic layer (&this->serv.layer);
 * NOTE(review): the NULL-on-allocation-failure path is in elided lines —
 * confirm against the full source.
 */
struct cflayer *cfrfml_create(u8 channel_id, struct dev_info *dev_info,
	/* GFP_ATOMIC: creation may run in a context that cannot sleep. */
	struct cfrfml *this = kzalloc(sizeof(struct cfrfml), GFP_ATOMIC);
	cfsrvl_init(&this->serv, channel_id, dev_info, false);
	/* Hook up RFM-specific release/receive/transmit handlers. */
	this->serv.release = cfrfml_release;
	this->serv.layer.receive = cfrfml_receive;
	this->serv.layer.transmit = cfrfml_transmit;
	/* Round down to closest multiple of 16 */
	tmp = (mtu_size - RFM_HEAD_SIZE - 6) / 16;
	/* NOTE(review): the multiply back by 16 appears to be in an elided line. */
	this->fragment_size = tmp;
	spin_lock_init(&this->sync);
	snprintf(this->serv.layer.name, CAIF_LAYER_NAME_SZ,
	return &this->serv.layer;
/*
 * Append a continuation segment to the frame being reassembled.
 *
 * seghead: 6-byte scratch buffer the segment header is extracted into.
 * err:     out-parameter for the failure code on the error paths.
 * Returns the (possibly reallocated) combined packet from cfpkt_append;
 * the error-path returns are in elided lines.
 */
static struct cfpkt *rfm_append(struct cfrfml *rfml, char *seghead,
		struct cfpkt *pkt, int *err)
	/* n-th but not last segment */
	/* Pull the 6-byte segment header off the front of this fragment. */
	if (cfpkt_extr_head(pkt, seghead, 6) < 0)
	/* Verify correct header: must match the initial segment's header. */
	if (memcmp(seghead, rfml->seghead, 6) != 0)
	tmppkt = cfpkt_append(rfml->incomplete_frm, pkt,
			rfml->pdu_size + RFM_HEAD_SIZE);
	/* If cfpkt_append fails input pkts are not freed */
/*
 * Receive handler: reassemble segmented RFM PDUs and deliver complete
 * frames to the layer above.
 *
 * The first header byte carries RFM_SEGMENTATION_BIT; while it is set,
 * fragments are accumulated in rfml->incomplete_frm under rfml->sync.
 * On any error the pending reassembly is destroyed and a remote-shutdown
 * indication is raised on the layer above.
 */
static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt)
	struct cfpkt *tmppkt = NULL;
	caif_assert(layr->up != NULL);
	caif_assert(layr->receive != NULL);
	rfml = container_obj(layr);
	/* Serialize all reassembly state changes. */
	spin_lock(&rfml->sync);
	/* First byte: segmentation flag. */
	if (cfpkt_extr_head(pkt, &tmp, 1) < 0)
	segmented = tmp & RFM_SEGMENTATION_BIT;
	if (rfml->incomplete_frm == NULL) {
		/* Initial Segment */
		/* Peek (don't consume) the 6-byte header for later matching. */
		if (cfpkt_peek_head(pkt, rfml->seghead, 6) < 0)
		/* Total PDU length, little-endian at header offset 4. */
		rfml->pdu_size = get_unaligned_le16(rfml->seghead+4);
		if (cfpkt_erroneous(pkt))
		/* Stash the first fragment and wait for the rest. */
		rfml->incomplete_frm = pkt;
		/* Continuation segment: append to the pending frame. */
		tmppkt = rfm_append(rfml, seghead, pkt, &err);
		if (cfpkt_erroneous(tmppkt))
		rfml->incomplete_frm = tmppkt;
		if (cfpkt_erroneous(tmppkt))
	/* Last segment of a segmented PDU: finish the reassembly. */
	if (rfml->incomplete_frm) {
		tmppkt = rfm_append(rfml, seghead, pkt, &err);
		if (cfpkt_erroneous(tmppkt))
		rfml->incomplete_frm = NULL;
	/* Verify that length is correct */
	if (rfml->pdu_size != cfpkt_getlen(pkt) - RFM_HEAD_SIZE + 1)
	/* Hand the complete PDU to the layer above. */
	err = rfml->serv.layer.up->receive(rfml->serv.layer.up, pkt);
	/* Error path: discard everything accumulated so far. */
	cfpkt_destroy(tmppkt);
	if (rfml->incomplete_frm)
		cfpkt_destroy(rfml->incomplete_frm);
	rfml->incomplete_frm = NULL;
	pr_info("Connection error %d triggered on RFM link\n", err);
	/* Trigger connection error upon failure.*/
	layr->up->ctrlcmd(layr->up, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND,
			rfml->serv.dev_info.id);
	spin_unlock(&rfml->sync);
	if (unlikely(err == -EAGAIN))
		/* It is not possible to recover after drop of a fragment */
/*
 * Fill in routing/header info for one segment and hand it to the layer
 * below for transmission. Returns the lower layer's transmit result.
 */
static int cfrfml_transmit_segment(struct cfrfml *rfml, struct cfpkt *pkt)
	/* A segment must never exceed fragment_size plus the RFM header. */
	caif_assert(cfpkt_getlen(pkt) < rfml->fragment_size + RFM_HEAD_SIZE);
	/* Add info for MUX-layer to route the packet out. */
	cfpkt_info(pkt)->channel_id = rfml->serv.layer.id;
	/*
	 * To optimize alignment, we add up the size of CAIF header before
	 * payload so lower layers can account for it.
	 */
	cfpkt_info(pkt)->hdr_len = RFM_HEAD_SIZE;
	cfpkt_info(pkt)->dev_info = &rfml->serv.dev_info;
	return rfml->serv.layer.dn->transmit(rfml->serv.layer.dn, pkt);
/*
 * Transmit handler: split an outgoing PDU into fragments no larger than
 * fragment_size (+ RFM header) and send each through the layer below.
 *
 * frontpkt holds the fragment currently being sent; rearpkt holds the
 * remainder produced by cfpkt_split. On failure both are destroyed and a
 * remote-shutdown indication is raised on the layer above.
 */
static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt)
	struct cfpkt *rearpkt = NULL;
	struct cfpkt *frontpkt = pkt;
	struct cfrfml *rfml = container_obj(layr);
	caif_assert(layr->dn != NULL);
	caif_assert(layr->dn->transmit != NULL);
	/* Refuse to transmit unless the service layer is connected. */
	if (!cfsrvl_ready(&rfml->serv, &err))
	/* Reject packets too short to carry a meaningful RFM payload. */
	if (cfpkt_getlen(pkt) <= RFM_HEAD_SIZE-1)
	/* Segmentation needed: remember the 6-byte header for each fragment. */
	if (cfpkt_getlen(pkt) > rfml->fragment_size + RFM_HEAD_SIZE)
		err = cfpkt_peek_head(pkt, head, 6);
	/* Emit full-size fragments while the remainder is still too large. */
	while (cfpkt_getlen(frontpkt) > rfml->fragment_size + RFM_HEAD_SIZE) {
		/* Prepend segmentation-flag byte to this fragment. */
		if (cfpkt_add_head(frontpkt, &seg, 1) < 0)
		/*
		 * On OOM error cfpkt_split returns NULL.
		 *
		 * NOTE: Segmented pdu is not correctly aligned.
		 * This has negative performance impact.
		 */
		rearpkt = cfpkt_split(frontpkt, rfml->fragment_size);
		err = cfrfml_transmit_segment(rfml, frontpkt);
		if (frontpkt == NULL)
		/* Re-attach the saved header to the remainder fragment. */
		if (cfpkt_add_head(frontpkt, head, 6) < 0)
	/* Final (or only) fragment: segmentation bit cleared in seg. */
	if (cfpkt_add_head(frontpkt, &seg, 1) < 0)
	err = cfrfml_transmit_segment(rfml, frontpkt);
	/* Error path: report and tear down the connection. */
	pr_info("Connection error %d triggered on RFM link\n", err);
	/* Trigger connection error upon failure.*/
	layr->up->ctrlcmd(layr->up, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND,
			rfml->serv.dev_info.id);
	cfpkt_destroy(rearpkt);
	cfpkt_destroy(frontpkt);