drivers/net/wwan/iosm/iosm_ipc_mux_codec.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-21 Intel Corporation.
 */

#include <linux/nospec.h>

#include "iosm_ipc_imem_ops.h"
#include "iosm_ipc_mux_codec.h"
#include "iosm_ipc_task_queue.h"

/* Test the link power state and send a MUX command in blocking mode. */
static int ipc_mux_tq_cmd_send(struct iosm_imem *ipc_imem, int arg, void *msg,
                               size_t size)
{
        struct iosm_mux *ipc_mux = ipc_imem->mux;
        const struct mux_acb *acb = msg;

        skb_queue_tail(&ipc_mux->channel->ul_list, acb->skb);
        ipc_imem_ul_send(ipc_mux->imem);

        return 0;
}

static int ipc_mux_acb_send(struct iosm_mux *ipc_mux, bool blocking)
{
        struct completion *completion = &ipc_mux->channel->ul_sem;
        int ret = ipc_task_queue_send_task(ipc_mux->imem, ipc_mux_tq_cmd_send,
                                           0, &ipc_mux->acb,
                                           sizeof(ipc_mux->acb), false);
        if (ret) {
                dev_err(ipc_mux->dev, "unable to send mux command");
                return ret;
        }

        /* If blocking, suspend the app and wait for the IRQ in the flash or
         * crash phase. Return -ETIMEDOUT on timeout to indicate failure.
         */
        if (blocking) {
                u32 wait_time_milliseconds = IPC_MUX_CMD_RUN_DEFAULT_TIMEOUT;

                reinit_completion(completion);

                if (wait_for_completion_interruptible_timeout
                   (completion, msecs_to_jiffies(wait_time_milliseconds)) ==
                   0) {
                        dev_err(ipc_mux->dev, "ch[%d] timeout",
                                ipc_mux->channel_id);
                        ipc_uevent_send(ipc_mux->imem->dev, UEVENT_MDM_TIMEOUT);
                        return -ETIMEDOUT;
                }
        }

        return 0;
}

/* Initialize the command header. */
static void ipc_mux_acb_init(struct iosm_mux *ipc_mux)
{
        struct mux_acb *acb = &ipc_mux->acb;
        struct mux_acbh *header;

        header = (struct mux_acbh *)(acb->skb)->data;
        header->block_length = cpu_to_le32(sizeof(struct mux_acbh));
        header->first_cmd_index = header->block_length;
        header->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ACBH);
        header->sequence_nr = cpu_to_le16(ipc_mux->acb_tx_sequence_nr++);
}
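
/* Layout of an aggregated command block (ACB), as implied by the encode
 * (ipc_mux_acb_init/ipc_mux_acb_add_cmd) and decode (ipc_mux_dl_acb_decode)
 * paths in this file: an ACBH carrying signature, block_length,
 * first_cmd_index and sequence_nr, followed by one or more command headers.
 * first_cmd_index and next_cmd_index are byte offsets from the start of the
 * block; the decoder walks that chain until it reads an offset of 0.
 */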

/* Add a command to the ACB. */
static struct mux_cmdh *ipc_mux_acb_add_cmd(struct iosm_mux *ipc_mux, u32 cmd,
                                            void *param, u32 param_size)
{
        struct mux_acbh *header;
        struct mux_cmdh *cmdh;
        struct mux_acb *acb;

        acb = &ipc_mux->acb;
        header = (struct mux_acbh *)(acb->skb)->data;
        cmdh = (struct mux_cmdh *)
                ((acb->skb)->data + le32_to_cpu(header->block_length));

        cmdh->signature = cpu_to_le32(MUX_SIG_CMDH);
        cmdh->command_type = cpu_to_le32(cmd);
        cmdh->if_id = acb->if_id;

        acb->cmd = cmd;
        cmdh->cmd_len = cpu_to_le16(offsetof(struct mux_cmdh, param) +
                                    param_size);
        cmdh->transaction_id = cpu_to_le32(ipc_mux->tx_transaction_id++);
        if (param)
                memcpy(&cmdh->param, param, param_size);

        skb_put(acb->skb, le32_to_cpu(header->block_length) +
                                        le16_to_cpu(cmdh->cmd_len));

        return cmdh;
}

/* Prepare a MUX command. */
static struct mux_lite_cmdh *ipc_mux_lite_add_cmd(struct iosm_mux *ipc_mux,
                                                  u32 cmd, struct mux_acb *acb,
                                                  void *param, u32 param_size)
{
        struct mux_lite_cmdh *cmdh = (struct mux_lite_cmdh *)acb->skb->data;

        cmdh->signature = cpu_to_le32(MUX_SIG_CMDH);
        cmdh->command_type = cpu_to_le32(cmd);
        cmdh->if_id = acb->if_id;

        acb->cmd = cmd;

        cmdh->cmd_len = cpu_to_le16(offsetof(struct mux_lite_cmdh, param) +
                                    param_size);
        cmdh->transaction_id = cpu_to_le32(ipc_mux->tx_transaction_id++);

        if (param)
                memcpy(&cmdh->param, param, param_size);

        skb_put(acb->skb, le16_to_cpu(cmdh->cmd_len));

        return cmdh;
}
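
/* Unlike the aggregated variant above, MUX Lite has no ACB header: the
 * command header sits at the start of the skb, the frame carries a single
 * command, and only cmd_len bytes are committed with skb_put().
 */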

static int ipc_mux_acb_alloc(struct iosm_mux *ipc_mux)
{
        struct mux_acb *acb = &ipc_mux->acb;
        struct sk_buff *skb;
        dma_addr_t mapping;

        /* Allocate skb memory for the uplink buffer. */
        skb = ipc_pcie_alloc_skb(ipc_mux->pcie, MUX_MAX_UL_ACB_BUF_SIZE,
                                 GFP_ATOMIC, &mapping, DMA_TO_DEVICE, 0);
        if (!skb)
                return -ENOMEM;

        /* Save the skb address. */
        acb->skb = skb;

        memset(skb->data, 0, MUX_MAX_UL_ACB_BUF_SIZE);

        return 0;
}

int ipc_mux_dl_acb_send_cmds(struct iosm_mux *ipc_mux, u32 cmd_type, u8 if_id,
                             u32 transaction_id, union mux_cmd_param *param,
                             size_t res_size, bool blocking, bool respond)
{
        struct mux_acb *acb = &ipc_mux->acb;
        union mux_type_cmdh cmdh;
        int ret = 0;

        acb->if_id = if_id;
        ret = ipc_mux_acb_alloc(ipc_mux);
        if (ret)
                return ret;

        if (ipc_mux->protocol == MUX_LITE) {
                cmdh.ack_lite = ipc_mux_lite_add_cmd(ipc_mux, cmd_type, acb,
                                                     param, res_size);

                if (respond)
                        cmdh.ack_lite->transaction_id =
                                        cpu_to_le32(transaction_id);
        } else {
                /* Initialize the ACB header. */
                ipc_mux_acb_init(ipc_mux);
                cmdh.ack_aggr = ipc_mux_acb_add_cmd(ipc_mux, cmd_type, param,
                                                    res_size);

                if (respond)
                        cmdh.ack_aggr->transaction_id =
                                        cpu_to_le32(transaction_id);
        }
        ret = ipc_mux_acb_send(ipc_mux, blocking);

        return ret;
}
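
/* Callers use ipc_mux_dl_acb_send_cmds() both to issue a new command
 * (respond == false, a fresh tx_transaction_id is assigned) and to answer
 * a command received from CP (respond == true, the peer's transaction id
 * is echoed back).
 */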

void ipc_mux_netif_tx_flowctrl(struct mux_session *session, int idx, bool on)
{
        /* Inform the network interface to start/stop flow ctrl */
        ipc_wwan_tx_flowctrl(session->wwan, idx, on);
}

static int ipc_mux_dl_cmdresps_decode_process(struct iosm_mux *ipc_mux,
                                              union mux_cmd_param param,
                                              __le32 command_type, u8 if_id,
                                              __le32 transaction_id)
{
        struct mux_acb *acb = &ipc_mux->acb;

        switch (le32_to_cpu(command_type)) {
        case MUX_CMD_OPEN_SESSION_RESP:
        case MUX_CMD_CLOSE_SESSION_RESP:
                /* Resume the control application. */
                acb->got_param = param;
                break;

        case MUX_LITE_CMD_FLOW_CTL_ACK:
                /* This command type is not expected as response for
                 * Aggregation version of the protocol. So return non-zero.
                 */
                if (ipc_mux->protocol != MUX_LITE)
                        return -EINVAL;

                dev_dbg(ipc_mux->dev, "if_id %u FLOW_CTL_ACK %u received",
                        if_id, le32_to_cpu(transaction_id));
                break;

        case IOSM_AGGR_MUX_CMD_FLOW_CTL_ACK:
                /* This command type is not expected as response for
                 * Lite version of the protocol. So return non-zero.
                 */
                if (ipc_mux->protocol == MUX_LITE)
                        return -EINVAL;
                break;

        default:
                return -EINVAL;
        }

        acb->wanted_response = MUX_CMD_INVALID;
        acb->got_response = le32_to_cpu(command_type);
        complete(&ipc_mux->channel->ul_sem);

        return 0;
}
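
/* The complete() above pairs with the timed wait on ul_sem in
 * ipc_mux_acb_send(): decoding a command response releases the caller
 * blocked in blocking mode.
 */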

static int ipc_mux_dl_cmds_decode_process(struct iosm_mux *ipc_mux,
                                          union mux_cmd_param *param,
                                          __le32 command_type, u8 if_id,
                                          __le16 cmd_len, int size)
{
        struct mux_session *session;
        struct hrtimer *adb_timer;

        dev_dbg(ipc_mux->dev, "if_id[%d]: dlcmds decode process %d",
                if_id, le32_to_cpu(command_type));

        switch (le32_to_cpu(command_type)) {
        case MUX_LITE_CMD_FLOW_CTL:
        case IOSM_AGGR_MUX_CMD_FLOW_CTL_DISABLE:

                if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
                        dev_err(ipc_mux->dev, "if_id [%d] not valid",
                                if_id);
                        return -EINVAL; /* No session interface id. */
                }

                session = &ipc_mux->session[if_id];
                adb_timer = &ipc_mux->imem->adb_timer;

                if (param->flow_ctl.mask == cpu_to_le32(0xFFFFFFFF)) {
                        /* Backward Compatibility */
                        if (cmd_len == cpu_to_le16(size))
                                session->flow_ctl_mask =
                                        le32_to_cpu(param->flow_ctl.mask);
                        else
                                session->flow_ctl_mask = ~0;
                        /* if CP asks for FLOW CTRL Enable
                         * then set our internal flow control Tx flag
                         * to limit uplink session queueing
                         */
                        session->net_tx_stop = true;

                        /* Finish the ADB here; otherwise data already
                         * queued would only reach CP once the ADB fills
                         * up because of other sessions.
                         */
                        if (ipc_mux->protocol == MUX_AGGREGATION) {
                                ipc_mux_ul_adb_finish(ipc_mux);
                                ipc_imem_hrtimer_stop(adb_timer);
                        }
                        /* Update the stats */
                        session->flow_ctl_en_cnt++;
                } else if (param->flow_ctl.mask == 0) {
                        /* Just reset the flow control mask and let
                         * mux_flow_ctrl_low_thre_b take control of our
                         * internal Tx flag and of enabling kernel flow
                         * control.
                         */
                        dev_dbg(ipc_mux->dev, "if_id[%u] flow_ctl mask 0x%08X",
                                if_id, le32_to_cpu(param->flow_ctl.mask));
                        /* Backward Compatibility */
                        if (cmd_len == cpu_to_le16(size))
                                session->flow_ctl_mask =
                                        le32_to_cpu(param->flow_ctl.mask);
                        else
                                session->flow_ctl_mask = 0;
                        /* Update the stats */
                        session->flow_ctl_dis_cnt++;
                } else {
                        break;
                }

                ipc_mux->acc_adb_size = 0;
                ipc_mux->acc_payload_size = 0;

                dev_dbg(ipc_mux->dev, "if_id[%u] FLOW CTRL 0x%08X", if_id,
                        le32_to_cpu(param->flow_ctl.mask));
                break;

        case MUX_LITE_CMD_LINK_STATUS_REPORT:
                break;

        default:
                return -EINVAL;
        }
        return 0;
}

/* Decode and Send appropriate response to a command block. */
static void ipc_mux_dl_cmd_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
{
        struct mux_lite_cmdh *cmdh = (struct mux_lite_cmdh *)skb->data;
        __le32 trans_id = cmdh->transaction_id;
        int size;

        if (ipc_mux_dl_cmdresps_decode_process(ipc_mux, cmdh->param,
                                               cmdh->command_type, cmdh->if_id,
                                               cmdh->transaction_id)) {
                /* Failure to decode a command response indicates that
                 * cmd_type may be a command rather than a response, so try
                 * decoding it as a command.
                 */
                size = offsetof(struct mux_lite_cmdh, param) +
                                sizeof(cmdh->param.flow_ctl);
                if (!ipc_mux_dl_cmds_decode_process(ipc_mux, &cmdh->param,
                                                    cmdh->command_type,
                                                    cmdh->if_id,
                                                    cmdh->cmd_len, size)) {
                        /* Decoded command may need a response. Give the
                         * response according to the command type.
                         */
                        union mux_cmd_param *mux_cmd = NULL;
                        size_t size = 0;
                        u32 cmd = MUX_LITE_CMD_LINK_STATUS_REPORT_RESP;

                        if (cmdh->command_type ==
                            cpu_to_le32(MUX_LITE_CMD_LINK_STATUS_REPORT)) {
                                mux_cmd = &cmdh->param;
                                mux_cmd->link_status_resp.response =
                                        cpu_to_le32(MUX_CMD_RESP_SUCCESS);
                                /* response field is u32 */
                                size = sizeof(u32);
                        } else if (cmdh->command_type ==
                                   cpu_to_le32(MUX_LITE_CMD_FLOW_CTL)) {
                                cmd = MUX_LITE_CMD_FLOW_CTL_ACK;
                        } else {
                                return;
                        }

                        if (ipc_mux_dl_acb_send_cmds(ipc_mux, cmd, cmdh->if_id,
                                                     le32_to_cpu(trans_id),
                                                     mux_cmd, size, false,
                                                     true))
                                dev_err(ipc_mux->dev,
                                        "if_id %d: cmd send failed",
                                        cmdh->if_id);
                }
        }
}

/* Pass the DL packet to the netif layer. */
static int ipc_mux_net_receive(struct iosm_mux *ipc_mux, int if_id,
                               struct iosm_wwan *wwan, u32 offset,
                               u8 service_class, struct sk_buff *skb)
{
        struct sk_buff *dest_skb = skb_clone(skb, GFP_ATOMIC);

        if (!dest_skb)
                return -ENOMEM;

        skb_pull(dest_skb, offset);
        skb_set_tail_pointer(dest_skb, dest_skb->len);
        /* Pass the packet to the netif layer. */
        dest_skb->priority = service_class;

        return ipc_wwan_receive(wwan, dest_skb, false, if_id);
}

/* Decode Flow Credit Table in the block */
static void ipc_mux_dl_fcth_decode(struct iosm_mux *ipc_mux,
                                   unsigned char *block)
{
        struct ipc_mem_lite_gen_tbl *fct = (struct ipc_mem_lite_gen_tbl *)block;
        struct iosm_wwan *wwan;
        int ul_credits;
        int if_id;

        if (fct->vfl_length != sizeof(fct->vfl.nr_of_bytes)) {
                dev_err(ipc_mux->dev, "unexpected FCT length: %d",
                        fct->vfl_length);
                return;
        }

        if_id = fct->if_id;
        if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
                dev_err(ipc_mux->dev, "not supported if_id: %d", if_id);
                return;
        }

        /* Is the session active ? */
        if_id = array_index_nospec(if_id, IPC_MEM_MUX_IP_SESSION_ENTRIES);
        wwan = ipc_mux->session[if_id].wwan;
        if (!wwan) {
                dev_err(ipc_mux->dev, "session Net ID is NULL");
                return;
        }

        ul_credits = le32_to_cpu(fct->vfl.nr_of_bytes);

        dev_dbg(ipc_mux->dev, "Flow_Credit:: if_id[%d] Old: %d Grants: %d",
                if_id, ipc_mux->session[if_id].ul_flow_credits, ul_credits);

        /* Update the Flow Credit information from ADB */
        ipc_mux->session[if_id].ul_flow_credits += ul_credits;

        /* Check whether the TX can be started */
        if (ipc_mux->session[if_id].ul_flow_credits > 0) {
                ipc_mux->session[if_id].net_tx_stop = false;
                ipc_mux_netif_tx_flowctrl(&ipc_mux->session[if_id],
                                          ipc_mux->session[if_id].if_id, false);
        }
}
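
/* Credit-based UL flow control: CP grants per-session byte credits through
 * the flow credit table decoded above; ipc_mux_ul_bytes_credits_check()
 * consumes them on the encode path, and TX is restarted here as soon as
 * the balance is positive again.
 */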

/* Decode non-aggregated datagram */
static void ipc_mux_dl_adgh_decode(struct iosm_mux *ipc_mux,
                                   struct sk_buff *skb)
{
        u32 pad_len, packet_offset;
        struct iosm_wwan *wwan;
        struct mux_adgh *adgh;
        u8 *block = skb->data;
        int rc = 0;
        u8 if_id;

        adgh = (struct mux_adgh *)block;

        if (adgh->signature != cpu_to_le32(IOSM_AGGR_MUX_SIG_ADGH)) {
                dev_err(ipc_mux->dev, "invalid ADGH signature received");
                return;
        }

        if_id = adgh->if_id;
        if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
                dev_err(ipc_mux->dev, "invalid if_id while decoding %d", if_id);
                return;
        }

        /* Is the session active ? */
        if_id = array_index_nospec(if_id, IPC_MEM_MUX_IP_SESSION_ENTRIES);
        wwan = ipc_mux->session[if_id].wwan;
        if (!wwan) {
                dev_err(ipc_mux->dev, "session Net ID is NULL");
                return;
        }

        /* Store the pad len for the corresponding session.
         * Pad bytes are as negotiated in the open session, less the header
         * size (see the session management chapter for details). If the
         * resulting padding is zero or less, the additional head padding is
         * omitted: e.g. for HEAD_PAD_LEN = 16 or less this field is omitted;
         * for HEAD_PAD_LEN = 20 this field holds 4 bytes set to zero.
         */
        pad_len =
                ipc_mux->session[if_id].dl_head_pad_len - IPC_MEM_DL_ETH_OFFSET;
        packet_offset = sizeof(*adgh) + pad_len;

        if_id += ipc_mux->wwan_q_offset;

        /* Pass the packet to the netif layer */
        rc = ipc_mux_net_receive(ipc_mux, if_id, wwan, packet_offset,
                                 adgh->service_class, skb);
        if (rc) {
                dev_err(ipc_mux->dev, "mux adgh decoding error");
                return;
        }
        ipc_mux->session[if_id].flush = 1;
}
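
/* An ADGH frame carries exactly one IP packet: the mux_adgh header, the
 * negotiated head padding, then the payload. packet_offset above skips the
 * header and padding before the packet is handed to the network layer.
 */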

static void ipc_mux_dl_acbcmd_decode(struct iosm_mux *ipc_mux,
                                     struct mux_cmdh *cmdh, int size)
{
        u32 link_st  = IOSM_AGGR_MUX_CMD_LINK_STATUS_REPORT_RESP;
        u32 fctl_dis = IOSM_AGGR_MUX_CMD_FLOW_CTL_DISABLE;
        u32 fctl_ena = IOSM_AGGR_MUX_CMD_FLOW_CTL_ENABLE;
        u32 fctl_ack = IOSM_AGGR_MUX_CMD_FLOW_CTL_ACK;
        union mux_cmd_param *cmd_p = NULL;
        u32 cmd = link_st;
        u32 trans_id;

        if (!ipc_mux_dl_cmds_decode_process(ipc_mux, &cmdh->param,
                                            cmdh->command_type, cmdh->if_id,
                                            cmdh->cmd_len, size)) {
                size = 0;
                if (cmdh->command_type == cpu_to_le32(link_st)) {
                        cmd_p = &cmdh->param;
                        cmd_p->link_status_resp.response =
                                cpu_to_le32(MUX_CMD_RESP_SUCCESS);
                } else if ((cmdh->command_type == cpu_to_le32(fctl_ena)) ||
                                (cmdh->command_type == cpu_to_le32(fctl_dis))) {
                        cmd = fctl_ack;
                } else {
                        return;
                }
                trans_id = le32_to_cpu(cmdh->transaction_id);
                ipc_mux_dl_acb_send_cmds(ipc_mux, cmd, cmdh->if_id,
                                         trans_id, cmd_p, size, false, true);
        }
}

/* Decode an aggregated command block. */
static void ipc_mux_dl_acb_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
{
        struct mux_acbh *acbh;
        struct mux_cmdh *cmdh;
        u32 next_cmd_index;
        u8 *block;
        int size;

        acbh = (struct mux_acbh *)(skb->data);
        block = (u8 *)(skb->data);

        next_cmd_index = le32_to_cpu(acbh->first_cmd_index);
        next_cmd_index = array_index_nospec(next_cmd_index,
                                            sizeof(struct mux_cmdh));

        while (next_cmd_index != 0) {
                cmdh = (struct mux_cmdh *)&block[next_cmd_index];
                next_cmd_index = le32_to_cpu(cmdh->next_cmd_index);
                if (ipc_mux_dl_cmdresps_decode_process(ipc_mux, cmdh->param,
                                                       cmdh->command_type,
                                                       cmdh->if_id,
                                                       cmdh->transaction_id)) {
                        size = offsetof(struct mux_cmdh, param) +
                                sizeof(cmdh->param.flow_ctl);
                        ipc_mux_dl_acbcmd_decode(ipc_mux, cmdh, size);
                }
        }
}

/* process datagram */
static int mux_dl_process_dg(struct iosm_mux *ipc_mux, struct mux_adbh *adbh,
                             struct mux_adth_dg *dg, struct sk_buff *skb,
                             int if_id, int nr_of_dg)
{
        u32 dl_head_pad_len = ipc_mux->session[if_id].dl_head_pad_len;
        u32 packet_offset, i, rc;

        for (i = 0; i < nr_of_dg; i++, dg++) {
                if (le32_to_cpu(dg->datagram_index)
                                < sizeof(struct mux_adbh))
                        goto dg_error;

                /* Is the packet inside of the ADB */
                if (le32_to_cpu(dg->datagram_index) >=
                                        le32_to_cpu(adbh->block_length)) {
                        goto dg_error;
                } else {
                        packet_offset =
                                le32_to_cpu(dg->datagram_index) +
                                dl_head_pad_len;
                        /* Pass the packet to the netif layer. */
                        rc = ipc_mux_net_receive(ipc_mux, if_id, ipc_mux->wwan,
                                                 packet_offset,
                                                 dg->service_class,
                                                 skb);
                        if (rc)
                                goto dg_error;
                }
        }
        return 0;
dg_error:
        return -1;
}

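/* Layout of an aggregated data block (ADB), as implied by the decode loop
 * below and by the encode path (mux_ul_adb_encode/ipc_mux_ul_encode_adth):
 * an ADBH, the padded datagram payloads, and ADTH/QLTH tables linked via
 * first_table_index/next_table_index (an index of 0 ends the chain). Each
 * ADTH holds per-session datagram descriptors (mux_adth_dg) whose
 * datagram_index fields are byte offsets of the payloads within the block.
 */
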
/* Decode an aggregated data block. */
static void mux_dl_adb_decode(struct iosm_mux *ipc_mux,
                              struct sk_buff *skb)
{
        struct mux_adth_dg *dg;
        struct iosm_wwan *wwan;
        struct mux_adbh *adbh;
        struct mux_adth *adth;
        int nr_of_dg, if_id;
        u32 adth_index;
        u8 *block;

        block = skb->data;
        adbh = (struct mux_adbh *)block;

        /* Process the aggregated datagram tables. */
        adth_index = le32_to_cpu(adbh->first_table_index);

        /* Has CP sent an empty ADB ? */
        if (adth_index < 1) {
                dev_err(ipc_mux->dev, "unexpected empty ADB");
                goto adb_decode_err;
        }

        /* Loop through mixed session tables. */
        while (adth_index) {
                /* Get the reference to the table header. */
                adth = (struct mux_adth *)(block + adth_index);

                /* Get the interface id and map it to the netif id. */
                if_id = adth->if_id;
                if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES)
                        goto adb_decode_err;

                if_id = array_index_nospec(if_id,
                                           IPC_MEM_MUX_IP_SESSION_ENTRIES);

                /* Is the session active ? */
                wwan = ipc_mux->session[if_id].wwan;
                if (!wwan)
                        goto adb_decode_err;

                /* Consistency checks for aggregated datagram table. */
                if (adth->signature != cpu_to_le32(IOSM_AGGR_MUX_SIG_ADTH))
                        goto adb_decode_err;

                if (le16_to_cpu(adth->table_length) < (sizeof(struct mux_adth) -
                                sizeof(struct mux_adth_dg)))
                        goto adb_decode_err;

                /* Calculate the number of datagrams. */
                nr_of_dg = (le16_to_cpu(adth->table_length) -
                                        sizeof(struct mux_adth) +
                                        sizeof(struct mux_adth_dg)) /
                                        sizeof(struct mux_adth_dg);

                /* Is the datagram table empty ? */
                if (nr_of_dg < 1) {
                        dev_err(ipc_mux->dev,
                                "adthidx=%u,nr_of_dg=%d,next_tblidx=%u",
                                adth_index, nr_of_dg,
                                le32_to_cpu(adth->next_table_index));

                        /* Move to the next aggregated datagram table. */
                        adth_index = le32_to_cpu(adth->next_table_index);
                        continue;
                }

                /* New aggregated datagram table. */
                dg = &adth->dg;
                if (mux_dl_process_dg(ipc_mux, adbh, dg, skb, if_id,
                                      nr_of_dg) < 0)
                        goto adb_decode_err;

                /* mark session for final flush */
                ipc_mux->session[if_id].flush = 1;

                /* Move to the next aggregated datagram table. */
                adth_index = le32_to_cpu(adth->next_table_index);
        }

adb_decode_err:
        return;
}

/**
 * ipc_mux_dl_decode -  Route the DL packet through the IP MUX layer
 *                      depending on Header.
 * @ipc_mux:            Pointer to MUX data-struct
 * @skb:                Pointer to ipc_skb.
 */
void ipc_mux_dl_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
{
        u32 signature;

        if (!skb->data)
                return;

        /* Decode the MUX header type. */
        signature = le32_to_cpup((__le32 *)skb->data);

        switch (signature) {
        case IOSM_AGGR_MUX_SIG_ADBH:    /* Aggregated Data Block Header */
                mux_dl_adb_decode(ipc_mux, skb);
                break;
        case IOSM_AGGR_MUX_SIG_ADGH:
                ipc_mux_dl_adgh_decode(ipc_mux, skb);
                break;
        case MUX_SIG_FCTH:
                ipc_mux_dl_fcth_decode(ipc_mux, skb->data);
                break;
        case IOSM_AGGR_MUX_SIG_ACBH:    /* Aggregated Command Block Header */
                ipc_mux_dl_acb_decode(ipc_mux, skb);
                break;
        case MUX_SIG_CMDH:
                ipc_mux_dl_cmd_decode(ipc_mux, skb);
                break;

        default:
                dev_err(ipc_mux->dev, "invalid ABH signature");
        }

        ipc_pcie_kfree_skb(ipc_mux->pcie, skb);
}

static int ipc_mux_ul_skb_alloc(struct iosm_mux *ipc_mux,
                                struct mux_adb *ul_adb, u32 type)
{
        /* Take the first element of the free list. */
        struct sk_buff *skb = skb_dequeue(&ul_adb->free_list);
        u32 no_if = IPC_MEM_MUX_IP_SESSION_ENTRIES;
        u32 *next_tb_id;
        int qlt_size;
        u32 if_id;

        if (!skb)
                return -EBUSY; /* Wait for a free ADB skb. */

        /* Mark it as UL ADB to select the right free operation. */
        IPC_CB(skb)->op_type = (u8)UL_MUX_OP_ADB;

        switch (type) {
        case IOSM_AGGR_MUX_SIG_ADBH:
                /* Save the ADB memory settings. */
                ul_adb->dest_skb = skb;
                ul_adb->buf = skb->data;
                ul_adb->size = IPC_MEM_MAX_ADB_BUF_SIZE;

                /* reset statistic counter */
                ul_adb->if_cnt = 0;
                ul_adb->payload_size = 0;
                ul_adb->dg_cnt_total = 0;

                /* Initialize the ADBH. */
                ul_adb->adbh = (struct mux_adbh *)ul_adb->buf;
                memset(ul_adb->adbh, 0, sizeof(struct mux_adbh));
                ul_adb->adbh->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ADBH);
                ul_adb->adbh->block_length =
                                        cpu_to_le32(sizeof(struct mux_adbh));
                next_tb_id = (unsigned int *)&ul_adb->adbh->first_table_index;
                ul_adb->next_table_index = next_tb_id;

                /* Clear the local copy of DGs for new ADB */
                memset(ul_adb->dg, 0, sizeof(ul_adb->dg));

                /* Clear the DG count and QLT updated status for new ADB */
                for (if_id = 0; if_id < no_if; if_id++) {
                        ul_adb->dg_count[if_id] = 0;
                        ul_adb->qlt_updated[if_id] = 0;
                }
                break;

        case IOSM_AGGR_MUX_SIG_ADGH:
                /* Save the ADB memory settings. */
                ul_adb->dest_skb = skb;
                ul_adb->buf = skb->data;
                ul_adb->size = IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE;
                /* reset statistic counter */
                ul_adb->if_cnt = 0;
                ul_adb->payload_size = 0;
                ul_adb->dg_cnt_total = 0;

                ul_adb->adgh = (struct mux_adgh *)skb->data;
                memset(ul_adb->adgh, 0, sizeof(struct mux_adgh));
                break;

        case MUX_SIG_QLTH:
                qlt_size = offsetof(struct ipc_mem_lite_gen_tbl, vfl) +
                           (MUX_QUEUE_LEVEL * sizeof(struct mux_lite_vfl));

                if (qlt_size > IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE) {
                        dev_err(ipc_mux->dev,
                                "can't support. QLT size:%d SKB size: %d",
                                qlt_size, IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE);
                        return -ERANGE;
                }

                ul_adb->qlth_skb = skb;
                memset((ul_adb->qlth_skb)->data, 0, qlt_size);
                skb_put(skb, qlt_size);
                break;
        }

        return 0;
}

static void ipc_mux_ul_adgh_finish(struct iosm_mux *ipc_mux)
{
        struct mux_adb *ul_adb = &ipc_mux->ul_adb;
        u16 adgh_len;
        long long bytes;
        char *str;

        if (!ul_adb->dest_skb) {
                dev_err(ipc_mux->dev, "no dest skb");
                return;
        }

        adgh_len = le16_to_cpu(ul_adb->adgh->length);
        skb_put(ul_adb->dest_skb, adgh_len);
        skb_queue_tail(&ipc_mux->channel->ul_list, ul_adb->dest_skb);
        ul_adb->dest_skb = NULL;

        if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS) {
                struct mux_session *session;

                session = &ipc_mux->session[ul_adb->adgh->if_id];
                str = "available_credits";
                bytes = (long long)session->ul_flow_credits;

        } else {
                str = "pend_bytes";
                bytes = ipc_mux->ul_data_pend_bytes;
                ipc_mux->ul_data_pend_bytes += adgh_len;
        }

        dev_dbg(ipc_mux->dev, "UL ADGH: size=%u, if_id=%d, payload=%d, %s=%lld",
                adgh_len, ul_adb->adgh->if_id, ul_adb->payload_size,
                str, bytes);
}

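/* Table chaining during UL encode: ul_adb->next_table_index points at the
 * previously written header's next_table_index field (initially at
 * adbh->first_table_index). ipc_mux_ul_encode_adth() stores the current
 * offset through that pointer to link each new ADTH/QLTH into the chain,
 * then advances the pointer to the fresh table's own next_table_index.
 */
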
static void ipc_mux_ul_encode_adth(struct iosm_mux *ipc_mux,
                                   struct mux_adb *ul_adb, int *out_offset)
{
        int i, qlt_size, offset = *out_offset;
        struct mux_qlth *p_adb_qlt;
        struct mux_adth_dg *dg;
        struct mux_adth *adth;
        u16 adth_dg_size;
        u32 *next_tb_id;

        qlt_size = offsetof(struct mux_qlth, ql) +
                        MUX_QUEUE_LEVEL * sizeof(struct mux_qlth_ql);

        for (i = 0; i < ipc_mux->nr_sessions; i++) {
                if (ul_adb->dg_count[i] > 0) {
                        adth_dg_size = offsetof(struct mux_adth, dg) +
                                        ul_adb->dg_count[i] * sizeof(*dg);

                        *ul_adb->next_table_index = offset;
                        adth = (struct mux_adth *)&ul_adb->buf[offset];
                        next_tb_id = (unsigned int *)&adth->next_table_index;
                        ul_adb->next_table_index = next_tb_id;
                        offset += adth_dg_size;
                        adth->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ADTH);
                        adth->if_id = i;
                        adth->table_length = cpu_to_le16(adth_dg_size);
                        adth_dg_size -= offsetof(struct mux_adth, dg);
                        memcpy(&adth->dg, ul_adb->dg[i], adth_dg_size);
                        ul_adb->if_cnt++;
                }

                if (ul_adb->qlt_updated[i]) {
                        *ul_adb->next_table_index = offset;
                        p_adb_qlt = (struct mux_qlth *)&ul_adb->buf[offset];
                        ul_adb->next_table_index =
                                (u32 *)&p_adb_qlt->next_table_index;
                        memcpy(p_adb_qlt, ul_adb->pp_qlt[i], qlt_size);
                        offset += qlt_size;
                }
        }
        *out_offset = offset;
}

/**
 * ipc_mux_ul_adb_finish - Add the TD of the aggregated session packets to TDR.
 * @ipc_mux:               Pointer to MUX data-struct.
 */
void ipc_mux_ul_adb_finish(struct iosm_mux *ipc_mux)
{
        bool ul_data_pend = false;
        struct mux_adb *ul_adb;
        unsigned long flags;
        int offset;

        ul_adb = &ipc_mux->ul_adb;
        if (!ul_adb->dest_skb)
                return;

        offset = *ul_adb->next_table_index;
        ipc_mux_ul_encode_adth(ipc_mux, ul_adb, &offset);
        ul_adb->adbh->block_length = cpu_to_le32(offset);

        if (le32_to_cpu(ul_adb->adbh->block_length) > ul_adb->size) {
                ul_adb->dest_skb = NULL;
                return;
        }

        *ul_adb->next_table_index = 0;
        ul_adb->adbh->sequence_nr = cpu_to_le16(ipc_mux->adb_tx_sequence_nr++);
        skb_put(ul_adb->dest_skb, le32_to_cpu(ul_adb->adbh->block_length));

        spin_lock_irqsave(&ipc_mux->channel->ul_list.lock, flags);
        __skb_queue_tail(&ipc_mux->channel->ul_list, ul_adb->dest_skb);
        spin_unlock_irqrestore(&ipc_mux->channel->ul_list.lock, flags);

        ul_adb->dest_skb = NULL;
        /* Updates the TDs with ul_list */
        ul_data_pend = ipc_imem_ul_write_td(ipc_mux->imem);

        /* Delay the doorbell irq */
        if (ul_data_pend)
                ipc_imem_td_update_timer_start(ipc_mux->imem);

        ipc_mux->acc_adb_size += le32_to_cpu(ul_adb->adbh->block_length);
        ipc_mux->acc_payload_size += ul_adb->payload_size;
        ipc_mux->ul_data_pend_bytes += ul_adb->payload_size;
}
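
/* acc_adb_size/acc_payload_size accumulate what has been aggregated, while
 * ul_data_pend_bytes tracks the UL data queued towards CP; the encode path
 * compares the latter against IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B to throttle
 * all sessions.
 */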

/* Allocates an ADB from the free list and initializes it with ADBH */
static bool ipc_mux_ul_adb_allocate(struct iosm_mux *ipc_mux,
                                    struct mux_adb *adb, int *size_needed,
                                    u32 type)
{
        bool ret_val = false;
        int status;

        if (!adb->dest_skb) {
                /* Allocate memory for the ADB, including the datagram
                 * table header.
                 */
                status = ipc_mux_ul_skb_alloc(ipc_mux, adb, type);
                if (status)
                        /* No free ADB skb is available. */
                        ret_val = true;

                /* The needed size is reset only for new ADB memory. */
                *size_needed = 0;
        }

        return ret_val;
}

/* Informs the network stack to stop sending further packets for all opened
 * sessions
 */
static void ipc_mux_stop_tx_for_all_sessions(struct iosm_mux *ipc_mux)
{
        struct mux_session *session;
        int idx;

        for (idx = 0; idx < IPC_MEM_MUX_IP_SESSION_ENTRIES; idx++) {
                session = &ipc_mux->session[idx];

                if (!session->wwan)
                        continue;

                session->net_tx_stop = true;
        }
}

/* Sends Queue Level Table of all opened sessions */
static bool ipc_mux_lite_send_qlt(struct iosm_mux *ipc_mux)
{
        struct ipc_mem_lite_gen_tbl *qlt;
        struct mux_session *session;
        bool qlt_updated = false;
        int i;
        int qlt_size;

        if (!ipc_mux->initialized || ipc_mux->state != MUX_S_ACTIVE)
                return qlt_updated;

        qlt_size = offsetof(struct ipc_mem_lite_gen_tbl, vfl) +
                   MUX_QUEUE_LEVEL * sizeof(struct mux_lite_vfl);

        for (i = 0; i < IPC_MEM_MUX_IP_SESSION_ENTRIES; i++) {
                session = &ipc_mux->session[i];

                if (!session->wwan || session->flow_ctl_mask)
                        continue;

                if (ipc_mux_ul_skb_alloc(ipc_mux, &ipc_mux->ul_adb,
                                         MUX_SIG_QLTH)) {
                        dev_err(ipc_mux->dev,
                                "no reserved mem to send QLT of if_id: %d", i);
                        break;
                }

                /* Prepare QLT */
                qlt = (struct ipc_mem_lite_gen_tbl *)(ipc_mux->ul_adb.qlth_skb)
                              ->data;
                qlt->signature = cpu_to_le32(MUX_SIG_QLTH);
                qlt->length = cpu_to_le16(qlt_size);
                qlt->if_id = i;
                qlt->vfl_length = MUX_QUEUE_LEVEL * sizeof(struct mux_lite_vfl);
                qlt->reserved[0] = 0;
                qlt->reserved[1] = 0;

                qlt->vfl.nr_of_bytes = cpu_to_le32(session->ul_list.qlen);

                /* Add QLT to the transfer list. */
                skb_queue_tail(&ipc_mux->channel->ul_list,
                               ipc_mux->ul_adb.qlth_skb);

                qlt_updated = true;
                ipc_mux->ul_adb.qlth_skb = NULL;
        }

        if (qlt_updated)
                /* Updates the TDs with ul_list */
                (void)ipc_imem_ul_write_td(ipc_mux->imem);

        return qlt_updated;
}

/* Checks the available credits for the specified session and returns
 * number of packets for which credits are available.
 */
static int ipc_mux_ul_bytes_credits_check(struct iosm_mux *ipc_mux,
                                          struct mux_session *session,
                                          struct sk_buff_head *ul_list,
                                          int max_nr_of_pkts)
{
        int pkts_to_send = 0;
        struct sk_buff *skb;
        int credits = 0;

        if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS) {
                credits = session->ul_flow_credits;
                if (credits <= 0) {
                        dev_dbg(ipc_mux->dev,
                                "FC::if_id[%d] Insuff.Credits/Qlen:%d/%u",
                                session->if_id, session->ul_flow_credits,
                                session->ul_list.qlen); /* nr_of_bytes */
                        return 0;
                }
        } else {
                credits = IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B -
                          ipc_mux->ul_data_pend_bytes;
                if (credits <= 0) {
                        ipc_mux_stop_tx_for_all_sessions(ipc_mux);

                        dev_dbg(ipc_mux->dev,
                                "if_id[%d] encod. fail Bytes: %llu, thresh: %d",
                                session->if_id, ipc_mux->ul_data_pend_bytes,
                                IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B);
                        return 0;
                }
        }

        /* Check if there are enough credits/bytes available to send the
         * requested max_nr_of_pkts. Otherwise restrict the nr_of_pkts
         * depending on available credits.
         */
        skb_queue_walk(ul_list, skb)
        {
                if (!(credits >= skb->len && pkts_to_send < max_nr_of_pkts))
                        break;
                credits -= skb->len;
                pkts_to_send++;
        }

        return pkts_to_send;
}
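
/* Example with assumed numbers: given session->ul_flow_credits == 3000 and
 * an ul_list holding three 1400-byte skbs, the walk above counts two
 * packets (3000 -> 1600 -> 200 credits) and stops at the third, so the
 * caller encodes only two datagrams.
 */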

/* Encode the UL IP packet according to Lite spec. */
static int ipc_mux_ul_adgh_encode(struct iosm_mux *ipc_mux, int session_id,
                                  struct mux_session *session,
                                  struct sk_buff_head *ul_list,
                                  struct mux_adb *adb, int nr_of_pkts)
{
        int offset = sizeof(struct mux_adgh);
        int adb_updated = -EINVAL;
        struct sk_buff *src_skb;
        int aligned_size = 0;
        int nr_of_skb = 0;
        u32 pad_len = 0;

        /* Re-calculate the number of packets depending on number of bytes to be
         * processed/available credits.
         */
        nr_of_pkts = ipc_mux_ul_bytes_credits_check(ipc_mux, session, ul_list,
                                                    nr_of_pkts);

        /* If calculated nr_of_pkts from available credits is <= 0
         * then nothing to do.
         */
        if (nr_of_pkts <= 0)
                return 0;

        /* Read configured UL head_pad_length for session. */
        if (session->ul_head_pad_len > IPC_MEM_DL_ETH_OFFSET)
                pad_len = session->ul_head_pad_len - IPC_MEM_DL_ETH_OFFSET;

        /* Process all pending UL packets for this session
         * depending on the allocated datagram table size.
         */
        while (nr_of_pkts > 0) {
                /* get destination skb allocated */
                if (ipc_mux_ul_adb_allocate(ipc_mux, adb, &ipc_mux->size_needed,
                                            IOSM_AGGR_MUX_SIG_ADGH)) {
                        dev_err(ipc_mux->dev, "no reserved memory for ADGH");
                        return -ENOMEM;
                }

                /* Peek at the head of the list. */
                src_skb = skb_peek(ul_list);
                if (!src_skb) {
                        dev_err(ipc_mux->dev,
                                "skb peek return NULL with count : %d",
                                nr_of_pkts);
                        break;
                }

                /* Calculate the memory value. */
                aligned_size = ALIGN((pad_len + src_skb->len), 4);

                ipc_mux->size_needed = sizeof(struct mux_adgh) + aligned_size;

                if (ipc_mux->size_needed > adb->size) {
                        dev_dbg(ipc_mux->dev, "size needed %d, adgh size %d",
                                ipc_mux->size_needed, adb->size);
                        /* Return 1 if any IP packet is added to the transfer
                         * list.
                         */
                        return nr_of_skb ? 1 : 0;
                }

                /* Add the buffer (without head padding) to the next
                 * pending transfer.
                 */
                memcpy(adb->buf + offset + pad_len, src_skb->data,
                       src_skb->len);

                adb->adgh->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ADGH);
                adb->adgh->if_id = session_id;
                adb->adgh->length =
                        cpu_to_le16(sizeof(struct mux_adgh) + pad_len +
                                    src_skb->len);
                adb->adgh->service_class = src_skb->priority;
                adb->adgh->next_count = --nr_of_pkts;
                adb->dg_cnt_total++;
                adb->payload_size += src_skb->len;

                if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS)
                        /* Decrement the credit value as we are processing the
                         * datagram from the UL list.
                         */
                        session->ul_flow_credits -= src_skb->len;

                /* Remove the processed elements and free it. */
                src_skb = skb_dequeue(ul_list);
                dev_kfree_skb(src_skb);
                nr_of_skb++;

                ipc_mux_ul_adgh_finish(ipc_mux);
        }

        if (nr_of_skb) {
                /* Send the queue level table to the modem on credit-based
                 * flow control, or (MUX Lite) once pending bytes reach the
                 * low watermark.
                 */
                if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS ||
                    ipc_mux->ul_data_pend_bytes >=
                            IPC_MEM_MUX_UL_FLOWCTRL_LOW_B)
                        adb_updated = ipc_mux_lite_send_qlt(ipc_mux);
                else
                        adb_updated = 1;

                /* Updates the TDs with ul_list */
                (void)ipc_imem_ul_write_td(ipc_mux->imem);
        }

        return adb_updated;
}

/**
 * ipc_mux_ul_adb_update_ql - Adds Queue Level Table and Queue Level to ADB
 * @ipc_mux:            pointer to MUX instance data
 * @p_adb:              pointer to UL aggregated data block
 * @session_id:         session id
 * @qlth_n_ql_size:     Length (in bytes) of the datagram table
 * @ul_list:            pointer to skb buffer head
 */
void ipc_mux_ul_adb_update_ql(struct iosm_mux *ipc_mux, struct mux_adb *p_adb,
                              int session_id, int qlth_n_ql_size,
                              struct sk_buff_head *ul_list)
{
        int qlevel = ul_list->qlen;
        struct mux_qlth *p_qlt;

        p_qlt = (struct mux_qlth *)p_adb->pp_qlt[session_id];

        /* Initialize the QLTH if not already done. */
        if (p_adb->qlt_updated[session_id] == 0) {
                p_qlt->signature = cpu_to_le32(MUX_SIG_QLTH);
                p_qlt->if_id = session_id;
                p_qlt->table_length = cpu_to_le16(qlth_n_ql_size);
                p_qlt->reserved = 0;
                p_qlt->reserved2 = 0;
        }

        /* Always update the queue level information. */
        p_qlt->ql.nr_of_bytes = cpu_to_le32(qlevel);
        p_adb->qlt_updated[session_id] = 1;
}

/* Update the next table index. */
static int mux_ul_dg_update_tbl_index(struct iosm_mux *ipc_mux,
                                      int session_id,
                                      struct sk_buff_head *ul_list,
                                      struct mux_adth_dg *dg,
                                      int aligned_size,
                                      u32 qlth_n_ql_size,
                                      struct mux_adb *adb,
                                      struct sk_buff *src_skb)
{
        ipc_mux_ul_adb_update_ql(ipc_mux, adb, session_id,
                                 qlth_n_ql_size, ul_list);
        ipc_mux_ul_adb_finish(ipc_mux);
        if (ipc_mux_ul_adb_allocate(ipc_mux, adb, &ipc_mux->size_needed,
                                    IOSM_AGGR_MUX_SIG_ADBH)) {
                dev_kfree_skb(src_skb);
                return -ENOMEM;
        }
        ipc_mux->size_needed = le32_to_cpu(adb->adbh->block_length);

        ipc_mux->size_needed += offsetof(struct mux_adth, dg);
        ipc_mux->size_needed += qlth_n_ql_size;
        ipc_mux->size_needed += sizeof(*dg) + aligned_size;
        return 0;
}

/* Process encode session UL data. */
static int mux_ul_dg_encode(struct iosm_mux *ipc_mux, struct mux_adb *adb,
                            struct mux_adth_dg *dg,
                            struct sk_buff_head *ul_list,
                            struct sk_buff *src_skb, int session_id,
                            int pkt_to_send, u32 qlth_n_ql_size,
                            int *out_offset, int head_pad_len)
{
        int aligned_size;
        int offset = *out_offset;
        unsigned long flags;
        int nr_of_skb = 0;

        while (pkt_to_send > 0) {
                /* Peek at the head of the list. */
                src_skb = skb_peek(ul_list);
                if (!src_skb) {
                        dev_err(ipc_mux->dev,
                                "skb peek return NULL with count : %d",
                                pkt_to_send);
                        return -1;
                }
                aligned_size = ALIGN((head_pad_len + src_skb->len), 4);
                ipc_mux->size_needed += sizeof(*dg) + aligned_size;

                if (ipc_mux->size_needed > adb->size ||
                    ((ipc_mux->size_needed + ipc_mux->ul_data_pend_bytes) >=
                      IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B)) {
                        *adb->next_table_index = offset;
                        if (mux_ul_dg_update_tbl_index(ipc_mux, session_id,
                                                       ul_list, dg,
                                                       aligned_size,
                                                       qlth_n_ql_size, adb,
                                                       src_skb) < 0)
                                return -ENOMEM;
                        nr_of_skb = 0;
                        offset = le32_to_cpu(adb->adbh->block_length);
                        /* Load pointer to next available datagram entry */
                        dg = adb->dg[session_id] + adb->dg_count[session_id];
                }
                /* Add buffer without head padding to next pending transfer. */
                memcpy(adb->buf + offset + head_pad_len,
                       src_skb->data, src_skb->len);
                /* Setup datagram entry. */
                dg->datagram_index = cpu_to_le32(offset);
                dg->datagram_length = cpu_to_le16(src_skb->len + head_pad_len);
                dg->service_class = src_skb->priority;
                dg->reserved = 0;
                adb->dg_cnt_total++;
                adb->payload_size += le16_to_cpu(dg->datagram_length);
                dg++;
                adb->dg_count[session_id]++;
                offset += aligned_size;
                /* Remove the processed elements and free it. */
                spin_lock_irqsave(&ul_list->lock, flags);
                src_skb = __skb_dequeue(ul_list);
                spin_unlock_irqrestore(&ul_list->lock, flags);

                dev_kfree_skb(src_skb);
                nr_of_skb++;
                pkt_to_send--;
        }
        *out_offset = offset;
        return nr_of_skb;
}
1287
1288 /* Process encode session UL data to ADB. */
1289 static int mux_ul_adb_encode(struct iosm_mux *ipc_mux, int session_id,
1290                              struct mux_session *session,
1291                              struct sk_buff_head *ul_list, struct mux_adb *adb,
1292                              int pkt_to_send)
1293 {
1294         int adb_updated = -EINVAL;
1295         int head_pad_len, offset;
1296         struct sk_buff *src_skb = NULL;
1297         struct mux_adth_dg *dg;
1298         u32 qlth_n_ql_size;
1299
1300         /* If any of the opened session has set Flow Control ON then limit the
1301          * UL data to mux_flow_ctrl_high_thresh_b bytes
1302          */
1303         if (ipc_mux->ul_data_pend_bytes >=
1304                 IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B) {
1305                 ipc_mux_stop_tx_for_all_sessions(ipc_mux);
1306                 return adb_updated;
1307         }
1308
1309         qlth_n_ql_size = offsetof(struct mux_qlth, ql) +
1310                          MUX_QUEUE_LEVEL * sizeof(struct mux_qlth_ql);
1311         head_pad_len = session->ul_head_pad_len;
1312
1313         if (session->ul_head_pad_len > IPC_MEM_DL_ETH_OFFSET)
1314                 head_pad_len = session->ul_head_pad_len - IPC_MEM_DL_ETH_OFFSET;
1315
1316         if (ipc_mux_ul_adb_allocate(ipc_mux, adb, &ipc_mux->size_needed,
1317                                     IOSM_AGGR_MUX_SIG_ADBH))
1318                 return -ENOMEM;
1319
1320         offset = le32_to_cpu(adb->adbh->block_length);
1321
1322         if (ipc_mux->size_needed == 0)
1323                 ipc_mux->size_needed = offset;
1324
1325         /* Calculate the size needed for ADTH, QLTH and QL. */
1326         if (adb->dg_count[session_id] == 0) {
1327                 ipc_mux->size_needed += offsetof(struct mux_adth, dg);
1328                 ipc_mux->size_needed += qlth_n_ql_size;
1329         }
1330
1331         dg = adb->dg[session_id] + adb->dg_count[session_id];
1332
1333         if (mux_ul_dg_encode(ipc_mux, adb, dg, ul_list, src_skb,
1334                              session_id, pkt_to_send, qlth_n_ql_size, &offset,
1335                              head_pad_len) > 0) {
1336                 adb_updated = 1;
1337                 *adb->next_table_index = offset;
1338                 ipc_mux_ul_adb_update_ql(ipc_mux, adb, session_id,
1339                                          qlth_n_ql_size, ul_list);
1340                 adb->adbh->block_length = cpu_to_le32(offset);
1341         }
1342
1343         return adb_updated;
1344 }
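
/* Back-of-the-envelope helper, illustrative only: the extra bytes a
 * session pays the first time it contributes a datagram to an ADB,
 * matching the size_needed accounting above.
 */
static inline u32 mux_session_first_dg_overhead_sketch(void)
{
        /* Queue level table header plus MUX_QUEUE_LEVEL queue levels. */
        u32 qlth_n_ql_size = offsetof(struct mux_qlth, ql) +
                             MUX_QUEUE_LEVEL * sizeof(struct mux_qlth_ql);

        /* ADTH header up to the first datagram entry, plus QLTH/QL. */
        return offsetof(struct mux_adth, dg) + qlth_n_ql_size;
}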
1345
1346 bool ipc_mux_ul_data_encode(struct iosm_mux *ipc_mux)
1347 {
1348         struct sk_buff_head *ul_list;
1349         struct mux_session *session;
1350         int updated = 0;
1351         int session_id;
1352         int dg_n;
1353         int i;
1354
1355         if (!ipc_mux || ipc_mux->state != MUX_S_ACTIVE ||
1356             ipc_mux->adb_prep_ongoing)
1357                 return false;
1358
1359         ipc_mux->adb_prep_ongoing = true;
1360
1361         for (i = 0; i < IPC_MEM_MUX_IP_SESSION_ENTRIES; i++) {
1362                 session_id = ipc_mux->rr_next_session;
1363                 session = &ipc_mux->session[session_id];
1364
1365                 /* Go to the next session and handle rr_next_session overflow. */
1366                 ipc_mux->rr_next_session++;
1367                 if (ipc_mux->rr_next_session >= IPC_MEM_MUX_IP_SESSION_ENTRIES)
1368                         ipc_mux->rr_next_session = 0;
1369
1370                 if (!session->wwan || session->flow_ctl_mask ||
1371                     session->net_tx_stop)
1372                         continue;
1373
1374                 ul_list = &session->ul_list;
1375
1376                 /* Anything pending in UL while flow control is off? */
1377                 dg_n = skb_queue_len(ul_list);
1378                 if (dg_n > MUX_MAX_UL_DG_ENTRIES)
1379                         dg_n = MUX_MAX_UL_DG_ENTRIES;
1380
1381                 if (dg_n == 0)
1382                         /* Nothing to do for ipc_mux session
1383                          * -> try next session id.
1384                          */
1385                         continue;
1386                 if (ipc_mux->protocol == MUX_LITE)
1387                         updated = ipc_mux_ul_adgh_encode(ipc_mux, session_id,
1388                                                          session, ul_list,
1389                                                          &ipc_mux->ul_adb,
1390                                                          dg_n);
1391                 else
1392                         updated = mux_ul_adb_encode(ipc_mux, session_id,
1393                                                     session, ul_list,
1394                                                     &ipc_mux->ul_adb,
1395                                                     dg_n);
1396         }
1397
1398         ipc_mux->adb_prep_ongoing = false;
1399         return updated == 1;
1400 }
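
/* Hypothetical standalone helper showing the round-robin step used in
 * ipc_mux_ul_data_encode(): advance the session index and wrap at the
 * session count.
 */
static inline int mux_rr_advance_sketch(int session_id)
{
        return (session_id + 1) % IPC_MEM_MUX_IP_SESSION_ENTRIES;
}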
1401
1402 /* Calculate the payload size carried by a given ADB. */
1403 static int ipc_mux_get_payload_from_adb(struct iosm_mux *ipc_mux,
1404                                         struct mux_adbh *p_adbh)
1405 {
1406         struct mux_adth_dg *dg;
1407         struct mux_adth *adth;
1408         u32 payload_size = 0;
1409         u32 next_table_idx;
1410         int nr_of_dg, i;
1411
1412         /* Process the aggregated datagram tables. */
1413         next_table_idx = le32_to_cpu(p_adbh->first_table_index);
1414
1415         if (next_table_idx < sizeof(struct mux_adbh)) {
1416                 dev_err(ipc_mux->dev, "unexpected empty ADB");
1417                 return payload_size;
1418         }
1419
1420         while (next_table_idx != 0) {
1421                 /* Get the reference to the table header. */
1422                 adth = (struct mux_adth *)((u8 *)p_adbh + next_table_idx);
1423
1424                 if (adth->signature == cpu_to_le32(IOSM_AGGR_MUX_SIG_ADTH)) {
1425                         nr_of_dg = (le16_to_cpu(adth->table_length) -
1426                                         sizeof(struct mux_adth) +
1427                                         sizeof(struct mux_adth_dg)) /
1428                                         sizeof(struct mux_adth_dg);
1429
1430                         if (nr_of_dg <= 0)
1431                                 return payload_size;
1432
1433                         dg = &adth->dg;
1434
1435                         for (i = 0; i < nr_of_dg; i++, dg++) {
1436                                 if (le32_to_cpu(dg->datagram_index) <
1437                                         sizeof(struct mux_adbh)) {
1438                                         return payload_size;
1439                                 }
1440                                 payload_size +=
1441                                         le16_to_cpu(dg->datagram_length);
1442                         }
1443                 }
1444                 next_table_idx = le32_to_cpu(adth->next_table_index);
1445         }
1446
1447         return payload_size;
1448 }
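
/* Worked example, illustrative only, of the datagram-count arithmetic
 * above: sizeof(struct mux_adth) already includes one embedded
 * mux_adth_dg, so it is backed out before dividing by the entry size.
 * With a hypothetical 16 bytes of header before the first entry and
 * 8-byte entries, table_length = 16 + n * 8 recovers n.
 */
static inline int mux_adth_nr_of_dg_sketch(u16 table_length)
{
        return ((int)table_length - (int)sizeof(struct mux_adth) +
                (int)sizeof(struct mux_adth_dg)) /
               (int)sizeof(struct mux_adth_dg);
}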
1449
1450 void ipc_mux_ul_encoded_process(struct iosm_mux *ipc_mux, struct sk_buff *skb)
1451 {
1452         union mux_type_header hr;
1453         u16 adgh_len;
1454         int payload;
1455
1456         if (ipc_mux->protocol == MUX_LITE) {
1457                 hr.adgh = (struct mux_adgh *)skb->data;
1458                 adgh_len = le16_to_cpu(hr.adgh->length);
1459                 if (hr.adgh->signature == cpu_to_le32(IOSM_AGGR_MUX_SIG_ADGH) &&
1460                     ipc_mux->ul_flow == MUX_UL)
1461                         ipc_mux->ul_data_pend_bytes -= adgh_len;
1463         } else {
1464                 hr.adbh = (struct mux_adbh *)(skb->data);
1465                 payload = ipc_mux_get_payload_from_adb(ipc_mux, hr.adbh);
1466                 ipc_mux->ul_data_pend_bytes -= payload;
1467         }
1468
1469         if (ipc_mux->ul_flow == MUX_UL)
1470                 dev_dbg(ipc_mux->dev, "ul_data_pend_bytes: %lld",
1471                         ipc_mux->ul_data_pend_bytes);
1472
1473         /* Reset the skb settings. */
1474         skb->tail = 0;
1475         skb->len = 0;
1476
1477         /* Add the consumed ADB to the free list. */
1478         skb_queue_tail(&ipc_mux->ul_adb.free_list, skb);
1479 }
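
/* Minimal sketch of the recycle step above, assuming the skb's data
 * buffer stays allocated: clearing tail and len rewinds the skb so the
 * whole buffer can carry the next ADB.
 */
static inline void mux_adb_skb_recycle_sketch(struct sk_buff *skb,
                                              struct sk_buff_head *free_list)
{
        skb->tail = 0;
        skb->len = 0;
        skb_queue_tail(free_list, skb);
}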
1480
1481 /* Start the NETIF uplink send transfer in MUX mode. */
1482 static int ipc_mux_tq_ul_trigger_encode(struct iosm_imem *ipc_imem, int arg,
1483                                         void *msg, size_t size)
1484 {
1485         struct iosm_mux *ipc_mux = ipc_imem->mux;
1486         bool ul_data_pend = false;
1487
1488         /* Add the session UL data to an ADB or, for MUX_LITE, an ADGH. */
1489         ul_data_pend = ipc_mux_ul_data_encode(ipc_mux);
1490         if (ul_data_pend) {
1491                 if (ipc_mux->protocol == MUX_AGGREGATION)
1492                         ipc_imem_adb_timer_start(ipc_mux->imem);
1493
1494                 /* Delay the doorbell irq */
1495                 ipc_imem_td_update_timer_start(ipc_mux->imem);
1496         }
1497         /* Reset the debounce flag. */
1498         ipc_mux->ev_mux_net_transmit_pending = false;
1499
1500         return 0;
1501 }
1502
1503 int ipc_mux_ul_trigger_encode(struct iosm_mux *ipc_mux, int if_id,
1504                               struct sk_buff *skb)
1505 {
1506         struct mux_session *session = &ipc_mux->session[if_id];
1507         int ret = -EINVAL;
1508
1509         if (ipc_mux->channel &&
1510             ipc_mux->channel->state != IMEM_CHANNEL_ACTIVE) {
1511                 dev_err(ipc_mux->dev,
1512                         "channel state is not IMEM_CHANNEL_ACTIVE");
1513                 goto out;
1514         }
1515
1516         if (!session->wwan) {
1517                 dev_err(ipc_mux->dev, "session net ID is NULL");
1518                 ret = -EFAULT;
1519                 goto out;
1520         }
1521
1522         /* The session may be under flow control.
1523          * Check if the packet can be queued in the session list;
1524          * if not, suspend net tx.
1525          */
1526         if (skb_queue_len(&session->ul_list) >=
1527             (session->net_tx_stop ?
1528                      IPC_MEM_MUX_UL_SESS_FCON_THRESHOLD :
1529                      (IPC_MEM_MUX_UL_SESS_FCON_THRESHOLD *
1530                       IPC_MEM_MUX_UL_SESS_FCOFF_THRESHOLD_FACTOR))) {
1531                 ipc_mux_netif_tx_flowctrl(session, session->if_id, true);
1532                 ret = -EBUSY;
1533                 goto out;
1534         }
1535
1536         /* Add skb to the uplink skb accumulator. */
1537         skb_queue_tail(&session->ul_list, skb);
1538
1539         /* Inform the IPC kthread to pass uplink IP packets to CP. */
1540         if (!ipc_mux->ev_mux_net_transmit_pending) {
1541                 ipc_mux->ev_mux_net_transmit_pending = true;
1542                 ret = ipc_task_queue_send_task(ipc_mux->imem,
1543                                                ipc_mux_tq_ul_trigger_encode, 0,
1544                                                NULL, 0, false);
1545                 if (ret)
1546                         goto out;
1547         }
1548         dev_dbg(ipc_mux->dev, "mux ul if[%d] qlen=%d/%u, len=%d/%d, prio=%d",
1549                 if_id, skb_queue_len(&session->ul_list), session->ul_list.qlen,
1550                 skb->len, skb->truesize, skb->priority);
1551         ret = 0;
1552 out:
1553         return ret;
1554 }
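
/* Illustrative helper, not part of the driver, for the hysteresis in
 * ipc_mux_ul_trigger_encode(): while net tx is stopped the lower base
 * threshold applies, otherwise up to FCON * FCOFF_FACTOR packets may
 * be queued before tx is suspended.
 */
static inline bool mux_session_ul_full_sketch(u32 qlen, bool net_tx_stop)
{
        u32 limit = net_tx_stop ?
                IPC_MEM_MUX_UL_SESS_FCON_THRESHOLD :
                IPC_MEM_MUX_UL_SESS_FCON_THRESHOLD *
                IPC_MEM_MUX_UL_SESS_FCOFF_THRESHOLD_FACTOR;

        return qlen >= limit;
}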