1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2020-21 Intel Corporation.
6 #include <linux/nospec.h>
8 #include "iosm_ipc_imem_ops.h"
9 #include "iosm_ipc_mux_codec.h"
10 #include "iosm_ipc_task_queue.h"
12 /* Test the link power state and send a MUX command in blocking mode. */
13 static int ipc_mux_tq_cmd_send(struct iosm_imem *ipc_imem, int arg, void *msg,
/* NOTE(review): listing truncated — remaining parameters, return and closing
 * brace are not visible in this excerpt.
 */
16 	struct iosm_mux *ipc_mux = ipc_imem->mux;
17 	const struct mux_acb *acb = msg;
/* Queue the prepared command skb on the UL channel list and kick UL send. */
19 	skb_queue_tail(&ipc_mux->channel->ul_list, acb->skb);
20 	ipc_imem_ul_send(ipc_mux->imem);
/* Send the ACB via the task queue; optionally block on completion of the
 * UL semaphore with a modem-timeout uevent on expiry.
 * NOTE(review): several lines (error return path, closing braces) are missing
 * from this excerpt.
 */
25 static int ipc_mux_acb_send(struct iosm_mux *ipc_mux, bool blocking)
27 	struct completion *completion = &ipc_mux->channel->ul_sem;
28 	int ret = ipc_task_queue_send_task(ipc_mux->imem, ipc_mux_tq_cmd_send,
30 					   sizeof(ipc_mux->acb), false);
32 		dev_err(ipc_mux->dev, "unable to send mux command");
36 	/* if blocking, suspend the app and wait for irq in the flash or
37 	 * crash phase. return false on timeout to indicate failure.
40 		u32 wait_time_milliseconds = IPC_MUX_CMD_RUN_DEFAULT_TIMEOUT;
42 		reinit_completion(completion);
44 		if (wait_for_completion_interruptible_timeout
45 		    (completion, msecs_to_jiffies(wait_time_milliseconds)) ==
/* Timed out waiting for the modem response: log and raise a uevent. */
47 			dev_err(ipc_mux->dev, "ch[%d] timeout",
49 			ipc_uevent_send(ipc_mux->imem->dev, UEVENT_MDM_TIMEOUT);
57 /* Initialize the command header. */
58 static void ipc_mux_acb_init(struct iosm_mux *ipc_mux)
60 	struct mux_acb *acb = &ipc_mux->acb;
61 	struct mux_acbh *header;
/* The ACBH lives at the start of the skb data; the first command follows
 * immediately after the header (first_cmd_index == block_length).
 */
63 	header = (struct mux_acbh *)(acb->skb)->data;
64 	header->block_length = cpu_to_le32(sizeof(struct mux_acbh));
65 	header->first_cmd_index = header->block_length;
66 	header->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ACBH);
/* Monotonic TX sequence number, little-endian on the wire. */
67 	header->sequence_nr = cpu_to_le16(ipc_mux->acb_tx_sequence_nr++);
70 /* Add a command to the ACB. */
71 static struct mux_cmdh *ipc_mux_acb_add_cmd(struct iosm_mux *ipc_mux, u32 cmd,
72 					    void *param, u32 param_size)
74 	struct mux_acbh *header;
75 	struct mux_cmdh *cmdh;
/* Place the command header right after the ACBH inside the same skb. */
79 	header = (struct mux_acbh *)(acb->skb)->data;
80 	cmdh = (struct mux_cmdh *)
81 		((acb->skb)->data + le32_to_cpu(header->block_length));
83 	cmdh->signature = cpu_to_le32(MUX_SIG_CMDH);
84 	cmdh->command_type = cpu_to_le32(cmd);
85 	cmdh->if_id = acb->if_id;
/* cmd_len = fixed cmdh prefix + variable parameter payload. */
88 	cmdh->cmd_len = cpu_to_le16(offsetof(struct mux_cmdh, param) +
90 	cmdh->transaction_id = cpu_to_le32(ipc_mux->tx_transaction_id++);
92 		memcpy(&cmdh->param, param, param_size);
/* Extend the skb tail to cover ACBH + command. */
94 	skb_put(acb->skb, le32_to_cpu(header->block_length) +
95 	       le16_to_cpu(cmdh->cmd_len));
100 /* Prepare mux Command */
101 static struct mux_lite_cmdh *ipc_mux_lite_add_cmd(struct iosm_mux *ipc_mux,
102 						  u32 cmd, struct mux_acb *acb,
103 						  void *param, u32 param_size)
/* MUX-Lite variant: the lite command header starts at the skb data itself
 * (no aggregated block header in front).
 */
105 	struct mux_lite_cmdh *cmdh = (struct mux_lite_cmdh *)acb->skb->data;
107 	cmdh->signature = cpu_to_le32(MUX_SIG_CMDH);
108 	cmdh->command_type = cpu_to_le32(cmd);
109 	cmdh->if_id = acb->if_id;
113 	cmdh->cmd_len = cpu_to_le16(offsetof(struct mux_lite_cmdh, param) +
115 	cmdh->transaction_id = cpu_to_le32(ipc_mux->tx_transaction_id++);
118 		memcpy(&cmdh->param, param, param_size);
120 	skb_put(acb->skb, le16_to_cpu(cmdh->cmd_len));
/* Allocate and zero a DMA-mapped UL skb for the ACB.
 * NOTE(review): error handling and the assignment of the skb into the ACB
 * are not visible in this truncated excerpt.
 */
125 static int ipc_mux_acb_alloc(struct iosm_mux *ipc_mux)
127 	struct mux_acb *acb = &ipc_mux->acb;
131 	/* Allocate skb memory for the uplink buffer. */
132 	skb = ipc_pcie_alloc_skb(ipc_mux->pcie, MUX_MAX_UL_ACB_BUF_SIZE,
133 				 GFP_ATOMIC, &mapping, DMA_TO_DEVICE, 0);
137 	/* Save the skb address. */
140 	memset(skb->data, 0, MUX_MAX_UL_ACB_BUF_SIZE);
/* Build and send a MUX command, choosing the Lite or Aggregation encoding
 * based on the negotiated protocol. Public entry point for command sends.
 */
145 int ipc_mux_dl_acb_send_cmds(struct iosm_mux *ipc_mux, u32 cmd_type, u8 if_id,
146 			     u32 transaction_id, union mux_cmd_param *param,
147 			     size_t res_size, bool blocking, bool respond)
149 	struct mux_acb *acb = &ipc_mux->acb;
150 	union mux_type_cmdh cmdh;
154 	ret = ipc_mux_acb_alloc(ipc_mux);
158 	if (ipc_mux->protocol == MUX_LITE) {
159 		cmdh.ack_lite = ipc_mux_lite_add_cmd(ipc_mux, cmd_type, acb,
/* When responding, overwrite the auto-assigned transaction id with the
 * caller-supplied one so the modem can correlate the reply.
 */
163 			cmdh.ack_lite->transaction_id =
164 				cpu_to_le32(transaction_id);
166 		/* Initialize the ACB header. */
167 		ipc_mux_acb_init(ipc_mux);
168 		cmdh.ack_aggr = ipc_mux_acb_add_cmd(ipc_mux, cmd_type, param,
172 			cmdh.ack_aggr->transaction_id =
173 				cpu_to_le32(transaction_id);
175 	ret = ipc_mux_acb_send(ipc_mux, blocking);
/* Forward a start/stop flow-control request for one session to the wwan
 * network interface layer.
 */
180 void ipc_mux_netif_tx_flowctrl(struct mux_session *session, int idx, bool on)
182 	/* Inform the network interface to start/stop flow ctrl */
183 	ipc_wwan_tx_flowctrl(session->wwan, idx, on);
/* Decode a DL command *response* and wake the blocked command sender.
 * Returns non-zero when the type is not a valid response for the active
 * protocol variant (caller then tries to decode it as a command).
 */
186 static int ipc_mux_dl_cmdresps_decode_process(struct iosm_mux *ipc_mux,
187 					      union mux_cmd_param param,
188 					      __le32 command_type, u8 if_id,
189 					      __le32 transaction_id)
191 	struct mux_acb *acb = &ipc_mux->acb;
193 	switch (le32_to_cpu(command_type)) {
194 	case MUX_CMD_OPEN_SESSION_RESP:
195 	case MUX_CMD_CLOSE_SESSION_RESP:
196 		/* Resume the control application. */
197 		acb->got_param = param;
200 	case MUX_LITE_CMD_FLOW_CTL_ACK:
201 		/* This command type is not expected as response for
202 		 * Aggregation version of the protocol. So return non-zero.
204 		if (ipc_mux->protocol != MUX_LITE)
207 		dev_dbg(ipc_mux->dev, "if_id %u FLOW_CTL_ACK %u received",
208 			if_id, le32_to_cpu(transaction_id));
211 	case IOSM_AGGR_MUX_CMD_FLOW_CTL_ACK:
212 		/* This command type is not expected as response for
213 		 * Lite version of the protocol. So return non-zero.
215 		if (ipc_mux->protocol == MUX_LITE)
/* Record the received response and complete the UL semaphore so a blocking
 * ipc_mux_acb_send() can resume.
 */
223 	acb->wanted_response = MUX_CMD_INVALID;
224 	acb->got_response = le32_to_cpu(command_type);
225 	complete(&ipc_mux->channel->ul_sem);
/* Decode a DL *command* from the modem (CP), currently flow-control
 * enable/disable and link-status report. Returns 0 when the command was
 * handled (caller may then need to send a response).
 */
230 static int ipc_mux_dl_cmds_decode_process(struct iosm_mux *ipc_mux,
231 					  union mux_cmd_param *param,
232 					  __le32 command_type, u8 if_id,
233 					  __le16 cmd_len, int size)
235 	struct mux_session *session;
236 	struct hrtimer *adb_timer;
238 	dev_dbg(ipc_mux->dev, "if_id[%d]: dlcmds decode process %d",
239 		if_id, le32_to_cpu(command_type));
241 	switch (le32_to_cpu(command_type)) {
242 	case MUX_LITE_CMD_FLOW_CTL:
243 	case IOSM_AGGR_MUX_CMD_FLOW_CTL_DISABLE:
/* Validate the session index before touching the session array. */
245 		if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
246 			dev_err(ipc_mux->dev, "if_id [%d] not valid",
248 			return -EINVAL; /* No session interface id. */
251 		session = &ipc_mux->session[if_id];
252 		adb_timer = &ipc_mux->imem->adb_timer;
/* Mask all-ones => flow control ENABLE for this session. */
254 		if (param->flow_ctl.mask == cpu_to_le32(0xFFFFFFFF)) {
255 			/* Backward Compatibility */
256 			if (cmd_len == cpu_to_le16(size))
257 				session->flow_ctl_mask =
258 					le32_to_cpu(param->flow_ctl.mask);
260 				session->flow_ctl_mask = ~0;
261 			/* if CP asks for FLOW CTRL Enable
262 			 * then set our internal flow control Tx flag
263 			 * to limit uplink session queueing
265 			session->net_tx_stop = true;
267 			/* We have to call Finish ADB here.
268 			 * Otherwise any already queued data
269 			 * will be sent to CP when ADB is full
270 			 * for some other sessions.
272 			if (ipc_mux->protocol == MUX_AGGREGATION) {
273 				ipc_mux_ul_adb_finish(ipc_mux);
274 				ipc_imem_hrtimer_stop(adb_timer);
276 			/* Update the stats */
277 			session->flow_ctl_en_cnt++;
/* Mask zero => flow control DISABLE for this session. */
278 		} else if (param->flow_ctl.mask == 0) {
279 			/* Just reset the Flow control mask and let
280 			 * mux_flow_ctrl_low_thre_b take control on
281 			 * our internal Tx flag and enabling kernel
284 			dev_dbg(ipc_mux->dev, "if_id[%u] flow_ctl mask 0x%08X",
285 				if_id, le32_to_cpu(param->flow_ctl.mask));
286 			/* Backward Compatibility */
287 			if (cmd_len == cpu_to_le16(size))
288 				session->flow_ctl_mask =
289 					le32_to_cpu(param->flow_ctl.mask);
291 				session->flow_ctl_mask = 0;
292 			/* Update the stats */
293 			session->flow_ctl_dis_cnt++;
/* Reset aggregated accounting counters after a flow-control change. */
298 		ipc_mux->acc_adb_size = 0;
299 		ipc_mux->acc_payload_size = 0;
301 		dev_dbg(ipc_mux->dev, "if_id[%u] FLOW CTRL 0x%08X", if_id,
302 			le32_to_cpu(param->flow_ctl.mask));
305 	case MUX_LITE_CMD_LINK_STATUS_REPORT:
314 /* Decode and Send appropriate response to a command block. */
315 static void ipc_mux_dl_cmd_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
317 	struct mux_lite_cmdh *cmdh = (struct mux_lite_cmdh *)skb->data;
318 	__le32 trans_id = cmdh->transaction_id;
/* First try to interpret the block as a command *response*; if that fails
 * it may be a modem-initiated command that itself needs a response.
 */
321 	if (ipc_mux_dl_cmdresps_decode_process(ipc_mux, cmdh->param,
322 					       cmdh->command_type, cmdh->if_id,
323 					       cmdh->transaction_id)) {
324 		/* Unable to decode command response indicates the cmd_type
325 		 * may be a command instead of response. So try to decoding it.
327 		size = offsetof(struct mux_lite_cmdh, param) +
328 		       sizeof(cmdh->param.flow_ctl);
329 		if (!ipc_mux_dl_cmds_decode_process(ipc_mux, &cmdh->param,
332 						    cmdh->cmd_len, size)) {
333 			/* Decoded command may need a response. Give the
334 			 * response according to the command type.
336 			union mux_cmd_param *mux_cmd = NULL;
338 			u32 cmd = MUX_LITE_CMD_LINK_STATUS_REPORT_RESP;
340 			if (cmdh->command_type ==
341 			    cpu_to_le32(MUX_LITE_CMD_LINK_STATUS_REPORT)) {
342 				mux_cmd = &cmdh->param;
343 				mux_cmd->link_status_resp.response =
344 					cpu_to_le32(MUX_CMD_RESP_SUCCESS);
345 				/* response field is u32 */
347 			} else if (cmdh->command_type ==
348 				   cpu_to_le32(MUX_LITE_CMD_FLOW_CTL)) {
349 				cmd = MUX_LITE_CMD_FLOW_CTL_ACK;
/* Echo the original transaction id back in the response. */
354 			if (ipc_mux_dl_acb_send_cmds(ipc_mux, cmd, cmdh->if_id,
355 						     le32_to_cpu(trans_id),
356 						     mux_cmd, size, false,
358 				dev_err(ipc_mux->dev,
359 					"if_id %d: cmd send failed",
365 /* Pass the DL packet to the netif layer. */
366 static int ipc_mux_net_receive(struct iosm_mux *ipc_mux, int if_id,
367 			       struct iosm_wwan *wwan, u32 offset,
368 			       u8 service_class, struct sk_buff *skb)
/* Clone the skb so the caller retains ownership of the original buffer;
 * the clone is trimmed to the payload and handed to the wwan net layer.
 */
370 	struct sk_buff *dest_skb = skb_clone(skb, GFP_ATOMIC);
375 	skb_pull(dest_skb, offset);
376 	skb_set_tail_pointer(dest_skb, dest_skb->len);
377 	/* Pass the packet to the netif layer. */
378 	dest_skb->priority = service_class;
380 	return ipc_wwan_receive(wwan, dest_skb, false, if_id);
383 /* Decode Flow Credit Table in the block */
384 static void ipc_mux_dl_fcth_decode(struct iosm_mux *ipc_mux,
385 				   unsigned char *block)
387 	struct ipc_mem_lite_gen_tbl *fct = (struct ipc_mem_lite_gen_tbl *)block;
388 	struct iosm_wwan *wwan;
/* Sanity-check the variable-field length before trusting the table. */
392 	if (fct->vfl_length != sizeof(fct->vfl.nr_of_bytes)) {
393 		dev_err(ipc_mux->dev, "unexpected FCT length: %d",
399 	if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
400 		dev_err(ipc_mux->dev, "not supported if_id: %d", if_id);
404 	/* Is the session active ? */
/* Clamp the index against speculative out-of-bounds access (Spectre v1). */
405 	if_id = array_index_nospec(if_id, IPC_MEM_MUX_IP_SESSION_ENTRIES);
406 	wwan = ipc_mux->session[if_id].wwan;
408 		dev_err(ipc_mux->dev, "session Net ID is NULL");
412 	ul_credits = le32_to_cpu(fct->vfl.nr_of_bytes);
414 	dev_dbg(ipc_mux->dev, "Flow_Credit:: if_id[%d] Old: %d Grants: %d",
415 		if_id, ipc_mux->session[if_id].ul_flow_credits, ul_credits);
417 	/* Update the Flow Credit information from ADB */
418 	ipc_mux->session[if_id].ul_flow_credits += ul_credits;
420 	/* Check whether the TX can be started */
421 	if (ipc_mux->session[if_id].ul_flow_credits > 0) {
422 		ipc_mux->session[if_id].net_tx_stop = false;
/* Credits available again: re-enable UL traffic for the session. */
423 		ipc_mux_netif_tx_flowctrl(&ipc_mux->session[if_id],
424 					  ipc_mux->session[if_id].if_id, false);
428 /* Decode non-aggregated datagram */
429 static void ipc_mux_dl_adgh_decode(struct iosm_mux *ipc_mux,
432 	u32 pad_len, packet_offset;
433 	struct iosm_wwan *wwan;
434 	struct mux_adgh *adgh;
435 	u8 *block = skb->data;
439 	adgh = (struct mux_adgh *)block;
/* Reject blocks without the ADGH signature. */
441 	if (adgh->signature != cpu_to_le32(IOSM_AGGR_MUX_SIG_ADGH)) {
442 		dev_err(ipc_mux->dev, "invalid ADGH signature received");
447 	if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
448 		dev_err(ipc_mux->dev, "invalid if_id while decoding %d", if_id);
452 	/* Is the session active ? */
453 	if_id = array_index_nospec(if_id, IPC_MEM_MUX_IP_SESSION_ENTRIES);
454 	wwan = ipc_mux->session[if_id].wwan;
456 		dev_err(ipc_mux->dev, "session Net ID is NULL");
460 	/* Store the pad len for the corresponding session
461 	 * Pad bytes as negotiated in the open session less the header size
462 	 * (see session management chapter for details).
463 	 * If resulting padding is zero or less, the additional head padding is
464 	 * omitted. For e.g., if HEAD_PAD_LEN = 16 or less, this field is
465 	 * omitted if HEAD_PAD_LEN = 20, then this field will have 4 bytes
469 		ipc_mux->session[if_id].dl_head_pad_len - IPC_MEM_DL_ETH_OFFSET;
470 	packet_offset = sizeof(*adgh) + pad_len;
/* Map MUX session index onto the wwan queue index. */
472 	if_id += ipc_mux->wwan_q_offset;
474 	/* Pass the packet to the netif layer */
475 	rc = ipc_mux_net_receive(ipc_mux, if_id, wwan, packet_offset,
476 				 adgh->service_class, skb);
478 		dev_err(ipc_mux->dev, "mux adgh decoding error");
/* Mark session so pending RX work gets flushed later. */
481 	ipc_mux->session[if_id].flush = 1;
/* Decode one command from an aggregated command block and send the matching
 * response (link-status response or flow-control ACK) back to the modem.
 */
484 static void ipc_mux_dl_acbcmd_decode(struct iosm_mux *ipc_mux,
485 				     struct mux_cmdh *cmdh, int size)
487 	u32 link_st = IOSM_AGGR_MUX_CMD_LINK_STATUS_REPORT_RESP;
488 	u32 fctl_dis = IOSM_AGGR_MUX_CMD_FLOW_CTL_DISABLE;
489 	u32 fctl_ena = IOSM_AGGR_MUX_CMD_FLOW_CTL_ENABLE;
490 	u32 fctl_ack = IOSM_AGGR_MUX_CMD_FLOW_CTL_ACK;
491 	union mux_cmd_param *cmd_p = NULL;
495 	if (!ipc_mux_dl_cmds_decode_process(ipc_mux, &cmdh->param,
496 					    cmdh->command_type, cmdh->if_id,
497 					    cmdh->cmd_len, size)) {
499 		if (cmdh->command_type == cpu_to_le32(link_st)) {
500 			cmd_p = &cmdh->param;
501 			cmd_p->link_status_resp.response = MUX_CMD_RESP_SUCCESS;
502 		} else if ((cmdh->command_type == cpu_to_le32(fctl_ena)) ||
503 				(cmdh->command_type == cpu_to_le32(fctl_dis))) {
/* Echo the received transaction id in the response. */
508 		trans_id = le32_to_cpu(cmdh->transaction_id);
509 		ipc_mux_dl_acb_send_cmds(ipc_mux, cmd, cmdh->if_id,
510 					 trans_id, cmd_p, size, false, true);
514 /* Decode an aggregated command block. */
515 static void ipc_mux_dl_acb_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
517 	struct mux_acbh *acbh;
518 	struct mux_cmdh *cmdh;
523 	acbh = (struct mux_acbh *)(skb->data);
524 	block = (u8 *)(skb->data);
/* Walk the linked list of commands inside the block; index 0 terminates.
 * The first index is clamped against speculation before use.
 */
526 	next_cmd_index = le32_to_cpu(acbh->first_cmd_index);
527 	next_cmd_index = array_index_nospec(next_cmd_index,
528 					    sizeof(struct mux_cmdh));
530 	while (next_cmd_index != 0) {
531 		cmdh = (struct mux_cmdh *)&block[next_cmd_index];
532 		next_cmd_index = le32_to_cpu(cmdh->next_cmd_index);
533 		if (ipc_mux_dl_cmdresps_decode_process(ipc_mux, cmdh->param,
536 						       cmdh->transaction_id)) {
/* Not a response — try to decode it as a modem-initiated command. */
537 			size = offsetof(struct mux_cmdh, param) +
538 			       sizeof(cmdh->param.flow_ctl);
539 			ipc_mux_dl_acbcmd_decode(ipc_mux, cmdh, size);
544 /* process datagram */
545 static int mux_dl_process_dg(struct iosm_mux *ipc_mux, struct mux_adbh *adbh,
546 			     struct mux_adth_dg *dg, struct sk_buff *skb,
547 			     int if_id, int nr_of_dg)
549 	u32 dl_head_pad_len = ipc_mux->session[if_id].dl_head_pad_len;
550 	u32 packet_offset, i, rc;
/* Iterate the datagram table; each entry's index must point inside the ADB
 * (past the header and before block_length), otherwise it is rejected.
 */
552 	for (i = 0; i < nr_of_dg; i++, dg++) {
553 		if (le32_to_cpu(dg->datagram_index)
554 				< sizeof(struct mux_adbh))
557 		/* Is the packet inside of the ADB */
558 		if (le32_to_cpu(dg->datagram_index) >=
559 		    le32_to_cpu(adbh->block_length)) {
563 				le32_to_cpu(dg->datagram_index) +
565 			/* Pass the packet to the netif layer. */
566 			rc = ipc_mux_net_receive(ipc_mux, if_id, ipc_mux->wwan,
579 /* Decode an aggregated data block. */
580 static void mux_dl_adb_decode(struct iosm_mux *ipc_mux,
583 	struct mux_adth_dg *dg;
584 	struct iosm_wwan *wwan;
585 	struct mux_adbh *adbh;
586 	struct mux_adth *adth;
592 	adbh = (struct mux_adbh *)block;
594 	/* Process the aggregated datagram tables. */
595 	adth_index = le32_to_cpu(adbh->first_table_index);
597 	/* Has CP sent an empty ADB ? */
598 	if (adth_index < 1) {
599 		dev_err(ipc_mux->dev, "unexpected empty ADB");
603 	/* Loop through mixed session tables. */
605 		/* Get the reference to the table header. */
606 		adth = (struct mux_adth *)(block + adth_index);
608 		/* Get the interface id and map it to the netif id. */
610 		if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES)
/* Clamp if_id against speculative out-of-bounds access. */
613 		if_id = array_index_nospec(if_id,
614 					   IPC_MEM_MUX_IP_SESSION_ENTRIES);
616 		/* Is the session active ? */
617 		wwan = ipc_mux->session[if_id].wwan;
621 		/* Consistency checks for aggregated datagram table. */
622 		if (adth->signature != cpu_to_le32(IOSM_AGGR_MUX_SIG_ADTH))
625 		if (le16_to_cpu(adth->table_length) < (sizeof(struct mux_adth) -
626 				sizeof(struct mux_adth_dg)))
629 		/* Calculate the number of datagrams. */
630 		nr_of_dg = (le16_to_cpu(adth->table_length) -
631 			    sizeof(struct mux_adth) +
632 			    sizeof(struct mux_adth_dg)) /
633 			   sizeof(struct mux_adth_dg);
635 		/* Is the datagram table empty ? */
637 			dev_err(ipc_mux->dev,
638 				"adthidx=%u,nr_of_dg=%d,next_tblidx=%u",
639 				adth_index, nr_of_dg,
640 				le32_to_cpu(adth->next_table_index));
642 			/* Move to the next aggregated datagram table. */
643 			adth_index = le32_to_cpu(adth->next_table_index);
647 		/* New aggregated datagram table. */
649 		if (mux_dl_process_dg(ipc_mux, adbh, dg, skb, if_id,
653 		/* mark session for final flush */
654 		ipc_mux->session[if_id].flush = 1;
656 		/* Move to the next aggregated datagram table. */
657 		adth_index = le32_to_cpu(adth->next_table_index);
665 * ipc_mux_dl_decode - Route the DL packet through the IP MUX layer
666 * depending on Header.
667 * @ipc_mux: Pointer to MUX data-struct
668 * @skb: Pointer to ipc_skb.
670 void ipc_mux_dl_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
677 	/* Decode the MUX header type. */
678 	signature = le32_to_cpup((__le32 *)skb->data);
/* Dispatch on the 32-bit signature at the start of the block. */
681 	case IOSM_AGGR_MUX_SIG_ADBH:	/* Aggregated Data Block Header */
682 		mux_dl_adb_decode(ipc_mux, skb);
684 	case IOSM_AGGR_MUX_SIG_ADGH:
685 		ipc_mux_dl_adgh_decode(ipc_mux, skb);
688 		ipc_mux_dl_fcth_decode(ipc_mux, skb->data);
690 	case IOSM_AGGR_MUX_SIG_ACBH:	/* Aggregated Command Block Header */
691 		ipc_mux_dl_acb_decode(ipc_mux, skb);
694 		ipc_mux_dl_cmd_decode(ipc_mux, skb);
698 		dev_err(ipc_mux->dev, "invalid ABH signature");
/* The DL skb is consumed here in all cases. */
701 	ipc_pcie_kfree_skb(ipc_mux->pcie, skb);
/* Take an skb from the UL ADB free list and initialize it for the given
 * signature type (ADBH / ADGH / QLTH). Returns -EBUSY when the free list
 * is empty.
 */
704 static int ipc_mux_ul_skb_alloc(struct iosm_mux *ipc_mux,
705 				struct mux_adb *ul_adb, u32 type)
707 	/* Take the first element of the free list. */
708 	struct sk_buff *skb = skb_dequeue(&ul_adb->free_list);
709 	u32 no_if = IPC_MEM_MUX_IP_SESSION_ENTRIES;
715 		return -EBUSY; /* Wait for a free ADB skb. */
717 	/* Mark it as UL ADB to select the right free operation. */
718 	IPC_CB(skb)->op_type = (u8)UL_MUX_OP_ADB;
721 	case IOSM_AGGR_MUX_SIG_ADBH:
722 		/* Save the ADB memory settings. */
723 		ul_adb->dest_skb = skb;
724 		ul_adb->buf = skb->data;
725 		ul_adb->size = IPC_MEM_MAX_ADB_BUF_SIZE;
727 		/* reset statistic counter */
729 		ul_adb->payload_size = 0;
730 		ul_adb->dg_cnt_total = 0;
732 		/* Initialize the ADBH. */
733 		ul_adb->adbh = (struct mux_adbh *)ul_adb->buf;
734 		memset(ul_adb->adbh, 0, sizeof(struct mux_adbh));
735 		ul_adb->adbh->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ADBH);
736 		ul_adb->adbh->block_length =
737 			cpu_to_le32(sizeof(struct mux_adbh));
/* Track where the next table index will be written while tables are
 * appended to the ADB.
 */
738 		next_tb_id = (unsigned int *)&ul_adb->adbh->first_table_index;
739 		ul_adb->next_table_index = next_tb_id;
741 		/* Clear the local copy of DGs for new ADB */
742 		memset(ul_adb->dg, 0, sizeof(ul_adb->dg));
744 		/* Clear the DG count and QLT updated status for new ADB */
745 		for (if_id = 0; if_id < no_if; if_id++) {
746 			ul_adb->dg_count[if_id] = 0;
747 			ul_adb->qlt_updated[if_id] = 0;
751 	case IOSM_AGGR_MUX_SIG_ADGH:
752 		/* Save the ADB memory settings. */
753 		ul_adb->dest_skb = skb;
754 		ul_adb->buf = skb->data;
755 		ul_adb->size = IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE;
756 		/* reset statistic counter */
758 		ul_adb->payload_size = 0;
759 		ul_adb->dg_cnt_total = 0;
761 		ul_adb->adgh = (struct mux_adgh *)skb->data;
762 		memset(ul_adb->adgh, 0, sizeof(struct mux_adgh));
/* QLTH case: reserve and zero a queue-level table in the skb. */
766 		qlt_size = offsetof(struct ipc_mem_lite_gen_tbl, vfl) +
767 			   (MUX_QUEUE_LEVEL * sizeof(struct mux_lite_vfl));
769 		if (qlt_size > IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE) {
770 			dev_err(ipc_mux->dev,
771 				"can't support. QLT size:%d SKB size: %d",
772 				qlt_size, IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE);
776 		ul_adb->qlth_skb = skb;
777 		memset((ul_adb->qlth_skb)->data, 0, qlt_size);
778 		skb_put(skb, qlt_size);
/* Finalize the current UL ADGH: set the skb length, queue it on the UL
 * channel and update credit/byte accounting for flow control.
 */
785 static void ipc_mux_ul_adgh_finish(struct iosm_mux *ipc_mux)
787 	struct mux_adb *ul_adb = &ipc_mux->ul_adb;
792 	if (!ul_adb->dest_skb) {
793 		dev_err(ipc_mux->dev, "no dest skb");
797 	adgh_len = le16_to_cpu(ul_adb->adgh->length);
798 	skb_put(ul_adb->dest_skb, adgh_len);
799 	skb_queue_tail(&ipc_mux->channel->ul_list, ul_adb->dest_skb);
/* Ownership moved to the UL list — drop our reference. */
800 	ul_adb->dest_skb = NULL;
802 	if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS) {
803 		struct mux_session *session;
805 		session = &ipc_mux->session[ul_adb->adgh->if_id];
806 		str = "available_credits";
807 		bytes = (long long)session->ul_flow_credits;
/* Byte-based flow control path: account the pending UL bytes. */
811 		bytes = ipc_mux->ul_data_pend_bytes;
812 		ipc_mux->ul_data_pend_bytes = ipc_mux->ul_data_pend_bytes +
816 	dev_dbg(ipc_mux->dev, "UL ADGH: size=%u, if_id=%d, payload=%d, %s=%lld",
817 		adgh_len, ul_adb->adgh->if_id, ul_adb->payload_size,
/* Append the per-session ADTH datagram tables and QLTH queue-level tables
 * to the UL ADB buffer, chaining them via next_table_index. Advances
 * *out_offset past the encoded tables.
 */
821 static void ipc_mux_ul_encode_adth(struct iosm_mux *ipc_mux,
822 				   struct mux_adb *ul_adb, int *out_offset)
824 	int i, qlt_size, offset = *out_offset;
825 	struct mux_qlth *p_adb_qlt;
826 	struct mux_adth_dg *dg;
827 	struct mux_adth *adth;
831 	qlt_size = offsetof(struct mux_qlth, ql) +
832 			MUX_QUEUE_LEVEL * sizeof(struct mux_qlth_ql);
834 	for (i = 0; i < ipc_mux->nr_sessions; i++) {
835 		if (ul_adb->dg_count[i] > 0) {
836 			adth_dg_size = offsetof(struct mux_adth, dg) +
837 				       ul_adb->dg_count[i] * sizeof(*dg);
/* Patch the previous table's next_table_index to point here, then keep a
 * pointer to this table's next_table_index for the following iteration.
 */
839 			*ul_adb->next_table_index = offset;
840 			adth = (struct mux_adth *)&ul_adb->buf[offset];
841 			next_tb_id = (unsigned int *)&adth->next_table_index;
842 			ul_adb->next_table_index = next_tb_id;
843 			offset += adth_dg_size;
844 			adth->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ADTH);
846 			adth->table_length = cpu_to_le16(adth_dg_size);
847 			adth_dg_size -= offsetof(struct mux_adth, dg);
848 			memcpy(&adth->dg, ul_adb->dg[i], adth_dg_size);
852 		if (ul_adb->qlt_updated[i]) {
853 			*ul_adb->next_table_index = offset;
854 			p_adb_qlt = (struct mux_qlth *)&ul_adb->buf[offset];
855 			ul_adb->next_table_index =
856 				(u32 *)&p_adb_qlt->next_table_index;
857 			memcpy(p_adb_qlt, ul_adb->pp_qlt[i], qlt_size);
861 	*out_offset = offset;
865 * ipc_mux_ul_adb_finish - Add the TD of the aggregated session packets to TDR.
866 * @ipc_mux: Pointer to MUX data-struct.
868 void ipc_mux_ul_adb_finish(struct iosm_mux *ipc_mux)
870 	bool ul_data_pend = false;
871 	struct mux_adb *ul_adb;
875 	ul_adb = &ipc_mux->ul_adb;
876 	if (!ul_adb->dest_skb)
/* Encode all pending ADTH/QLTH tables and fix up the final block length. */
879 	offset = *ul_adb->next_table_index;
880 	ipc_mux_ul_encode_adth(ipc_mux, ul_adb, &offset);
881 	ul_adb->adbh->block_length = cpu_to_le32(offset);
/* An overrun of the allocated ADB buffer drops the block. */
883 	if (le32_to_cpu(ul_adb->adbh->block_length) > ul_adb->size) {
884 		ul_adb->dest_skb = NULL;
888 	*ul_adb->next_table_index = 0;
889 	ul_adb->adbh->sequence_nr = cpu_to_le16(ipc_mux->adb_tx_sequence_nr++);
890 	skb_put(ul_adb->dest_skb, le32_to_cpu(ul_adb->adbh->block_length));
/* Queue the finished ADB under the UL list lock. */
892 	spin_lock_irqsave(&(&ipc_mux->channel->ul_list)->lock, flags);
893 	__skb_queue_tail(&ipc_mux->channel->ul_list, ul_adb->dest_skb);
894 	spin_unlock_irqrestore(&(&ipc_mux->channel->ul_list)->lock, flags);
896 	ul_adb->dest_skb = NULL;
897 	/* Updates the TDs with ul_list */
898 	ul_data_pend = ipc_imem_ul_write_td(ipc_mux->imem);
900 	/* Delay the doorbell irq */
902 		ipc_imem_td_update_timer_start(ipc_mux->imem);
/* Update aggregate statistics and pending-byte flow-control accounting. */
904 	ipc_mux->acc_adb_size += le32_to_cpu(ul_adb->adbh->block_length);
905 	ipc_mux->acc_payload_size += ul_adb->payload_size;
906 	ipc_mux->ul_data_pend_bytes += ul_adb->payload_size;
909 /* Allocates an ADB from the free list and initializes it with ADBH */
910 static bool ipc_mux_ul_adb_allocate(struct iosm_mux *ipc_mux,
911 				    struct mux_adb *adb, int *size_needed,
/* Returns true when NO ADB could be allocated (free list exhausted). */
914 	bool ret_val = false;
917 	if (!adb->dest_skb) {
918 		/* Allocate memory for the ADB including of the
919 		 * datagram table header.
921 		status = ipc_mux_ul_skb_alloc(ipc_mux, adb, type);
923 			/* Is a pending ADB available ? */
924 			ret_val = true; /* None. */
926 		/* Update size need to zero only for new ADB memory */
933 /* Informs the network stack to stop sending further packets for all opened
936 static void ipc_mux_stop_tx_for_all_sessions(struct iosm_mux *ipc_mux)
938 	struct mux_session *session;
941 	for (idx = 0; idx < IPC_MEM_MUX_IP_SESSION_ENTRIES; idx++) {
942 		session = &ipc_mux->session[idx];
/* Set the internal TX-stop flag for each (open) session. */
947 		session->net_tx_stop = true;
951 /* Sends Queue Level Table of all opened sessions */
952 static bool ipc_mux_lite_send_qlt(struct iosm_mux *ipc_mux)
954 	struct ipc_mem_lite_gen_tbl *qlt;
955 	struct mux_session *session;
956 	bool qlt_updated = false;
/* Only meaningful while the MUX is initialized and in ACTIVE state. */
960 	if (!ipc_mux->initialized || ipc_mux->state != MUX_S_ACTIVE)
963 	qlt_size = offsetof(struct ipc_mem_lite_gen_tbl, vfl) +
964 		   MUX_QUEUE_LEVEL * sizeof(struct mux_lite_vfl);
966 	for (i = 0; i < IPC_MEM_MUX_IP_SESSION_ENTRIES; i++) {
967 		session = &ipc_mux->session[i];
/* Skip closed sessions and sessions under flow control. */
969 		if (!session->wwan || session->flow_ctl_mask)
972 		if (ipc_mux_ul_skb_alloc(ipc_mux, &ipc_mux->ul_adb,
974 			dev_err(ipc_mux->dev,
975 				"no reserved mem to send QLT of if_id: %d", i);
/* Fill the pre-zeroed QLT skb with signature, lengths and queue level. */
980 		qlt = (struct ipc_mem_lite_gen_tbl *)(ipc_mux->ul_adb.qlth_skb)
982 		qlt->signature = cpu_to_le32(MUX_SIG_QLTH);
983 		qlt->length = cpu_to_le16(qlt_size);
985 		qlt->vfl_length = MUX_QUEUE_LEVEL * sizeof(struct mux_lite_vfl);
986 		qlt->reserved[0] = 0;
987 		qlt->reserved[1] = 0;
989 		qlt->vfl.nr_of_bytes = cpu_to_le32(session->ul_list.qlen);
991 		/* Add QLT to the transfer list. */
992 		skb_queue_tail(&ipc_mux->channel->ul_list,
993 			       ipc_mux->ul_adb.qlth_skb);
996 		ipc_mux->ul_adb.qlth_skb = NULL;
1000 		/* Updates the TDs with ul_list */
1001 		(void)ipc_imem_ul_write_td(ipc_mux->imem);
1006 /* Checks the available credits for the specified session and returns
1007 * number of packets for which credits are available.
1009 static int ipc_mux_ul_bytes_credits_check(struct iosm_mux *ipc_mux,
1010 					  struct mux_session *session,
1011 					  struct sk_buff_head *ul_list,
1014 	int pkts_to_send = 0;
1015 	struct sk_buff *skb;
/* Credit-based flow: budget = remaining session credits. */
1018 	if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS) {
1019 		credits = session->ul_flow_credits;
1021 			dev_dbg(ipc_mux->dev,
1022 				"FC::if_id[%d] Insuff.Credits/Qlen:%d/%u",
1023 				session->if_id, session->ul_flow_credits,
1024 				session->ul_list.qlen); /* nr_of_bytes */
/* Byte-based flow: budget = high watermark minus pending UL bytes; if
 * exhausted, stop TX for all sessions.
 */
1028 		credits = IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B -
1029 			  ipc_mux->ul_data_pend_bytes;
1031 			ipc_mux_stop_tx_for_all_sessions(ipc_mux);
1033 			dev_dbg(ipc_mux->dev,
1034 				"if_id[%d] encod. fail Bytes: %llu, thresh: %d",
1035 				session->if_id, ipc_mux->ul_data_pend_bytes,
1036 				IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B);
1041 	/* Check if there are enough credits/bytes available to send the
1042 	 * requested max_nr_of_pkts. Otherwise restrict the nr_of_pkts
1043 	 * depending on available credits.
1045 	skb_queue_walk(ul_list, skb)
1047 		if (!(credits >= skb->len && pkts_to_send < max_nr_of_pkts))
1049 		credits -= skb->len;
1053 	return pkts_to_send;
1056 /* Encode the UL IP packet according to Lite spec. */
1057 static int ipc_mux_ul_adgh_encode(struct iosm_mux *ipc_mux, int session_id,
1058 				  struct mux_session *session,
1059 				  struct sk_buff_head *ul_list,
1060 				  struct mux_adb *adb, int nr_of_pkts)
1062 	int offset = sizeof(struct mux_adgh);
1063 	int adb_updated = -EINVAL;
1064 	struct sk_buff *src_skb;
1065 	int aligned_size = 0;
1069 	/* Re-calculate the number of packets depending on number of bytes to be
1070 	 * processed/available credits.
1072 	nr_of_pkts = ipc_mux_ul_bytes_credits_check(ipc_mux, session, ul_list,
1075 	/* If calculated nr_of_pkts from available credits is <= 0
1076 	 * then nothing to do.
1078 	if (nr_of_pkts <= 0)
1081 	/* Read configured UL head_pad_length for session.*/
1082 	if (session->ul_head_pad_len > IPC_MEM_DL_ETH_OFFSET)
1083 		pad_len = session->ul_head_pad_len - IPC_MEM_DL_ETH_OFFSET;
1085 	/* Process all pending UL packets for this session
1086 	 * depending on the allocated datagram table size.
1088 	while (nr_of_pkts > 0) {
1089 		/* get destination skb allocated */
1090 		if (ipc_mux_ul_adb_allocate(ipc_mux, adb, &ipc_mux->size_needed,
1091 					    IOSM_AGGR_MUX_SIG_ADGH)) {
1092 			dev_err(ipc_mux->dev, "no reserved memory for ADGH");
1096 		/* Peek at the head of the list. */
1097 		src_skb = skb_peek(ul_list);
1099 			dev_err(ipc_mux->dev,
1100 				"skb peek return NULL with count : %d",
1105 		/* Calculate the memory value. */
/* ADGH payload is head-padded and 4-byte aligned. */
1106 		aligned_size = ALIGN((pad_len + src_skb->len), 4);
1108 		ipc_mux->size_needed = sizeof(struct mux_adgh) + aligned_size;
1110 		if (ipc_mux->size_needed > adb->size) {
1111 			dev_dbg(ipc_mux->dev, "size needed %d, adgh size %d",
1112 				ipc_mux->size_needed, adb->size);
1113 			/* Return 1 if any IP packet is added to the transfer
1116 			return nr_of_skb ? 1 : 0;
1119 		/* Add buffer (without head padding to next pending transfer) */
1120 		memcpy(adb->buf + offset + pad_len, src_skb->data,
/* Fill the ADGH for this single (non-aggregated) datagram. */
1123 		adb->adgh->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ADGH);
1124 		adb->adgh->if_id = session_id;
1126 			cpu_to_le16(sizeof(struct mux_adgh) + pad_len +
1128 		adb->adgh->service_class = src_skb->priority;
1129 		adb->adgh->next_count = --nr_of_pkts;
1130 		adb->dg_cnt_total++;
1131 		adb->payload_size += src_skb->len;
1133 		if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS)
1134 			/* Decrement the credit value as we are processing the
1135 			 * datagram from the UL list.
1137 			session->ul_flow_credits -= src_skb->len;
1139 		/* Remove the processed elements and free it. */
1140 		src_skb = skb_dequeue(ul_list);
1141 		dev_kfree_skb(src_skb);
1144 		ipc_mux_ul_adgh_finish(ipc_mux);
1148 		/* Send QLT info to modem if pending bytes > high watermark
1149 		 * in case of mux lite
1151 		if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS ||
1152 		    ipc_mux->ul_data_pend_bytes >=
1153 			    IPC_MEM_MUX_UL_FLOWCTRL_LOW_B)
1154 			adb_updated = ipc_mux_lite_send_qlt(ipc_mux);
1158 		/* Updates the TDs with ul_list */
1159 		(void)ipc_imem_ul_write_td(ipc_mux->imem);
1166 * ipc_mux_ul_adb_update_ql - Adds Queue Level Table and Queue Level to ADB
1167 * @ipc_mux: pointer to MUX instance data
1168 * @p_adb: pointer to UL aggegated data block
1169 * @session_id: session id
1170 * @qlth_n_ql_size: Length (in bytes) of the datagram table
1171 * @ul_list: pointer to skb buffer head
1173 void ipc_mux_ul_adb_update_ql(struct iosm_mux *ipc_mux, struct mux_adb *p_adb,
1174 			      int session_id, int qlth_n_ql_size,
1175 			      struct sk_buff_head *ul_list)
1177 	int qlevel = ul_list->qlen;
1178 	struct mux_qlth *p_qlt;
1180 	p_qlt = (struct mux_qlth *)p_adb->pp_qlt[session_id];
1182 	/* Initialize QLTH if not been done */
1183 	if (p_adb->qlt_updated[session_id] == 0) {
1184 		p_qlt->signature = cpu_to_le32(MUX_SIG_QLTH);
1185 		p_qlt->if_id = session_id;
1186 		p_qlt->table_length = cpu_to_le16(qlth_n_ql_size);
1187 		p_qlt->reserved = 0;
1188 		p_qlt->reserved2 = 0;
1191 	/* Update Queue Level information always */
1192 	p_qlt->ql.nr_of_bytes = cpu_to_le32(qlevel);
/* Mark the per-session QLT as updated so encode_adth emits it. */
1193 	p_adb->qlt_updated[session_id] = 1;
1196 /* Update the next table index. */
/* Finish the current ADB (queue level + send), allocate a fresh one and
 * re-seed size_needed for the new ADB. Frees src_skb on allocation failure.
 */
1197 static int mux_ul_dg_update_tbl_index(struct iosm_mux *ipc_mux,
1199 				      struct sk_buff_head *ul_list,
1200 				      struct mux_adth_dg *dg,
1203 				      struct mux_adb *adb,
1204 				      struct sk_buff *src_skb)
1206 	ipc_mux_ul_adb_update_ql(ipc_mux, adb, session_id,
1207 				 qlth_n_ql_size, ul_list);
1208 	ipc_mux_ul_adb_finish(ipc_mux);
1209 	if (ipc_mux_ul_adb_allocate(ipc_mux, adb, &ipc_mux->size_needed,
1210 				    IOSM_AGGR_MUX_SIG_ADBH)) {
1211 		dev_kfree_skb(src_skb);
/* Account header + table + datagram-entry space in the new ADB. */
1214 	ipc_mux->size_needed = le32_to_cpu(adb->adbh->block_length);
1216 	ipc_mux->size_needed += offsetof(struct mux_adth, dg);
1217 	ipc_mux->size_needed += qlth_n_ql_size;
1218 	ipc_mux->size_needed += sizeof(*dg) + aligned_size;
1222 /* Process encode session UL data. */
1223 static int mux_ul_dg_encode(struct iosm_mux *ipc_mux, struct mux_adb *adb,
1224 			    struct mux_adth_dg *dg,
1225 			    struct sk_buff_head *ul_list,
1226 			    struct sk_buff *src_skb, int session_id,
1227 			    int pkt_to_send, u32 qlth_n_ql_size,
1228 			    int *out_offset, int head_pad_len)
1231 	int offset = *out_offset;
1232 	unsigned long flags;
1235 	while (pkt_to_send > 0) {
1236 		/* Peek at the head of the list. */
1237 		src_skb = skb_peek(ul_list);
1239 			dev_err(ipc_mux->dev,
1240 				"skb peek return NULL with count : %d",
1244 		aligned_size = ALIGN((head_pad_len + src_skb->len), 4);
1245 		ipc_mux->size_needed += sizeof(*dg) + aligned_size;
/* ADB full or the byte-based flow-control high watermark would be hit:
 * close this ADB and start a new one before copying the datagram.
 */
1247 		if (ipc_mux->size_needed > adb->size ||
1248 		    ((ipc_mux->size_needed + ipc_mux->ul_data_pend_bytes) >=
1249 		      IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B)) {
1250 			*adb->next_table_index = offset;
1251 			if (mux_ul_dg_update_tbl_index(ipc_mux, session_id,
1254 						       qlth_n_ql_size, adb,
1258 			offset = le32_to_cpu(adb->adbh->block_length);
1259 			/* Load pointer to next available datagram entry */
1260 			dg = adb->dg[session_id] + adb->dg_count[session_id];
1262 		/* Add buffer without head padding to next pending transfer. */
1263 		memcpy(adb->buf + offset + head_pad_len,
1264 		       src_skb->data, src_skb->len);
1265 		/* Setup datagram entry. */
1266 		dg->datagram_index = cpu_to_le32(offset);
1267 		dg->datagram_length = cpu_to_le16(src_skb->len + head_pad_len);
1268 		dg->service_class = (((struct sk_buff *)src_skb)->priority);
1270 		adb->dg_cnt_total++;
1271 		adb->payload_size += le16_to_cpu(dg->datagram_length);
1273 		adb->dg_count[session_id]++;
1274 		offset += aligned_size;
1275 		/* Remove the processed elements and free it. */
/* Dequeue under the list lock; peek above was done without it. */
1276 		spin_lock_irqsave(&ul_list->lock, flags);
1277 		src_skb = __skb_dequeue(ul_list);
1278 		spin_unlock_irqrestore(&ul_list->lock, flags);
1280 		dev_kfree_skb(src_skb);
1284 	*out_offset = offset;
1288 /* Process encode session UL data to ADB. */
/* Encode one session's pending UL packets into the aggregated data block:
 * applies UL flow control, sizes the ADTH/QLTH headers, delegates datagram
 * copying to mux_ul_dg_encode() and finalizes the ADB block length.
 * NOTE(review): sampled listing — several statements (returns, closing
 * braces, some arguments) between the numbered lines are not visible here.
 */
1289 static int mux_ul_adb_encode(struct iosm_mux *ipc_mux, int session_id,
1290 struct mux_session *session,
1291 struct sk_buff_head *ul_list, struct mux_adb *adb,
1294 int adb_updated = -EINVAL;
1295 int head_pad_len, offset;
1296 struct sk_buff *src_skb = NULL;
1297 struct mux_adth_dg *dg;
1300 /* If any of the opened session has set Flow Control ON then limit the
1301 * UL data to mux_flow_ctrl_high_thresh_b bytes
/* High-water mark reached: stop TX on every session before encoding more. */
1303 if (ipc_mux->ul_data_pend_bytes >=
1304 IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B) {
1305 ipc_mux_stop_tx_for_all_sessions(ipc_mux);
/* Size of one queue-level table header plus its queue-level entries. */
1309 qlth_n_ql_size = offsetof(struct mux_qlth, ql) +
1310 MUX_QUEUE_LEVEL * sizeof(struct mux_qlth_ql);
1311 head_pad_len = session->ul_head_pad_len;
/* Discount the Ethernet header offset already present in the skb
 * (presumably — the surrounding context is elided; confirm upstream).
 */
1313 if (session->ul_head_pad_len > IPC_MEM_DL_ETH_OFFSET)
1314 head_pad_len = session->ul_head_pad_len - IPC_MEM_DL_ETH_OFFSET;
/* Make sure an ADB skb with an ADBH header is available. */
1316 if (ipc_mux_ul_adb_allocate(ipc_mux, adb, &ipc_mux->size_needed,
1317 IOSM_AGGR_MUX_SIG_ADBH))
/* Start writing datagrams at the current end of the block. */
1320 offset = le32_to_cpu(adb->adbh->block_length);
1322 if (ipc_mux->size_needed == 0)
1323 ipc_mux->size_needed = offset;
1325 /* Calculate the size needed for ADTH, QLTH and QL*/
/* First datagram for this session in this ADB: reserve header space. */
1326 if (adb->dg_count[session_id] == 0) {
1327 ipc_mux->size_needed += offsetof(struct mux_adth, dg);
1328 ipc_mux->size_needed += qlth_n_ql_size;
/* Next free datagram-table entry for this session. */
1331 dg = adb->dg[session_id] + adb->dg_count[session_id];
1333 if (mux_ul_dg_encode(ipc_mux, adb, dg, ul_list, src_skb,
1334 session_id, pkt_to_send, qlth_n_ql_size, &offset,
1335 head_pad_len) > 0) {
/* Datagrams were added: record the table index, refresh the queue
 * levels and commit the new block length into the ADB header.
 */
1337 *adb->next_table_index = offset;
1338 ipc_mux_ul_adb_update_ql(ipc_mux, adb, session_id,
1339 qlth_n_ql_size, ul_list);
1340 adb->adbh->block_length = cpu_to_le32(offset);
/* Round-robin over all IP sessions and encode their pending UL data,
 * using either the MUX_LITE (ADGH) or aggregation (ADB) encoder.
 * Returns true when at least one block was updated (updated == 1).
 * Guarded by adb_prep_ongoing so only one encode pass runs at a time —
 * NOTE(review): this is a plain bool flag, not an atomic; the elided
 * context presumably serializes callers (task queue) — confirm upstream.
 */
1346 bool ipc_mux_ul_data_encode(struct iosm_mux *ipc_mux)
1348 struct sk_buff_head *ul_list;
1349 struct mux_session *session;
/* Bail out unless the MUX is active and no encode pass is in flight. */
1355 if (!ipc_mux || ipc_mux->state != MUX_S_ACTIVE ||
1356 ipc_mux->adb_prep_ongoing)
1359 ipc_mux->adb_prep_ongoing = true;
/* Visit every session exactly once, starting after the last one served. */
1361 for (i = 0; i < IPC_MEM_MUX_IP_SESSION_ENTRIES; i++) {
1362 session_id = ipc_mux->rr_next_session;
1363 session = &ipc_mux->session[session_id];
1365 /* Go to next handle rr_next_session overflow */
1366 ipc_mux->rr_next_session++;
1367 if (ipc_mux->rr_next_session >= IPC_MEM_MUX_IP_SESSION_ENTRIES)
1368 ipc_mux->rr_next_session = 0;
/* Skip sessions that are closed or flow-controlled. */
1370 if (!session->wwan || session->flow_ctl_mask ||
1371 session->net_tx_stop)
1374 ul_list = &session->ul_list;
1376 /* Is something pending in UL and flow ctrl off */
/* Cap the datagram count at the per-table maximum. */
1377 dg_n = skb_queue_len(ul_list);
1378 if (dg_n > MUX_MAX_UL_DG_ENTRIES)
1379 dg_n = MUX_MAX_UL_DG_ENTRIES;
1382 /* Nothing to do for ipc_mux session
1383 * -> try next session id.
/* Dispatch to the protocol-specific encoder. */
1386 if (ipc_mux->protocol == MUX_LITE)
1387 updated = ipc_mux_ul_adgh_encode(ipc_mux, session_id,
1392 updated = mux_ul_adb_encode(ipc_mux, session_id,
1398 ipc_mux->adb_prep_ongoing = false;
1399 return updated == 1;
1402 /* Calculates the Payload from any given ADB. */
/* Walk the chain of aggregated datagram tables (ADTH) inside @p_adbh and
 * sum up every datagram's length. Validates table and datagram indices
 * against the ADB header size; on any inconsistency the sum accumulated
 * so far is returned.
 * NOTE(review): sampled listing — the dg initialisation before the inner
 * loop and the accumulation lvalue at line 1441 are elided here.
 */
1403 static int ipc_mux_get_payload_from_adb(struct iosm_mux *ipc_mux,
1404 struct mux_adbh *p_adbh)
1406 struct mux_adth_dg *dg;
1407 struct mux_adth *adth;
1408 u32 payload_size = 0;
1412 /* Process the aggregated datagram tables. */
1413 next_table_idx = le32_to_cpu(p_adbh->first_table_index);
/* A valid first table must lie beyond the ADB header. */
1415 if (next_table_idx < sizeof(struct mux_adbh)) {
1416 dev_err(ipc_mux->dev, "unexpected empty ADB");
1417 return payload_size;
/* Index 0 terminates the table chain. */
1420 while (next_table_idx != 0) {
1421 /* Get the reference to the table header. */
1422 adth = (struct mux_adth *)((u8 *)p_adbh + next_table_idx);
1424 if (adth->signature == cpu_to_le32(IOSM_AGGR_MUX_SIG_ADTH)) {
/* Number of datagram entries that fit in table_length; struct
 * mux_adth presumably embeds one dg entry, hence the +sizeof
 * adjustment — TODO confirm against the struct definition.
 */
1425 nr_of_dg = (le16_to_cpu(adth->table_length) -
1426 sizeof(struct mux_adth) +
1427 sizeof(struct mux_adth_dg)) /
1428 sizeof(struct mux_adth_dg);
1431 return payload_size;
1435 for (i = 0; i < nr_of_dg; i++, dg++) {
/* Datagrams must also start beyond the ADB header. */
1436 if (le32_to_cpu(dg->datagram_index) <
1437 sizeof(struct mux_adbh)) {
1438 return payload_size;
1441 le16_to_cpu(dg->datagram_length);
/* Follow the chain to the next table. */
1444 next_table_idx = le32_to_cpu(adth->next_table_index);
1447 return payload_size;
/* Called after an encoded UL block was sent to CP: subtract its payload
 * from the pending-byte accounting and recycle the skb onto the UL ADB
 * free list for reuse.
 */
1450 void ipc_mux_ul_encoded_process(struct iosm_mux *ipc_mux, struct sk_buff *skb)
1452 union mux_type_header hr;
/* MUX_LITE: the block is a single ADGH; take its length field directly. */
1456 if (ipc_mux->protocol == MUX_LITE) {
1457 hr.adgh = (struct mux_adgh *)skb->data;
1458 adgh_len = le16_to_cpu(hr.adgh->length);
1459 if (hr.adgh->signature == cpu_to_le32(IOSM_AGGR_MUX_SIG_ADGH) &&
1460 ipc_mux->ul_flow == MUX_UL)
1461 ipc_mux->ul_data_pend_bytes =
1462 ipc_mux->ul_data_pend_bytes - adgh_len;
/* Aggregation: sum all datagram lengths across the ADB's tables. */
1464 hr.adbh = (struct mux_adbh *)(skb->data);
1465 payload = ipc_mux_get_payload_from_adb(ipc_mux, hr.adbh);
1466 ipc_mux->ul_data_pend_bytes -= payload;
1469 if (ipc_mux->ul_flow == MUX_UL)
1470 dev_dbg(ipc_mux->dev, "ul_data_pend_bytes: %lld",
1471 ipc_mux->ul_data_pend_bytes);
1473 /* Reset the skb settings. */
1477 /* Add the consumed ADB to the free list. */
1478 skb_queue_tail((&ipc_mux->ul_adb.free_list), skb);
1481 /* Start the NETIF uplink send transfer in MUX mode. */
/* Task-queue callback: encode pending session UL data, arm the ADB timer
 * (aggregation mode only) and the TD-update timer to batch the doorbell
 * interrupt, then clear the transmit-pending debounce flag so the netif
 * path can schedule this task again.
 * NOTE(review): sampled listing — the function's return statement is not
 * visible here; @arg/@msg/@size appear unused in the visible lines.
 */
1482 static int ipc_mux_tq_ul_trigger_encode(struct iosm_imem *ipc_imem, int arg,
1483 void *msg, size_t size)
1485 struct iosm_mux *ipc_mux = ipc_imem->mux;
1486 bool ul_data_pend = false;
1488 /* Add session UL data to a ADB and ADGH */
1489 ul_data_pend = ipc_mux_ul_data_encode(ipc_mux);
/* In aggregation mode, (re)start the ADB flush timer. */
1491 if (ipc_mux->protocol == MUX_AGGREGATION)
1492 ipc_imem_adb_timer_start(ipc_mux->imem);
1494 /* Delay the doorbell irq */
1495 ipc_imem_td_update_timer_start(ipc_mux->imem);
1497 /* reset the debounce flag */
1498 ipc_mux->ev_mux_net_transmit_pending = false;
1503 int ipc_mux_ul_trigger_encode(struct iosm_mux *ipc_mux, int if_id,
1504 struct sk_buff *skb)
1506 struct mux_session *session = &ipc_mux->session[if_id];
1509 if (ipc_mux->channel &&
1510 ipc_mux->channel->state != IMEM_CHANNEL_ACTIVE) {
1511 dev_err(ipc_mux->dev,
1512 "channel state is not IMEM_CHANNEL_ACTIVE");
1516 if (!session->wwan) {
1517 dev_err(ipc_mux->dev, "session net ID is NULL");
1522 /* Session is under flow control.
1523 * Check if packet can be queued in session list, if not
1526 if (skb_queue_len(&session->ul_list) >=
1527 (session->net_tx_stop ?
1528 IPC_MEM_MUX_UL_SESS_FCON_THRESHOLD :
1529 (IPC_MEM_MUX_UL_SESS_FCON_THRESHOLD *
1530 IPC_MEM_MUX_UL_SESS_FCOFF_THRESHOLD_FACTOR))) {
1531 ipc_mux_netif_tx_flowctrl(session, session->if_id, true);
1536 /* Add skb to the uplink skb accumulator. */
1537 skb_queue_tail(&session->ul_list, skb);
1539 /* Inform the IPC kthread to pass uplink IP packets to CP. */
1540 if (!ipc_mux->ev_mux_net_transmit_pending) {
1541 ipc_mux->ev_mux_net_transmit_pending = true;
1542 ret = ipc_task_queue_send_task(ipc_mux->imem,
1543 ipc_mux_tq_ul_trigger_encode, 0,
1548 dev_dbg(ipc_mux->dev, "mux ul if[%d] qlen=%d/%u, len=%d/%d, prio=%d",
1549 if_id, skb_queue_len(&session->ul_list), session->ul_list.qlen,
1550 skb->len, skb->truesize, skb->priority);