1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2020-21 Intel Corporation.
6 #include <linux/delay.h>
7 #include <linux/pm_runtime.h>
9 #include "iosm_ipc_chnl_cfg.h"
10 #include "iosm_ipc_devlink.h"
11 #include "iosm_ipc_flash.h"
12 #include "iosm_ipc_imem.h"
13 #include "iosm_ipc_port.h"
14 #include "iosm_ipc_trace.h"
15 #include "iosm_ipc_debugfs.h"
17 /* Check whether the given channel is the WWAN IP channel. */
18 static int ipc_imem_check_wwan_ips(struct ipc_mem_channel *chnl)
21 return chnl->ctype == IPC_CTYPE_WWAN &&
22 chnl->if_id == IPC_MEM_MUX_IP_CH_IF_ID;
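/* Send a sleep-control message to CP carrying the requested device sleep state. */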
26 static int ipc_imem_msg_send_device_sleep(struct iosm_imem *ipc_imem, u32 state)
28 union ipc_msg_prep_args prep_args = {
33 ipc_imem->device_sleep = state;
35 return ipc_protocol_tq_msg_send(ipc_imem->ipc_protocol,
36 IPC_MSG_PREP_SLEEP, &prep_args, NULL);
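/* Allocate one DL buffer and queue it as a transfer descriptor on the pipe,
 * bounded by the pipe's maximum number of queued entries.
 */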
39 static bool ipc_imem_dl_skb_alloc(struct iosm_imem *ipc_imem,
40 struct ipc_pipe *pipe)
42 /* limit max. nr of entries */
43 if (pipe->nr_of_queued_entries >= pipe->max_nr_of_queued_entries)
46 return ipc_protocol_dl_td_prepare(ipc_imem->ipc_protocol, pipe);
49 /* This timer handler retries DL buffer allocation if a pipe has no free
50 * buffers and rings the doorbell if new TDs are available
52 static int ipc_imem_tq_td_alloc_timer(struct iosm_imem *ipc_imem, int arg,
53 void *msg, size_t size)
55 bool new_buffers_available = false;
56 bool retry_allocation = false;
59 for (i = 0; i < IPC_MEM_MAX_CHANNELS; i++) {
60 struct ipc_pipe *pipe = &ipc_imem->channels[i].dl_pipe;
62 if (!pipe->is_open || pipe->nr_of_queued_entries > 0)
65 while (ipc_imem_dl_skb_alloc(ipc_imem, pipe))
66 new_buffers_available = true;
68 if (pipe->nr_of_queued_entries == 0)
69 retry_allocation = true;
72 if (new_buffers_available)
73 ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
76 if (retry_allocation) {
77 ipc_imem->hrtimer_period =
78 ktime_set(0, IPC_TD_ALLOC_TIMER_PERIOD_MS * 1000 * 1000ULL);
79 if (!hrtimer_active(&ipc_imem->td_alloc_timer))
80 hrtimer_start(&ipc_imem->td_alloc_timer,
81 ipc_imem->hrtimer_period,
87 static enum hrtimer_restart ipc_imem_td_alloc_timer_cb(struct hrtimer *hr_timer)
89 struct iosm_imem *ipc_imem =
90 container_of(hr_timer, struct iosm_imem, td_alloc_timer);
91 /* Post an async tasklet event to trigger HP update Doorbell */
92 ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_td_alloc_timer, 0, NULL,
94 return HRTIMER_NORESTART;
97 /* Fast update timer tasklet handler to trigger HP update */
98 static int ipc_imem_tq_fast_update_timer_cb(struct iosm_imem *ipc_imem, int arg,
99 void *msg, size_t size)
101 ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
102 IPC_HP_FAST_TD_UPD_TMR);
107 static enum hrtimer_restart
108 ipc_imem_fast_update_timer_cb(struct hrtimer *hr_timer)
110 struct iosm_imem *ipc_imem =
111 container_of(hr_timer, struct iosm_imem, fast_update_timer);
112 /* Post an async tasklet event to trigger HP update Doorbell */
113 ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_fast_update_timer_cb, 0,
115 return HRTIMER_NORESTART;
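/* ADB timer tasklet handler: finish and send the pending UL aggregated data block. */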
118 static int ipc_imem_tq_adb_timer_cb(struct iosm_imem *ipc_imem, int arg,
119 void *msg, size_t size)
121 ipc_mux_ul_adb_finish(ipc_imem->mux);
125 static enum hrtimer_restart
126 ipc_imem_adb_timer_cb(struct hrtimer *hr_timer)
128 struct iosm_imem *ipc_imem =
129 container_of(hr_timer, struct iosm_imem, adb_timer);
131 ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_adb_timer_cb, 0,
133 return HRTIMER_NORESTART;
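/* Read the CP MUX capability from MMIO and fill in the MUX configuration. */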
136 static int ipc_imem_setup_cp_mux_cap_init(struct iosm_imem *ipc_imem,
137 struct ipc_mux_config *cfg)
139 ipc_mmio_update_cp_capability(ipc_imem->mmio);
141 if (ipc_imem->mmio->mux_protocol == MUX_UNKNOWN) {
142 dev_err(ipc_imem->dev, "Failed to get Mux capability.");
146 cfg->protocol = ipc_imem->mmio->mux_protocol;
148 cfg->ul_flow = (ipc_imem->mmio->has_ul_flow_credit == 1) ?
152 /* The instance ID is the same as the channel ID because it is reused
153 * by the channel allocation function.
155 cfg->instance_id = IPC_MEM_MUX_IP_CH_IF_ID;
160 void ipc_imem_msg_send_feature_set(struct iosm_imem *ipc_imem,
161 unsigned int reset_enable, bool atomic_ctx)
163 union ipc_msg_prep_args prep_args = { .feature_set.reset_enable =
167 ipc_protocol_tq_msg_send(ipc_imem->ipc_protocol,
168 IPC_MSG_PREP_FEATURE_SET, &prep_args,
171 ipc_protocol_msg_send(ipc_imem->ipc_protocol,
172 IPC_MSG_PREP_FEATURE_SET, &prep_args);
176 * ipc_imem_td_update_timer_start - Starts the TD Update Timer if not started.
177 * @ipc_imem: Pointer to imem data-struct
179 void ipc_imem_td_update_timer_start(struct iosm_imem *ipc_imem)
181 /* Use the TD update timer only in the runtime phase */
182 if (!ipc_imem->enter_runtime || ipc_imem->td_update_timer_suspended) {
183 /* trigger the doorbell irq on CP directly. */
184 ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
185 IPC_HP_TD_UPD_TMR_START);
189 if (!hrtimer_active(&ipc_imem->tdupdate_timer)) {
190 ipc_imem->hrtimer_period =
191 ktime_set(0, TD_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
192 if (!hrtimer_active(&ipc_imem->tdupdate_timer))
193 hrtimer_start(&ipc_imem->tdupdate_timer,
194 ipc_imem->hrtimer_period,
199 void ipc_imem_hrtimer_stop(struct hrtimer *hr_timer)
201 if (hrtimer_active(hr_timer))
202 hrtimer_cancel(hr_timer);
206 * ipc_imem_adb_timer_start - Starts the ADB timer if not already running.
207 * @ipc_imem: Pointer to imem data-struct
209 void ipc_imem_adb_timer_start(struct iosm_imem *ipc_imem)
211 if (!hrtimer_active(&ipc_imem->adb_timer)) {
212 ipc_imem->hrtimer_period =
213 ktime_set(0, IOSM_AGGR_MUX_ADB_FINISH_TIMEOUT_NSEC);
214 hrtimer_start(&ipc_imem->adb_timer,
215 ipc_imem->hrtimer_period,
220 bool ipc_imem_ul_write_td(struct iosm_imem *ipc_imem)
222 struct ipc_mem_channel *channel;
223 bool hpda_ctrl_pending = false;
224 struct sk_buff_head *ul_list;
225 bool hpda_pending = false;
226 struct ipc_pipe *pipe;
229 /* Analyze the uplink pipe of all active channels. */
230 for (i = 0; i < ipc_imem->nr_of_channels; i++) {
231 channel = &ipc_imem->channels[i];
233 if (channel->state != IMEM_CHANNEL_ACTIVE)
236 pipe = &channel->ul_pipe;
238 /* Get the reference to the skbuf accumulator list. */
239 ul_list = &channel->ul_list;
241 /* Fill the transfer descriptor with the uplink buffer info. */
242 if (!ipc_imem_check_wwan_ips(channel)) {
244 ipc_protocol_ul_td_send(ipc_imem->ipc_protocol,
248 ipc_protocol_ul_td_send(ipc_imem->ipc_protocol,
253 /* A forced HP update is needed for non-data channels */
254 if (hpda_ctrl_pending) {
255 hpda_pending = false;
256 ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
263 void ipc_imem_ipc_init_check(struct iosm_imem *ipc_imem)
265 int timeout = IPC_MODEM_BOOT_TIMEOUT;
267 ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_INIT;
269 /* Trigger the CP interrupt to enter the init state. */
270 ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
271 IPC_MEM_DEVICE_IPC_INIT);
272 /* Wait for the CP update. */
274 if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
275 ipc_imem->ipc_requested_state) {
276 /* Prepare the MMIO space */
277 ipc_mmio_config(ipc_imem->mmio);
279 /* Trigger the CP irq to enter the running state. */
280 ipc_imem->ipc_requested_state =
281 IPC_MEM_DEVICE_IPC_RUNNING;
282 ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
283 IPC_MEM_DEVICE_IPC_RUNNING);
291 dev_err(ipc_imem->dev, "%s: ipc_status(%d) ne. IPC_MEM_DEVICE_IPC_INIT",
292 ipc_imem_phase_get_string(ipc_imem->phase),
293 ipc_mmio_get_ipc_state(ipc_imem->mmio));
295 ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_TIMEOUT);
298 /* Analyze the packet type and distribute it. */
299 static void ipc_imem_dl_skb_process(struct iosm_imem *ipc_imem,
300 struct ipc_pipe *pipe, struct sk_buff *skb)
307 /* An AT/control or IP packet is expected. */
308 switch (pipe->channel->ctype) {
310 port_id = pipe->channel->channel_id;
311 ipc_pcie_addr_unmap(ipc_imem->pcie, IPC_CB(skb)->len,
312 IPC_CB(skb)->mapping,
313 IPC_CB(skb)->direction);
314 if (port_id == IPC_MEM_CTRL_CHL_ID_7)
315 ipc_imem_sys_devlink_notify_rx(ipc_imem->ipc_devlink,
317 else if (ipc_is_trace_channel(ipc_imem, port_id))
318 ipc_trace_port_rx(ipc_imem, skb);
320 wwan_port_rx(ipc_imem->ipc_port[port_id]->iosm_port,
325 if (pipe->channel->if_id == IPC_MEM_MUX_IP_CH_IF_ID)
326 ipc_mux_dl_decode(ipc_imem->mux, skb);
329 dev_err(ipc_imem->dev, "Invalid channel type");
334 /* Process the downlink data and pass them to the char or net layer. */
335 static void ipc_imem_dl_pipe_process(struct iosm_imem *ipc_imem,
336 struct ipc_pipe *pipe)
338 s32 cnt = 0, processed_td_cnt = 0;
339 struct ipc_mem_channel *channel;
340 u32 head = 0, tail = 0;
341 bool processed = false;
344 channel = pipe->channel;
346 ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol, pipe, &head,
348 if (pipe->old_tail != tail) {
349 if (pipe->old_tail < tail)
350 cnt = tail - pipe->old_tail;
352 cnt = pipe->nr_of_entries - pipe->old_tail + tail;
355 processed_td_cnt = cnt;
357 /* Process the pipe's pending DL TDs. */
359 skb = ipc_protocol_dl_td_process(ipc_imem->ipc_protocol, pipe);
361 /* Analyze the packet type and distribute it. */
362 ipc_imem_dl_skb_process(ipc_imem, pipe, skb);
365 /* Try to allocate new empty DL SKBs from head..tail - 1. */
366 while (ipc_imem_dl_skb_alloc(ipc_imem, pipe))
369 if (processed && !ipc_imem_check_wwan_ips(channel)) {
370 /* Force HP update for non IP channels */
371 ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
375 /* If Fast Update timer is already running then stop */
376 ipc_imem_hrtimer_stop(&ipc_imem->fast_update_timer);
379 /* Any control channel process will get an immediate HP update.
380 * Start the fast update timer only for the IP channel, and only if all
381 * TDs were used during the last processing run.
383 if (processed && (processed_td_cnt == pipe->nr_of_entries - 1)) {
384 ipc_imem->hrtimer_period =
385 ktime_set(0, FORCE_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
386 hrtimer_start(&ipc_imem->fast_update_timer,
387 ipc_imem->hrtimer_period, HRTIMER_MODE_REL);
390 if (ipc_imem->app_notify_dl_pend)
391 complete(&ipc_imem->dl_pend_sem);
394 /* process open uplink pipe */
395 static void ipc_imem_ul_pipe_process(struct iosm_imem *ipc_imem,
396 struct ipc_pipe *pipe)
398 struct ipc_mem_channel *channel;
399 u32 tail = 0, head = 0;
403 channel = pipe->channel;
405 /* Get the head and tail index of the UL pipe. */
406 ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol, pipe, &head,
409 if (pipe->old_tail != tail) {
410 if (pipe->old_tail < tail)
411 cnt = tail - pipe->old_tail;
413 cnt = pipe->nr_of_entries - pipe->old_tail + tail;
416 /* Free UL buffers. */
418 skb = ipc_protocol_ul_td_process(ipc_imem->ipc_protocol, pipe);
423 /* If the user app was suspended in uplink direction - blocking
426 if (IPC_CB(skb)->op_type == UL_USR_OP_BLOCKED)
427 complete(&channel->ul_sem);
429 /* Pass ADB skbs back to the mux layer; free all other skbs. */
430 if (IPC_CB(skb)->op_type == UL_MUX_OP_ADB) {
431 if (channel->if_id == IPC_MEM_MUX_IP_CH_IF_ID)
432 ipc_mux_ul_encoded_process(ipc_imem->mux, skb);
434 dev_err(ipc_imem->dev,
435 "OP Type is UL_MUX, unknown if_id %d",
438 ipc_pcie_kfree_skb(ipc_imem->pcie, skb);
442 /* Trace channel stats for IP UL pipe. */
443 if (ipc_imem_check_wwan_ips(pipe->channel))
444 ipc_mux_check_n_restart_tx(ipc_imem->mux);
446 if (ipc_imem->app_notify_ul_pend)
447 complete(&ipc_imem->ul_pend_sem);
450 /* Handle the ROM doorbell irq: save the exit code and wake up the waiter. */
451 static void ipc_imem_rom_irq_exec(struct iosm_imem *ipc_imem)
453 struct ipc_mem_channel *channel;
455 channel = ipc_imem->ipc_devlink->devlink_sio.channel;
456 ipc_imem->rom_exit_code = ipc_mmio_get_rom_exit_code(ipc_imem->mmio);
457 complete(&channel->ul_sem);
460 /* Execute the UL bundle timer actions, generating the doorbell irq. */
461 static int ipc_imem_tq_td_update_timer_cb(struct iosm_imem *ipc_imem, int arg,
462 void *msg, size_t size)
464 ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
469 /* Consider link power management in the runtime phase. */
470 static void ipc_imem_slp_control_exec(struct iosm_imem *ipc_imem)
472 /* The link is about to go down; check for pending UL packets. */
473 if (ipc_protocol_pm_dev_sleep_handle(ipc_imem->ipc_protocol) &&
474 hrtimer_active(&ipc_imem->tdupdate_timer)) {
475 /* Generate the doorbell irq. */
476 ipc_imem_tq_td_update_timer_cb(ipc_imem, 0, NULL, 0);
477 /* Stop the TD update timer. */
478 ipc_imem_hrtimer_stop(&ipc_imem->tdupdate_timer);
479 /* Stop the fast update timer. */
480 ipc_imem_hrtimer_stop(&ipc_imem->fast_update_timer);
484 /* Execute startup timer and wait for delayed start (e.g. NAND) */
485 static int ipc_imem_tq_startup_timer_cb(struct iosm_imem *ipc_imem, int arg,
486 void *msg, size_t size)
488 /* Update & check the current operation phase. */
489 if (ipc_imem_phase_update(ipc_imem) != IPC_P_RUN)
492 if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
493 IPC_MEM_DEVICE_IPC_UNINIT) {
494 ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_INIT;
496 ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
497 IPC_MEM_DEVICE_IPC_INIT);
499 ipc_imem->hrtimer_period = ktime_set(0, 100 * 1000UL * 1000ULL);
500 /* reduce period to 100 ms to check for mmio init state */
501 if (!hrtimer_active(&ipc_imem->startup_timer))
502 hrtimer_start(&ipc_imem->startup_timer,
503 ipc_imem->hrtimer_period,
505 } else if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
506 IPC_MEM_DEVICE_IPC_INIT) {
507 /* Startup complete - disable timer */
508 ipc_imem_hrtimer_stop(&ipc_imem->startup_timer);
510 /* Prepare the MMIO space */
511 ipc_mmio_config(ipc_imem->mmio);
512 ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_RUNNING;
513 ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
514 IPC_MEM_DEVICE_IPC_RUNNING);
520 static enum hrtimer_restart ipc_imem_startup_timer_cb(struct hrtimer *hr_timer)
522 enum hrtimer_restart result = HRTIMER_NORESTART;
523 struct iosm_imem *ipc_imem =
524 container_of(hr_timer, struct iosm_imem, startup_timer);
526 if (ktime_to_ns(ipc_imem->hrtimer_period)) {
527 hrtimer_forward_now(&ipc_imem->startup_timer,
528 ipc_imem->hrtimer_period);
529 result = HRTIMER_RESTART;
532 ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_startup_timer_cb, 0,
537 /* Get the CP execution stage */
538 static enum ipc_mem_exec_stage
539 ipc_imem_get_exec_stage_buffered(struct iosm_imem *ipc_imem)
541 return (ipc_imem->phase == IPC_P_RUN &&
542 ipc_imem->ipc_status == IPC_MEM_DEVICE_IPC_RUNNING) ?
543 ipc_protocol_get_ap_exec_stage(ipc_imem->ipc_protocol) :
544 ipc_mmio_get_exec_stage(ipc_imem->mmio);
547 /* Callback to send the modem ready uevent */
548 static int ipc_imem_send_mdm_rdy_cb(struct iosm_imem *ipc_imem, int arg,
549 void *msg, size_t size)
551 enum ipc_mem_exec_stage exec_stage =
552 ipc_imem_get_exec_stage_buffered(ipc_imem);
554 if (exec_stage == IPC_MEM_EXEC_STAGE_RUN)
555 ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_READY);
560 /* This function is executed in a task context via an ipc_worker object,
561 * as the creation or removal of a device can't be done from tasklet context.
563 static void ipc_imem_run_state_worker(struct work_struct *instance)
565 struct ipc_chnl_cfg chnl_cfg_port = { 0 };
566 struct ipc_mux_config mux_cfg;
567 struct iosm_imem *ipc_imem;
571 ipc_imem = container_of(instance, struct iosm_imem, run_state_worker);
573 if (ipc_imem->phase != IPC_P_RUN) {
574 dev_err(ipc_imem->dev,
575 "Modem link down. Exit run state worker.");
579 if (test_and_clear_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag))
580 ipc_devlink_deinit(ipc_imem->ipc_devlink);
582 ret = ipc_imem_setup_cp_mux_cap_init(ipc_imem, &mux_cfg);
586 ipc_imem->mux = ipc_mux_init(&mux_cfg, ipc_imem);
590 ret = ipc_imem_wwan_channel_init(ipc_imem, mux_cfg.protocol);
592 goto err_ipc_mux_deinit;
594 ipc_imem->mux->wwan = ipc_imem->wwan;
596 while (ctrl_chl_idx < IPC_MEM_MAX_CHANNELS) {
597 if (!ipc_chnl_cfg_get(&chnl_cfg_port, ctrl_chl_idx)) {
598 ipc_imem->ipc_port[ctrl_chl_idx] = NULL;
600 if (ipc_imem->pcie->pci->device == INTEL_CP_DEVICE_7560_ID &&
601 chnl_cfg_port.wwan_port_type == WWAN_PORT_XMMRPC) {
606 if (ipc_imem->pcie->pci->device == INTEL_CP_DEVICE_7360_ID &&
607 chnl_cfg_port.wwan_port_type == WWAN_PORT_MBIM) {
611 if (chnl_cfg_port.wwan_port_type != WWAN_PORT_UNKNOWN) {
612 ipc_imem_channel_init(ipc_imem, IPC_CTYPE_CTRL,
615 ipc_imem->ipc_port[ctrl_chl_idx] =
616 ipc_port_init(ipc_imem, chnl_cfg_port);
622 ipc_debugfs_init(ipc_imem);
624 ipc_task_queue_send_task(ipc_imem, ipc_imem_send_mdm_rdy_cb, 0, NULL, 0,
627 /* Complete all memory stores before setting bit */
628 smp_mb__before_atomic();
630 set_bit(FULLY_FUNCTIONAL, &ipc_imem->flag);
632 /* Complete all memory stores after setting bit */
633 smp_mb__after_atomic();
635 if (ipc_imem->pcie->pci->device == INTEL_CP_DEVICE_7560_ID) {
636 pm_runtime_mark_last_busy(ipc_imem->dev);
637 pm_runtime_put_autosuspend(ipc_imem->dev);
643 ipc_mux_deinit(ipc_imem->mux);
645 ipc_uevent_send(ipc_imem->dev, UEVENT_CD_READY_LINK_DOWN);
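/* Central interrupt/tasklet handler: update the operation phase, process the
 * message ring and all open pipes, and re-arm the TD related timers as needed.
 */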
648 static void ipc_imem_handle_irq(struct iosm_imem *ipc_imem, int irq)
650 enum ipc_mem_device_ipc_state curr_ipc_status;
651 enum ipc_phase old_phase, phase;
652 bool retry_allocation = false;
653 bool ul_pending = false;
656 if (irq != IMEM_IRQ_DONT_CARE)
657 ipc_imem->ev_irq_pending[irq] = false;
659 /* Get the internal phase. */
660 old_phase = ipc_imem->phase;
662 if (old_phase == IPC_P_OFF_REQ) {
663 dev_dbg(ipc_imem->dev,
664 "[%s]: Ignoring MSI. Deinit sequence in progress!",
665 ipc_imem_phase_get_string(old_phase));
669 /* Update the phase controlled by CP. */
670 phase = ipc_imem_phase_update(ipc_imem);
674 if (!ipc_imem->enter_runtime) {
675 /* Execute the transition from flash/boot to runtime. */
676 ipc_imem->enter_runtime = 1;
678 /* Allow the device to sleep; the default value is
679 * IPC_HOST_SLEEP_ENTER_SLEEP
681 ipc_imem_msg_send_device_sleep(ipc_imem,
682 ipc_imem->device_sleep);
684 ipc_imem_msg_send_feature_set(ipc_imem,
685 IPC_MEM_INBAND_CRASH_SIG,
690 ipc_protocol_get_ipc_status(ipc_imem->ipc_protocol);
692 /* check ipc_status change */
693 if (ipc_imem->ipc_status != curr_ipc_status) {
694 ipc_imem->ipc_status = curr_ipc_status;
696 if (ipc_imem->ipc_status ==
697 IPC_MEM_DEVICE_IPC_RUNNING) {
698 schedule_work(&ipc_imem->run_state_worker);
702 /* Consider power management in the runtime phase. */
703 ipc_imem_slp_control_exec(ipc_imem);
704 break; /* Continue with skbuf processing. */
706 /* Unexpected phases. */
709 dev_err(ipc_imem->dev, "confused phase %s",
710 ipc_imem_phase_get_string(phase));
714 if (old_phase != IPC_P_ROM)
718 /* On CP the PSI phase is already active. */
721 /* Before CP ROM driver starts the PSI image, it sets
722 * the exit_code field on the doorbell scratchpad and
725 ipc_imem_rom_irq_exec(ipc_imem);
732 /* process message ring */
733 ipc_protocol_msg_process(ipc_imem, irq);
735 /* process all open pipes */
736 for (i = 0; i < IPC_MEM_MAX_CHANNELS; i++) {
737 struct ipc_pipe *ul_pipe = &ipc_imem->channels[i].ul_pipe;
738 struct ipc_pipe *dl_pipe = &ipc_imem->channels[i].dl_pipe;
740 if (dl_pipe->is_open &&
741 (irq == IMEM_IRQ_DONT_CARE || irq == dl_pipe->irq)) {
742 ipc_imem_dl_pipe_process(ipc_imem, dl_pipe);
744 if (dl_pipe->nr_of_queued_entries == 0)
745 retry_allocation = true;
748 if (ul_pipe->is_open)
749 ipc_imem_ul_pipe_process(ipc_imem, ul_pipe);
752 /* Try to generate new ADB or ADGH. */
753 if (ipc_mux_ul_data_encode(ipc_imem->mux)) {
754 ipc_imem_td_update_timer_start(ipc_imem);
755 if (ipc_imem->mux->protocol == MUX_AGGREGATION)
756 ipc_imem_adb_timer_start(ipc_imem);
759 /* Continue the send procedure with accumulated SIO or NETIF packets.
760 * Reset the debounce flags.
762 ul_pending |= ipc_imem_ul_write_td(ipc_imem);
764 /* If UL data is pending, restart the TD update timer. */
766 ipc_imem->hrtimer_period =
767 ktime_set(0, TD_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
768 if (!hrtimer_active(&ipc_imem->tdupdate_timer))
769 hrtimer_start(&ipc_imem->tdupdate_timer,
770 ipc_imem->hrtimer_period,
774 /* If CP has executed the transition
775 * from IPC_INIT to IPC_RUNNING in the PSI
776 * phase, wake up the flash app to open the pipes.
778 if ((phase == IPC_P_PSI || phase == IPC_P_EBL) &&
779 ipc_imem->ipc_requested_state == IPC_MEM_DEVICE_IPC_RUNNING &&
780 ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
781 IPC_MEM_DEVICE_IPC_RUNNING) {
782 complete(&ipc_imem->ipc_devlink->devlink_sio.channel->ul_sem);
785 /* Reset the expected CP state. */
786 ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_DONT_CARE;
788 if (retry_allocation) {
789 ipc_imem->hrtimer_period =
790 ktime_set(0, IPC_TD_ALLOC_TIMER_PERIOD_MS * 1000 * 1000ULL);
791 if (!hrtimer_active(&ipc_imem->td_alloc_timer))
792 hrtimer_start(&ipc_imem->td_alloc_timer,
793 ipc_imem->hrtimer_period,
798 /* Callback by tasklet for handling interrupt events. */
799 static int ipc_imem_tq_irq_cb(struct iosm_imem *ipc_imem, int arg, void *msg,
802 ipc_imem_handle_irq(ipc_imem, arg);
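/* Send the accumulated UL data and arm the TD update timer when needed. */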
807 void ipc_imem_ul_send(struct iosm_imem *ipc_imem)
809 /* start doorbell irq delay timer if UL is pending */
810 if (ipc_imem_ul_write_td(ipc_imem))
811 ipc_imem_td_update_timer_start(ipc_imem);
814 /* Check the execution stage and update the AP phase */
815 static enum ipc_phase ipc_imem_phase_update_check(struct iosm_imem *ipc_imem,
816 enum ipc_mem_exec_stage stage)
819 case IPC_MEM_EXEC_STAGE_BOOT:
820 if (ipc_imem->phase != IPC_P_ROM) {
821 /* Send this event only once */
822 ipc_uevent_send(ipc_imem->dev, UEVENT_ROM_READY);
825 ipc_imem->phase = IPC_P_ROM;
828 case IPC_MEM_EXEC_STAGE_PSI:
829 ipc_imem->phase = IPC_P_PSI;
832 case IPC_MEM_EXEC_STAGE_EBL:
833 ipc_imem->phase = IPC_P_EBL;
836 case IPC_MEM_EXEC_STAGE_RUN:
837 if (ipc_imem->phase != IPC_P_RUN &&
838 ipc_imem->ipc_status == IPC_MEM_DEVICE_IPC_RUNNING) {
839 ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_READY);
841 ipc_imem->phase = IPC_P_RUN;
844 case IPC_MEM_EXEC_STAGE_CRASH:
845 if (ipc_imem->phase != IPC_P_CRASH)
846 ipc_uevent_send(ipc_imem->dev, UEVENT_CRASH);
848 ipc_imem->phase = IPC_P_CRASH;
851 case IPC_MEM_EXEC_STAGE_CD_READY:
852 if (ipc_imem->phase != IPC_P_CD_READY)
853 ipc_uevent_send(ipc_imem->dev, UEVENT_CD_READY);
854 ipc_imem->phase = IPC_P_CD_READY;
858 /* unknown exec stage:
859 * assume that link is down and send info to listeners
861 ipc_uevent_send(ipc_imem->dev, UEVENT_CD_READY_LINK_DOWN);
865 return ipc_imem->phase;
868 /* Send msg to device to open pipe */
869 static bool ipc_imem_pipe_open(struct iosm_imem *ipc_imem,
870 struct ipc_pipe *pipe)
872 union ipc_msg_prep_args prep_args = {
873 .pipe_open.pipe = pipe,
876 if (ipc_protocol_msg_send(ipc_imem->ipc_protocol,
877 IPC_MSG_PREP_PIPE_OPEN, &prep_args) == 0)
878 pipe->is_open = true;
880 return pipe->is_open;
883 /* Allocate the TDs for the given DL pipe and fire the HP update doorbell. */
884 static int ipc_imem_tq_pipe_td_alloc(struct iosm_imem *ipc_imem, int arg,
885 void *msg, size_t size)
887 struct ipc_pipe *dl_pipe = msg;
888 bool processed = false;
891 for (i = 0; i < dl_pipe->nr_of_entries - 1; i++)
892 processed |= ipc_imem_dl_skb_alloc(ipc_imem, dl_pipe);
894 /* Trigger the doorbell irq to inform CP that new downlink buffers are
898 ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol, arg);
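/* TD update timer expiry: defer the HP doorbell trigger to the tasklet context. */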
903 static enum hrtimer_restart
904 ipc_imem_td_update_timer_cb(struct hrtimer *hr_timer)
906 struct iosm_imem *ipc_imem =
907 container_of(hr_timer, struct iosm_imem, tdupdate_timer);
909 ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_td_update_timer_cb, 0,
911 return HRTIMER_NORESTART;
914 /* Get the CP execution state and map it to the AP phase. */
915 enum ipc_phase ipc_imem_phase_update(struct iosm_imem *ipc_imem)
917 enum ipc_mem_exec_stage exec_stage =
918 ipc_imem_get_exec_stage_buffered(ipc_imem);
919 /* During shutdown keep the precalculated phase, otherwise map the CP stage. */
920 return ipc_imem->phase == IPC_P_OFF_REQ ?
922 ipc_imem_phase_update_check(ipc_imem, exec_stage);
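/* Map the AP operation phase to a human readable string. */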
925 const char *ipc_imem_phase_get_string(enum ipc_phase phase)
957 void ipc_imem_pipe_close(struct iosm_imem *ipc_imem, struct ipc_pipe *pipe)
959 union ipc_msg_prep_args prep_args = { .pipe_close.pipe = pipe };
961 pipe->is_open = false;
962 ipc_protocol_msg_send(ipc_imem->ipc_protocol, IPC_MSG_PREP_PIPE_CLOSE,
965 ipc_imem_pipe_cleanup(ipc_imem, pipe);
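/* Close both pipes of a channel and release it; in the run phase the pipes are
 * closed via CP messages, otherwise they are only cleaned up locally.
 */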
968 void ipc_imem_channel_close(struct iosm_imem *ipc_imem, int channel_id)
970 struct ipc_mem_channel *channel;
972 if (channel_id < 0 || channel_id >= ipc_imem->nr_of_channels) {
973 dev_err(ipc_imem->dev, "invalid channel id %d", channel_id);
977 channel = &ipc_imem->channels[channel_id];
979 if (channel->state == IMEM_CHANNEL_FREE) {
980 dev_err(ipc_imem->dev, "ch[%d]: invalid channel state %d",
981 channel_id, channel->state);
985 /* Free only the channel id in the CP power off mode. */
986 if (channel->state == IMEM_CHANNEL_RESERVED)
987 /* Release only the channel id. */
990 if (ipc_imem->phase == IPC_P_RUN) {
991 ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
992 ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);
995 ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);
996 ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);
999 ipc_imem_channel_free(channel);
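/* Activate a reserved channel: open both pipes and allocate its DL buffers in
 * tasklet context.
 */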
1002 struct ipc_mem_channel *ipc_imem_channel_open(struct iosm_imem *ipc_imem,
1003 int channel_id, u32 db_id)
1005 struct ipc_mem_channel *channel;
1007 if (channel_id < 0 || channel_id >= IPC_MEM_MAX_CHANNELS) {
1008 dev_err(ipc_imem->dev, "invalid channel ID: %d", channel_id);
1012 channel = &ipc_imem->channels[channel_id];
1014 channel->state = IMEM_CHANNEL_ACTIVE;
1016 if (!ipc_imem_pipe_open(ipc_imem, &channel->ul_pipe))
1019 if (!ipc_imem_pipe_open(ipc_imem, &channel->dl_pipe))
1022 /* Allocate the downlink buffers in tasklet context. */
1023 if (ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_pipe_td_alloc, db_id,
1024 &channel->dl_pipe, 0, false)) {
1025 dev_err(ipc_imem->dev, "td allocation failed : %d", channel_id);
1029 /* Active channel. */
1032 ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);
1034 ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
1036 ipc_imem_channel_free(channel);
1040 void ipc_imem_pm_suspend(struct iosm_imem *ipc_imem)
1042 ipc_protocol_suspend(ipc_imem->ipc_protocol);
1045 void ipc_imem_pm_s2idle_sleep(struct iosm_imem *ipc_imem, bool sleep)
1047 ipc_protocol_s2idle_sleep(ipc_imem->ipc_protocol, sleep);
1050 void ipc_imem_pm_resume(struct iosm_imem *ipc_imem)
1052 enum ipc_mem_exec_stage stage;
1054 if (ipc_protocol_resume(ipc_imem->ipc_protocol)) {
1055 stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
1056 ipc_imem_phase_update_check(ipc_imem, stage);
1060 void ipc_imem_channel_free(struct ipc_mem_channel *channel)
1062 /* Reset dynamic channel elements. */
1063 channel->state = IMEM_CHANNEL_FREE;
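/* Look up the channel matching the given type and index and reserve it if free. */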
1066 int ipc_imem_channel_alloc(struct iosm_imem *ipc_imem, int index,
1067 enum ipc_ctype ctype)
1069 struct ipc_mem_channel *channel;
1072 /* Find channel of given type/index */
1073 for (i = 0; i < ipc_imem->nr_of_channels; i++) {
1074 channel = &ipc_imem->channels[i];
1075 if (channel->ctype == ctype && channel->index == index)
1079 if (i >= ipc_imem->nr_of_channels) {
1080 dev_dbg(ipc_imem->dev,
1081 "no channel definition for index=%d ctype=%d", index,
1086 if (ipc_imem->channels[i].state != IMEM_CHANNEL_FREE) {
1087 dev_dbg(ipc_imem->dev, "channel is in use");
1091 if (channel->ctype == IPC_CTYPE_WWAN &&
1092 index == IPC_MEM_MUX_IP_CH_IF_ID)
1093 channel->if_id = index;
1095 channel->channel_id = index;
1096 channel->state = IMEM_CHANNEL_RESERVED;
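/* Initialize the next free channel slot from the given channel configuration. */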
1101 void ipc_imem_channel_init(struct iosm_imem *ipc_imem, enum ipc_ctype ctype,
1102 struct ipc_chnl_cfg chnl_cfg, u32 irq_moderation)
1104 struct ipc_mem_channel *channel;
1106 if (chnl_cfg.ul_pipe >= IPC_MEM_MAX_PIPES ||
1107 chnl_cfg.dl_pipe >= IPC_MEM_MAX_PIPES) {
1108 dev_err(ipc_imem->dev, "invalid pipe: ul_pipe=%d, dl_pipe=%d",
1109 chnl_cfg.ul_pipe, chnl_cfg.dl_pipe);
1113 if (ipc_imem->nr_of_channels >= IPC_MEM_MAX_CHANNELS) {
1114 dev_err(ipc_imem->dev, "too many channels");
1118 channel = &ipc_imem->channels[ipc_imem->nr_of_channels];
1119 channel->channel_id = ipc_imem->nr_of_channels;
1120 channel->ctype = ctype;
1121 channel->index = chnl_cfg.id;
1122 channel->net_err_count = 0;
1123 channel->state = IMEM_CHANNEL_FREE;
1124 ipc_imem->nr_of_channels++;
1126 ipc_imem_channel_update(ipc_imem, channel->channel_id, chnl_cfg,
1129 skb_queue_head_init(&channel->ul_list);
1131 init_completion(&channel->ul_sem);
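/* Apply the pipe parameters of chnl_cfg to an already allocated channel. */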
1134 void ipc_imem_channel_update(struct iosm_imem *ipc_imem, int id,
1135 struct ipc_chnl_cfg chnl_cfg, u32 irq_moderation)
1137 struct ipc_mem_channel *channel;
1139 if (id < 0 || id >= ipc_imem->nr_of_channels) {
1140 dev_err(ipc_imem->dev, "invalid channel id %d", id);
1144 channel = &ipc_imem->channels[id];
1146 if (channel->state != IMEM_CHANNEL_FREE &&
1147 channel->state != IMEM_CHANNEL_RESERVED) {
1148 dev_err(ipc_imem->dev, "invalid channel state %d",
1153 channel->ul_pipe.nr_of_entries = chnl_cfg.ul_nr_of_entries;
1154 channel->ul_pipe.pipe_nr = chnl_cfg.ul_pipe;
1155 channel->ul_pipe.is_open = false;
1156 channel->ul_pipe.irq = IPC_UL_PIPE_IRQ_VECTOR;
1157 channel->ul_pipe.channel = channel;
1158 channel->ul_pipe.dir = IPC_MEM_DIR_UL;
1159 channel->ul_pipe.accumulation_backoff = chnl_cfg.accumulation_backoff;
1160 channel->ul_pipe.irq_moderation = irq_moderation;
1161 channel->ul_pipe.buf_size = 0;
1163 channel->dl_pipe.nr_of_entries = chnl_cfg.dl_nr_of_entries;
1164 channel->dl_pipe.pipe_nr = chnl_cfg.dl_pipe;
1165 channel->dl_pipe.is_open = false;
1166 channel->dl_pipe.irq = IPC_DL_PIPE_IRQ_VECTOR;
1167 channel->dl_pipe.channel = channel;
1168 channel->dl_pipe.dir = IPC_MEM_DIR_DL;
1169 channel->dl_pipe.accumulation_backoff = chnl_cfg.accumulation_backoff;
1170 channel->dl_pipe.irq_moderation = irq_moderation;
1171 channel->dl_pipe.buf_size = chnl_cfg.dl_buf_size;
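/* Clean up the pipes of all channels and mark every channel as free. */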
1174 static void ipc_imem_channel_reset(struct iosm_imem *ipc_imem)
1178 for (i = 0; i < ipc_imem->nr_of_channels; i++) {
1179 struct ipc_mem_channel *channel;
1181 channel = &ipc_imem->channels[i];
1183 ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);
1184 ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);
1186 ipc_imem_channel_free(channel);
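/* Release the resources of a pipe: mark it closed, drop queued UL skbs and let
 * the protocol layer free its transfer descriptors.
 */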
1190 void ipc_imem_pipe_cleanup(struct iosm_imem *ipc_imem, struct ipc_pipe *pipe)
1192 struct sk_buff *skb;
1194 /* Force pipe to closed state also when not explicitly closed through
1195 * ipc_imem_pipe_close()
1197 pipe->is_open = false;
1199 /* Empty the uplink skb accumulator. */
1200 while ((skb = skb_dequeue(&pipe->channel->ul_list)))
1201 ipc_pcie_kfree_skb(ipc_imem->pcie, skb);
1203 ipc_protocol_pipe_cleanup(ipc_imem->ipc_protocol, pipe);
1206 /* Send IPC protocol uninit to the modem when Link is active. */
1207 static void ipc_imem_device_ipc_uninit(struct iosm_imem *ipc_imem)
1209 int timeout = IPC_MODEM_UNINIT_TIMEOUT_MS;
1210 enum ipc_mem_device_ipc_state ipc_state;
1212 /* When the PCIe link is up, set the modem to IPC_UNINIT;
1213 * otherwise ignore it when the PCIe link goes down.
1215 if (ipc_pcie_check_data_link_active(ipc_imem->pcie)) {
1216 /* set modem to UNINIT
1217 * (in case we want to reload the AP driver without resetting
1220 ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
1221 IPC_MEM_DEVICE_IPC_UNINIT);
1222 ipc_state = ipc_mmio_get_ipc_state(ipc_imem->mmio);
1224 /* Wait for maximum 30ms to allow the Modem to uninitialize the
1227 while ((ipc_state <= IPC_MEM_DEVICE_IPC_DONT_CARE) &&
1228 (ipc_state != IPC_MEM_DEVICE_IPC_UNINIT) &&
1230 usleep_range(1000, 1250);
1232 ipc_state = ipc_mmio_get_ipc_state(ipc_imem->mmio);
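/* Tear down the shared memory layer: stop timers and workers, uninitialize the
 * modem IPC, reset all channels and free the protocol and task instances.
 */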
1237 void ipc_imem_cleanup(struct iosm_imem *ipc_imem)
1239 ipc_imem->phase = IPC_P_OFF_REQ;
1241 /* forward MDM_NOT_READY to listeners */
1242 ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_NOT_READY);
1243 pm_runtime_get_sync(ipc_imem->dev);
1245 hrtimer_cancel(&ipc_imem->td_alloc_timer);
1246 hrtimer_cancel(&ipc_imem->tdupdate_timer);
1247 hrtimer_cancel(&ipc_imem->fast_update_timer);
1248 hrtimer_cancel(&ipc_imem->startup_timer);
1250 /* cancel the workqueue */
1251 cancel_work_sync(&ipc_imem->run_state_worker);
1253 if (test_and_clear_bit(FULLY_FUNCTIONAL, &ipc_imem->flag)) {
1254 ipc_mux_deinit(ipc_imem->mux);
1255 ipc_debugfs_deinit(ipc_imem);
1256 ipc_wwan_deinit(ipc_imem->wwan);
1257 ipc_port_deinit(ipc_imem->ipc_port);
1260 if (test_and_clear_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag))
1261 ipc_devlink_deinit(ipc_imem->ipc_devlink);
1263 ipc_imem_device_ipc_uninit(ipc_imem);
1264 ipc_imem_channel_reset(ipc_imem);
1266 ipc_protocol_deinit(ipc_imem->ipc_protocol);
1267 ipc_task_deinit(ipc_imem->ipc_task);
1269 kfree(ipc_imem->ipc_task);
1270 kfree(ipc_imem->mmio);
1272 ipc_imem->phase = IPC_P_OFF;
1275 /* After CP has unblocked the PCIe link, save the start address of the doorbell
1276 * scratchpad and prepare the shared memory region. If the flashing to RAM
1277 * procedure shall be executed, copy the chip information from the doorbell
1278 * scratchpad to the application buffer and wake up the flash app.
1280 static int ipc_imem_config(struct iosm_imem *ipc_imem)
1282 enum ipc_phase phase;
1284 /* Initialize the semaphore for the blocking read UL/DL transfer. */
1285 init_completion(&ipc_imem->ul_pend_sem);
1287 init_completion(&ipc_imem->dl_pend_sem);
1289 /* clear internal flags */
1290 ipc_imem->ipc_status = IPC_MEM_DEVICE_IPC_UNINIT;
1291 ipc_imem->enter_runtime = 0;
1293 phase = ipc_imem_phase_update(ipc_imem);
1295 /* Either CP shall be in the power off or power on phase. */
1298 ipc_imem->hrtimer_period = ktime_set(0, 1000 * 1000 * 1000ULL);
1299 /* poll execution stage (for delayed start, e.g. NAND) */
1300 if (!hrtimer_active(&ipc_imem->startup_timer))
1301 hrtimer_start(&ipc_imem->startup_timer,
1302 ipc_imem->hrtimer_period,
1309 /* The initial IPC state is IPC_MEM_DEVICE_IPC_UNINIT. */
1310 ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_UNINIT;
1312 /* Verify the expected initial state. */
1313 if (ipc_imem->ipc_requested_state ==
1314 ipc_mmio_get_ipc_state(ipc_imem->mmio)) {
1315 ipc_imem_ipc_init_check(ipc_imem);
1319 dev_err(ipc_imem->dev,
1320 "ipc_status(%d) != IPC_MEM_DEVICE_IPC_UNINIT",
1321 ipc_mmio_get_ipc_state(ipc_imem->mmio));
1324 case IPC_P_CD_READY:
1325 dev_dbg(ipc_imem->dev,
1326 "Modem is in phase %d, reset Modem to collect CD",
1330 dev_err(ipc_imem->dev, "unexpected operation phase %d", phase);
1334 complete(&ipc_imem->dl_pend_sem);
1335 complete(&ipc_imem->ul_pend_sem);
1336 ipc_imem->phase = IPC_P_OFF;
1340 /* Pass the dev ptr to the shared memory driver and request the entry points */
1341 struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id,
1342 void __iomem *mmio, struct device *dev)
1344 struct iosm_imem *ipc_imem = kzalloc(sizeof(*pcie->imem), GFP_KERNEL);
1345 enum ipc_mem_exec_stage stage;
1350 /* Save the device address. */
1351 ipc_imem->pcie = pcie;
1352 ipc_imem->dev = dev;
1354 ipc_imem->pci_device_id = device_id;
1356 ipc_imem->cp_version = 0;
1357 ipc_imem->device_sleep = IPC_HOST_SLEEP_ENTER_SLEEP;
1359 /* Reset the max number of configured channels */
1360 ipc_imem->nr_of_channels = 0;
1362 /* allocate IPC MMIO */
1363 ipc_imem->mmio = ipc_mmio_init(mmio, ipc_imem->dev);
1364 if (!ipc_imem->mmio) {
1365 dev_err(ipc_imem->dev, "failed to initialize mmio region");
1366 goto mmio_init_fail;
1369 ipc_imem->ipc_task = kzalloc(sizeof(*ipc_imem->ipc_task),
1372 /* Create tasklet for event handling. */
1373 if (!ipc_imem->ipc_task)
1376 if (ipc_task_init(ipc_imem->ipc_task))
1377 goto ipc_task_init_fail;
1379 ipc_imem->ipc_task->dev = ipc_imem->dev;
1381 INIT_WORK(&ipc_imem->run_state_worker, ipc_imem_run_state_worker);
1383 ipc_imem->ipc_protocol = ipc_protocol_init(ipc_imem);
1385 if (!ipc_imem->ipc_protocol)
1386 goto protocol_init_fail;
1388 /* The phase is set to power off. */
1389 ipc_imem->phase = IPC_P_OFF;
1391 hrtimer_init(&ipc_imem->startup_timer, CLOCK_MONOTONIC,
1393 ipc_imem->startup_timer.function = ipc_imem_startup_timer_cb;
1395 hrtimer_init(&ipc_imem->tdupdate_timer, CLOCK_MONOTONIC,
1397 ipc_imem->tdupdate_timer.function = ipc_imem_td_update_timer_cb;
1399 hrtimer_init(&ipc_imem->fast_update_timer, CLOCK_MONOTONIC,
1401 ipc_imem->fast_update_timer.function = ipc_imem_fast_update_timer_cb;
1403 hrtimer_init(&ipc_imem->td_alloc_timer, CLOCK_MONOTONIC,
1405 ipc_imem->td_alloc_timer.function = ipc_imem_td_alloc_timer_cb;
1407 hrtimer_init(&ipc_imem->adb_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1408 ipc_imem->adb_timer.function = ipc_imem_adb_timer_cb;
1410 if (ipc_imem_config(ipc_imem)) {
1411 dev_err(ipc_imem->dev, "failed to initialize the imem");
1412 goto imem_config_fail;
1415 stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
1416 if (stage == IPC_MEM_EXEC_STAGE_BOOT) {
1417 /* Alloc and Register devlink */
1418 ipc_imem->ipc_devlink = ipc_devlink_init(ipc_imem);
1419 if (!ipc_imem->ipc_devlink) {
1420 dev_err(ipc_imem->dev, "Devlink register failed");
1421 goto imem_config_fail;
1424 if (ipc_flash_link_establish(ipc_imem))
1425 goto devlink_channel_fail;
1427 set_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag);
1430 if (!pm_runtime_enabled(ipc_imem->dev))
1431 pm_runtime_enable(ipc_imem->dev);
1433 pm_runtime_set_autosuspend_delay(ipc_imem->dev,
1434 IPC_MEM_AUTO_SUSPEND_DELAY_MS);
1435 pm_runtime_use_autosuspend(ipc_imem->dev);
1436 pm_runtime_allow(ipc_imem->dev);
1437 pm_runtime_mark_last_busy(ipc_imem->dev);
1440 devlink_channel_fail:
1441 ipc_devlink_deinit(ipc_imem->ipc_devlink);
1443 hrtimer_cancel(&ipc_imem->td_alloc_timer);
1444 hrtimer_cancel(&ipc_imem->fast_update_timer);
1445 hrtimer_cancel(&ipc_imem->tdupdate_timer);
1446 hrtimer_cancel(&ipc_imem->startup_timer);
1448 cancel_work_sync(&ipc_imem->run_state_worker);
1449 ipc_task_deinit(ipc_imem->ipc_task);
1451 kfree(ipc_imem->ipc_task);
1453 kfree(ipc_imem->mmio);
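/* MSI entry point: debounce the interrupt vector and defer handling to the tasklet. */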
1459 void ipc_imem_irq_process(struct iosm_imem *ipc_imem, int irq)
1461 /* Debounce IPC_EV_IRQ. */
1462 if (ipc_imem && !ipc_imem->ev_irq_pending[irq]) {
1463 ipc_imem->ev_irq_pending[irq] = true;
1464 ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_irq_cb, irq,
1469 void ipc_imem_td_update_timer_suspend(struct iosm_imem *ipc_imem, bool suspend)
1471 ipc_imem->td_update_timer_suspended = suspend;
1474 /* Verify the CP execution state, copy the chip info,
1475 * change the execution phase to ROM
1477 static int ipc_imem_devlink_trigger_chip_info_cb(struct iosm_imem *ipc_imem,
1481 enum ipc_mem_exec_stage stage;
1482 struct sk_buff *skb;
1486 /* Test the CP execution state. */
1487 stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
1488 if (stage != IPC_MEM_EXEC_STAGE_BOOT) {
1489 dev_err(ipc_imem->dev,
1490 "Execution_stage: expected BOOT, received = %X", stage);
1491 goto trigger_chip_info_fail;
1493 /* Allocate a new sk buf for the chip info. */
1494 size = ipc_imem->mmio->chip_info_size;
1495 if (size > IOSM_CHIP_INFO_SIZE_MAX)
1496 goto trigger_chip_info_fail;
1498 skb = ipc_pcie_alloc_local_skb(ipc_imem->pcie, GFP_ATOMIC, size);
1500 dev_err(ipc_imem->dev, "exhausted skbuf kernel DL memory");
1502 goto trigger_chip_info_fail;
1504 /* Copy the chip info characters into the ipc_skb. */
1505 ipc_mmio_copy_chip_info(ipc_imem->mmio, skb_put(skb, size), size);
1506 /* First change to the ROM boot phase. */
1507 dev_dbg(ipc_imem->dev, "execution_stage[%X] eq. BOOT", stage);
1508 ipc_imem->phase = ipc_imem_phase_update(ipc_imem);
1509 ipc_imem_sys_devlink_notify_rx(ipc_imem->ipc_devlink, skb);
1511 trigger_chip_info_fail:
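/* Schedule the chip info transfer in tasklet context. */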
1515 int ipc_imem_devlink_trigger_chip_info(struct iosm_imem *ipc_imem)
1517 return ipc_task_queue_send_task(ipc_imem,
1518 ipc_imem_devlink_trigger_chip_info_cb,