// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/dma-mapping.h>
#include <linux/of_address.h>
#include <linux/iommu.h>
#include <linux/remoteproc.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>

#include "ahb.h"
#include "core.h"
#include "debug.h"
#include "hif.h"
#include "pci.h"
#include "pcic.h"
22 static const struct of_device_id ath11k_ahb_of_match[] = {
23 /* TODO: Should we change the compatible string to something similar
24 * to one that ath10k uses?
26 { .compatible = "qcom,ipq8074-wifi",
27 .data = (void *)ATH11K_HW_IPQ8074,
29 { .compatible = "qcom,ipq6018-wifi",
30 .data = (void *)ATH11K_HW_IPQ6018_HW10,
32 { .compatible = "qcom,wcn6750-wifi",
33 .data = (void *)ATH11K_HW_WCN6750_HW10,
35 { .compatible = "qcom,ipq5018-wifi",
36 .data = (void *)ATH11K_HW_IPQ5018_HW10,
41 MODULE_DEVICE_TABLE(of, ath11k_ahb_of_match);
43 #define ATH11K_IRQ_CE0_OFFSET 4
45 static const char *irq_name[ATH11K_IRQ_NUM_MAX] = {
63 "host2reo-re-injection",
65 "host2rxdma-monitor-ring3",
66 "host2rxdma-monitor-ring2",
67 "host2rxdma-monitor-ring1",
69 "wbm2host-rx-release",
71 "reo2host-destination-ring4",
72 "reo2host-destination-ring3",
73 "reo2host-destination-ring2",
74 "reo2host-destination-ring1",
75 "rxdma2host-monitor-destination-mac3",
76 "rxdma2host-monitor-destination-mac2",
77 "rxdma2host-monitor-destination-mac1",
78 "ppdu-end-interrupts-mac3",
79 "ppdu-end-interrupts-mac2",
80 "ppdu-end-interrupts-mac1",
81 "rxdma2host-monitor-status-ring-mac3",
82 "rxdma2host-monitor-status-ring-mac2",
83 "rxdma2host-monitor-status-ring-mac1",
84 "host2rxdma-host-buf-ring-mac3",
85 "host2rxdma-host-buf-ring-mac2",
86 "host2rxdma-host-buf-ring-mac1",
87 "rxdma2host-destination-ring-mac3",
88 "rxdma2host-destination-ring-mac2",
89 "rxdma2host-destination-ring-mac1",
90 "host2tcl-input-ring4",
91 "host2tcl-input-ring3",
92 "host2tcl-input-ring2",
93 "host2tcl-input-ring1",
94 "wbm2host-tx-completions-ring3",
95 "wbm2host-tx-completions-ring2",
96 "wbm2host-tx-completions-ring1",
97 "tcl2host-status-ring",
/* enum ext_irq_num - irq numbers that can be used by external modules
 * like datapath
 *
 * Values index into irq_name[]; the explicit "= 16" anchor skips the
 * misc/watchdog/CE entries at the front of that table.
 */
enum ext_irq_num {
	host2wbm_desc_feed = 16,
	host2reo_re_injection,
	host2reo_cmd,
	host2rxdma_monitor_ring3,
	host2rxdma_monitor_ring2,
	host2rxdma_monitor_ring1,
	reo2host_exception,
	wbm2host_rx_release,
	reo2host_status,
	reo2host_destination_ring4,
	reo2host_destination_ring3,
	reo2host_destination_ring2,
	reo2host_destination_ring1,
	rxdma2host_monitor_destination_mac3,
	rxdma2host_monitor_destination_mac2,
	rxdma2host_monitor_destination_mac1,
	ppdu_end_interrupts_mac3,
	ppdu_end_interrupts_mac2,
	ppdu_end_interrupts_mac1,
	rxdma2host_monitor_status_ring_mac3,
	rxdma2host_monitor_status_ring_mac2,
	rxdma2host_monitor_status_ring_mac1,
	host2rxdma_host_buf_ring_mac3,
	host2rxdma_host_buf_ring_mac2,
	host2rxdma_host_buf_ring_mac1,
	rxdma2host_destination_ring_mac3,
	rxdma2host_destination_ring_mac2,
	rxdma2host_destination_ring_mac1,
	host2tcl_input_ring4,
	host2tcl_input_ring3,
	host2tcl_input_ring2,
	host2tcl_input_ring1,
	wbm2host_tx_completions_ring3,
	wbm2host_tx_completions_ring2,
	wbm2host_tx_completions_ring1,
	tcl2host_status_ring,
};
143 ath11k_ahb_get_msi_irq_wcn6750(struct ath11k_base *ab, unsigned int vector)
145 return ab->pci.msi.irqs[vector];
149 ath11k_ahb_get_window_start_wcn6750(struct ath11k_base *ab, u32 offset)
151 u32 window_start = 0;
153 /* If offset lies within DP register range, use 1st window */
154 if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < ATH11K_PCI_WINDOW_RANGE_MASK)
155 window_start = ATH11K_PCI_WINDOW_START;
156 /* If offset lies within CE register range, use 2nd window */
157 else if ((offset ^ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab)) <
158 ATH11K_PCI_WINDOW_RANGE_MASK)
159 window_start = 2 * ATH11K_PCI_WINDOW_START;
165 ath11k_ahb_window_write32_wcn6750(struct ath11k_base *ab, u32 offset, u32 value)
169 /* WCN6750 uses static window based register access*/
170 window_start = ath11k_ahb_get_window_start_wcn6750(ab, offset);
172 iowrite32(value, ab->mem + window_start +
173 (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
176 static u32 ath11k_ahb_window_read32_wcn6750(struct ath11k_base *ab, u32 offset)
181 /* WCN6750 uses static window based register access */
182 window_start = ath11k_ahb_get_window_start_wcn6750(ab, offset);
184 val = ioread32(ab->mem + window_start +
185 (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
189 static const struct ath11k_pci_ops ath11k_ahb_pci_ops_wcn6750 = {
192 .get_msi_irq = ath11k_ahb_get_msi_irq_wcn6750,
193 .window_write32 = ath11k_ahb_window_write32_wcn6750,
194 .window_read32 = ath11k_ahb_window_read32_wcn6750,
197 static inline u32 ath11k_ahb_read32(struct ath11k_base *ab, u32 offset)
199 return ioread32(ab->mem + offset);
202 static inline void ath11k_ahb_write32(struct ath11k_base *ab, u32 offset, u32 value)
204 iowrite32(value, ab->mem + offset);
207 static void ath11k_ahb_kill_tasklets(struct ath11k_base *ab)
211 for (i = 0; i < ab->hw_params.ce_count; i++) {
212 struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
214 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
217 tasklet_kill(&ce_pipe->intr_tq);
221 static void ath11k_ahb_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp)
225 for (i = 0; i < irq_grp->num_irq; i++)
226 disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
229 static void __ath11k_ahb_ext_irq_disable(struct ath11k_base *ab)
233 for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
234 struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
236 ath11k_ahb_ext_grp_disable(irq_grp);
238 if (irq_grp->napi_enabled) {
239 napi_synchronize(&irq_grp->napi);
240 napi_disable(&irq_grp->napi);
241 irq_grp->napi_enabled = false;
246 static void ath11k_ahb_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp)
250 for (i = 0; i < irq_grp->num_irq; i++)
251 enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
254 static void ath11k_ahb_setbit32(struct ath11k_base *ab, u8 bit, u32 offset)
258 val = ath11k_ahb_read32(ab, offset);
259 ath11k_ahb_write32(ab, offset, val | BIT(bit));
262 static void ath11k_ahb_clearbit32(struct ath11k_base *ab, u8 bit, u32 offset)
266 val = ath11k_ahb_read32(ab, offset);
267 ath11k_ahb_write32(ab, offset, val & ~BIT(bit));
270 static void ath11k_ahb_ce_irq_enable(struct ath11k_base *ab, u16 ce_id)
272 const struct ce_attr *ce_attr;
273 const struct ce_ie_addr *ce_ie_addr = ab->hw_params.ce_ie_addr;
274 u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr;
276 ie1_reg_addr = ce_ie_addr->ie1_reg_addr + ATH11K_CE_OFFSET(ab);
277 ie2_reg_addr = ce_ie_addr->ie2_reg_addr + ATH11K_CE_OFFSET(ab);
278 ie3_reg_addr = ce_ie_addr->ie3_reg_addr + ATH11K_CE_OFFSET(ab);
280 ce_attr = &ab->hw_params.host_ce_config[ce_id];
281 if (ce_attr->src_nentries)
282 ath11k_ahb_setbit32(ab, ce_id, ie1_reg_addr);
284 if (ce_attr->dest_nentries) {
285 ath11k_ahb_setbit32(ab, ce_id, ie2_reg_addr);
286 ath11k_ahb_setbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
291 static void ath11k_ahb_ce_irq_disable(struct ath11k_base *ab, u16 ce_id)
293 const struct ce_attr *ce_attr;
294 const struct ce_ie_addr *ce_ie_addr = ab->hw_params.ce_ie_addr;
295 u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr;
297 ie1_reg_addr = ce_ie_addr->ie1_reg_addr + ATH11K_CE_OFFSET(ab);
298 ie2_reg_addr = ce_ie_addr->ie2_reg_addr + ATH11K_CE_OFFSET(ab);
299 ie3_reg_addr = ce_ie_addr->ie3_reg_addr + ATH11K_CE_OFFSET(ab);
301 ce_attr = &ab->hw_params.host_ce_config[ce_id];
302 if (ce_attr->src_nentries)
303 ath11k_ahb_clearbit32(ab, ce_id, ie1_reg_addr);
305 if (ce_attr->dest_nentries) {
306 ath11k_ahb_clearbit32(ab, ce_id, ie2_reg_addr);
307 ath11k_ahb_clearbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
312 static void ath11k_ahb_sync_ce_irqs(struct ath11k_base *ab)
317 for (i = 0; i < ab->hw_params.ce_count; i++) {
318 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
321 irq_idx = ATH11K_IRQ_CE0_OFFSET + i;
322 synchronize_irq(ab->irq_num[irq_idx]);
326 static void ath11k_ahb_sync_ext_irqs(struct ath11k_base *ab)
331 for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
332 struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
334 for (j = 0; j < irq_grp->num_irq; j++) {
335 irq_idx = irq_grp->irqs[j];
336 synchronize_irq(ab->irq_num[irq_idx]);
341 static void ath11k_ahb_ce_irqs_enable(struct ath11k_base *ab)
345 for (i = 0; i < ab->hw_params.ce_count; i++) {
346 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
348 ath11k_ahb_ce_irq_enable(ab, i);
352 static void ath11k_ahb_ce_irqs_disable(struct ath11k_base *ab)
356 for (i = 0; i < ab->hw_params.ce_count; i++) {
357 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
359 ath11k_ahb_ce_irq_disable(ab, i);
363 static int ath11k_ahb_start(struct ath11k_base *ab)
365 ath11k_ahb_ce_irqs_enable(ab);
366 ath11k_ce_rx_post_buf(ab);
371 static void ath11k_ahb_ext_irq_enable(struct ath11k_base *ab)
375 for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
376 struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
378 if (!irq_grp->napi_enabled) {
379 dev_set_threaded(&irq_grp->napi_ndev, true);
380 napi_enable(&irq_grp->napi);
381 irq_grp->napi_enabled = true;
383 ath11k_ahb_ext_grp_enable(irq_grp);
387 static void ath11k_ahb_ext_irq_disable(struct ath11k_base *ab)
389 __ath11k_ahb_ext_irq_disable(ab);
390 ath11k_ahb_sync_ext_irqs(ab);
393 static void ath11k_ahb_stop(struct ath11k_base *ab)
395 if (!test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
396 ath11k_ahb_ce_irqs_disable(ab);
397 ath11k_ahb_sync_ce_irqs(ab);
398 ath11k_ahb_kill_tasklets(ab);
399 del_timer_sync(&ab->rx_replenish_retry);
400 ath11k_ce_cleanup_pipes(ab);
403 static int ath11k_ahb_power_up(struct ath11k_base *ab)
405 struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
408 ret = rproc_boot(ab_ahb->tgt_rproc);
410 ath11k_err(ab, "failed to boot the remote processor Q6\n");
415 static void ath11k_ahb_power_down(struct ath11k_base *ab)
417 struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
419 rproc_shutdown(ab_ahb->tgt_rproc);
422 static int ath11k_ahb_fwreset_from_cold_boot(struct ath11k_base *ab)
426 if (ath11k_cold_boot_cal == 0 || ab->qmi.cal_done ||
427 ab->hw_params.cold_boot_calib == 0 ||
428 ab->hw_params.cbcal_restart_fw == 0)
431 ath11k_dbg(ab, ATH11K_DBG_AHB, "wait for cold boot done\n");
432 timeout = wait_event_timeout(ab->qmi.cold_boot_waitq,
433 (ab->qmi.cal_done == 1),
434 ATH11K_COLD_BOOT_FW_RESET_DELAY);
436 ath11k_cold_boot_cal = 0;
437 ath11k_warn(ab, "Coldboot Calibration failed timed out\n");
440 /* reset the firmware */
441 ath11k_ahb_power_down(ab);
442 ath11k_ahb_power_up(ab);
444 ath11k_dbg(ab, ATH11K_DBG_AHB, "exited from cold boot mode\n");
448 static void ath11k_ahb_init_qmi_ce_config(struct ath11k_base *ab)
450 struct ath11k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;
452 cfg->tgt_ce_len = ab->hw_params.target_ce_count;
453 cfg->tgt_ce = ab->hw_params.target_ce_config;
454 cfg->svc_to_ce_map_len = ab->hw_params.svc_to_ce_map_len;
455 cfg->svc_to_ce_map = ab->hw_params.svc_to_ce_map;
456 ab->qmi.service_ins_id = ab->hw_params.qmi_service_ins_id;
459 static void ath11k_ahb_free_ext_irq(struct ath11k_base *ab)
463 for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
464 struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
466 for (j = 0; j < irq_grp->num_irq; j++)
467 free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
469 netif_napi_del(&irq_grp->napi);
473 static void ath11k_ahb_free_irq(struct ath11k_base *ab)
478 if (ab->hw_params.hybrid_bus_type)
479 return ath11k_pcic_free_irq(ab);
481 for (i = 0; i < ab->hw_params.ce_count; i++) {
482 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
484 irq_idx = ATH11K_IRQ_CE0_OFFSET + i;
485 free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
488 ath11k_ahb_free_ext_irq(ab);
491 static void ath11k_ahb_ce_tasklet(struct tasklet_struct *t)
493 struct ath11k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq);
495 ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);
497 ath11k_ahb_ce_irq_enable(ce_pipe->ab, ce_pipe->pipe_num);
500 static irqreturn_t ath11k_ahb_ce_interrupt_handler(int irq, void *arg)
502 struct ath11k_ce_pipe *ce_pipe = arg;
504 /* last interrupt received for this CE */
505 ce_pipe->timestamp = jiffies;
507 ath11k_ahb_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num);
509 tasklet_schedule(&ce_pipe->intr_tq);
514 static int ath11k_ahb_ext_grp_napi_poll(struct napi_struct *napi, int budget)
516 struct ath11k_ext_irq_grp *irq_grp = container_of(napi,
517 struct ath11k_ext_irq_grp,
519 struct ath11k_base *ab = irq_grp->ab;
522 work_done = ath11k_dp_service_srng(ab, irq_grp, budget);
523 if (work_done < budget) {
524 napi_complete_done(napi, work_done);
525 ath11k_ahb_ext_grp_enable(irq_grp);
528 if (work_done > budget)
534 static irqreturn_t ath11k_ahb_ext_interrupt_handler(int irq, void *arg)
536 struct ath11k_ext_irq_grp *irq_grp = arg;
538 /* last interrupt received for this group */
539 irq_grp->timestamp = jiffies;
541 ath11k_ahb_ext_grp_disable(irq_grp);
543 napi_schedule(&irq_grp->napi);
548 static int ath11k_ahb_config_ext_irq(struct ath11k_base *ab)
550 struct ath11k_hw_params *hw = &ab->hw_params;
555 for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
556 struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
561 init_dummy_netdev(&irq_grp->napi_ndev);
562 netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
563 ath11k_ahb_ext_grp_napi_poll);
565 for (j = 0; j < ATH11K_EXT_IRQ_NUM_MAX; j++) {
566 if (ab->hw_params.ring_mask->tx[i] & BIT(j)) {
567 irq_grp->irqs[num_irq++] =
568 wbm2host_tx_completions_ring1 - j;
571 if (ab->hw_params.ring_mask->rx[i] & BIT(j)) {
572 irq_grp->irqs[num_irq++] =
573 reo2host_destination_ring1 - j;
576 if (ab->hw_params.ring_mask->rx_err[i] & BIT(j))
577 irq_grp->irqs[num_irq++] = reo2host_exception;
579 if (ab->hw_params.ring_mask->rx_wbm_rel[i] & BIT(j))
580 irq_grp->irqs[num_irq++] = wbm2host_rx_release;
582 if (ab->hw_params.ring_mask->reo_status[i] & BIT(j))
583 irq_grp->irqs[num_irq++] = reo2host_status;
585 if (j < ab->hw_params.max_radios) {
586 if (ab->hw_params.ring_mask->rxdma2host[i] & BIT(j)) {
587 irq_grp->irqs[num_irq++] =
588 rxdma2host_destination_ring_mac1 -
589 ath11k_hw_get_mac_from_pdev_id(hw, j);
592 if (ab->hw_params.ring_mask->host2rxdma[i] & BIT(j)) {
593 irq_grp->irqs[num_irq++] =
594 host2rxdma_host_buf_ring_mac1 -
595 ath11k_hw_get_mac_from_pdev_id(hw, j);
598 if (ab->hw_params.ring_mask->rx_mon_status[i] & BIT(j)) {
599 irq_grp->irqs[num_irq++] =
600 ppdu_end_interrupts_mac1 -
601 ath11k_hw_get_mac_from_pdev_id(hw, j);
602 irq_grp->irqs[num_irq++] =
603 rxdma2host_monitor_status_ring_mac1 -
604 ath11k_hw_get_mac_from_pdev_id(hw, j);
608 irq_grp->num_irq = num_irq;
610 for (j = 0; j < irq_grp->num_irq; j++) {
611 int irq_idx = irq_grp->irqs[j];
613 irq = platform_get_irq_byname(ab->pdev,
615 ab->irq_num[irq_idx] = irq;
616 irq_set_status_flags(irq, IRQ_NOAUTOEN | IRQ_DISABLE_UNLAZY);
617 ret = request_irq(irq, ath11k_ahb_ext_interrupt_handler,
619 irq_name[irq_idx], irq_grp);
621 ath11k_err(ab, "failed request_irq for %d\n",
630 static int ath11k_ahb_config_irq(struct ath11k_base *ab)
635 if (ab->hw_params.hybrid_bus_type)
636 return ath11k_pcic_config_irq(ab);
638 /* Configure CE irqs */
639 for (i = 0; i < ab->hw_params.ce_count; i++) {
640 struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
642 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
645 irq_idx = ATH11K_IRQ_CE0_OFFSET + i;
647 tasklet_setup(&ce_pipe->intr_tq, ath11k_ahb_ce_tasklet);
648 irq = platform_get_irq_byname(ab->pdev, irq_name[irq_idx]);
649 ret = request_irq(irq, ath11k_ahb_ce_interrupt_handler,
650 IRQF_TRIGGER_RISING, irq_name[irq_idx],
655 ab->irq_num[irq_idx] = irq;
658 /* Configure external interrupts */
659 ret = ath11k_ahb_config_ext_irq(ab);
664 static int ath11k_ahb_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
665 u8 *ul_pipe, u8 *dl_pipe)
667 const struct service_to_pipe *entry;
668 bool ul_set = false, dl_set = false;
671 for (i = 0; i < ab->hw_params.svc_to_ce_map_len; i++) {
672 entry = &ab->hw_params.svc_to_ce_map[i];
674 if (__le32_to_cpu(entry->service_id) != service_id)
677 switch (__le32_to_cpu(entry->pipedir)) {
682 *dl_pipe = __le32_to_cpu(entry->pipenum);
687 *ul_pipe = __le32_to_cpu(entry->pipenum);
693 *dl_pipe = __le32_to_cpu(entry->pipenum);
694 *ul_pipe = __le32_to_cpu(entry->pipenum);
701 if (WARN_ON(!ul_set || !dl_set))
707 static int ath11k_ahb_hif_suspend(struct ath11k_base *ab)
709 struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
714 if (!device_may_wakeup(ab->dev))
717 wake_irq = ab->irq_num[ATH11K_PCI_IRQ_CE0_OFFSET + ATH11K_PCI_CE_WAKE_IRQ];
719 ret = enable_irq_wake(wake_irq);
721 ath11k_err(ab, "failed to enable wakeup irq :%d\n", ret);
725 value = u32_encode_bits(ab_ahb->smp2p_info.seq_no++,
726 ATH11K_AHB_SMP2P_SMEM_SEQ_NO);
727 value |= u32_encode_bits(ATH11K_AHB_POWER_SAVE_ENTER,
728 ATH11K_AHB_SMP2P_SMEM_MSG);
730 ret = qcom_smem_state_update_bits(ab_ahb->smp2p_info.smem_state,
731 ATH11K_AHB_SMP2P_SMEM_VALUE_MASK, value);
733 ath11k_err(ab, "failed to send smp2p power save enter cmd :%d\n", ret);
737 ath11k_dbg(ab, ATH11K_DBG_AHB, "device suspended\n");
742 static int ath11k_ahb_hif_resume(struct ath11k_base *ab)
744 struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
749 if (!device_may_wakeup(ab->dev))
752 wake_irq = ab->irq_num[ATH11K_PCI_IRQ_CE0_OFFSET + ATH11K_PCI_CE_WAKE_IRQ];
754 ret = disable_irq_wake(wake_irq);
756 ath11k_err(ab, "failed to disable wakeup irq: %d\n", ret);
760 reinit_completion(&ab->wow.wakeup_completed);
762 value = u32_encode_bits(ab_ahb->smp2p_info.seq_no++,
763 ATH11K_AHB_SMP2P_SMEM_SEQ_NO);
764 value |= u32_encode_bits(ATH11K_AHB_POWER_SAVE_EXIT,
765 ATH11K_AHB_SMP2P_SMEM_MSG);
767 ret = qcom_smem_state_update_bits(ab_ahb->smp2p_info.smem_state,
768 ATH11K_AHB_SMP2P_SMEM_VALUE_MASK, value);
770 ath11k_err(ab, "failed to send smp2p power save enter cmd :%d\n", ret);
774 ret = wait_for_completion_timeout(&ab->wow.wakeup_completed, 3 * HZ);
776 ath11k_warn(ab, "timed out while waiting for wow wakeup completion\n");
780 ath11k_dbg(ab, ATH11K_DBG_AHB, "device resumed\n");
785 static const struct ath11k_hif_ops ath11k_ahb_hif_ops_ipq8074 = {
786 .start = ath11k_ahb_start,
787 .stop = ath11k_ahb_stop,
788 .read32 = ath11k_ahb_read32,
789 .write32 = ath11k_ahb_write32,
791 .irq_enable = ath11k_ahb_ext_irq_enable,
792 .irq_disable = ath11k_ahb_ext_irq_disable,
793 .map_service_to_pipe = ath11k_ahb_map_service_to_pipe,
794 .power_down = ath11k_ahb_power_down,
795 .power_up = ath11k_ahb_power_up,
798 static const struct ath11k_hif_ops ath11k_ahb_hif_ops_wcn6750 = {
799 .start = ath11k_pcic_start,
800 .stop = ath11k_pcic_stop,
801 .read32 = ath11k_pcic_read32,
802 .write32 = ath11k_pcic_write32,
804 .irq_enable = ath11k_pcic_ext_irq_enable,
805 .irq_disable = ath11k_pcic_ext_irq_disable,
806 .get_msi_address = ath11k_pcic_get_msi_address,
807 .get_user_msi_vector = ath11k_pcic_get_user_msi_assignment,
808 .map_service_to_pipe = ath11k_pcic_map_service_to_pipe,
809 .power_down = ath11k_ahb_power_down,
810 .power_up = ath11k_ahb_power_up,
811 .suspend = ath11k_ahb_hif_suspend,
812 .resume = ath11k_ahb_hif_resume,
813 .ce_irq_enable = ath11k_pci_enable_ce_irqs_except_wake_irq,
814 .ce_irq_disable = ath11k_pci_disable_ce_irqs_except_wake_irq,
817 static int ath11k_core_get_rproc(struct ath11k_base *ab)
819 struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
820 struct device *dev = ab->dev;
821 struct rproc *prproc;
822 phandle rproc_phandle;
824 if (of_property_read_u32(dev->of_node, "qcom,rproc", &rproc_phandle)) {
825 ath11k_err(ab, "failed to get q6_rproc handle\n");
829 prproc = rproc_get_by_phandle(rproc_phandle);
831 ath11k_err(ab, "failed to get rproc\n");
834 ab_ahb->tgt_rproc = prproc;
839 static int ath11k_ahb_setup_msi_resources(struct ath11k_base *ab)
841 struct platform_device *pdev = ab->pdev;
842 phys_addr_t msi_addr_pa;
843 dma_addr_t msi_addr_iova;
844 struct resource *res;
849 ret = ath11k_pcic_init_msi_config(ab);
851 ath11k_err(ab, "failed to init msi config: %d\n", ret);
855 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
857 ath11k_err(ab, "failed to fetch msi_addr\n");
861 msi_addr_pa = res->start;
862 msi_addr_iova = dma_map_resource(ab->dev, msi_addr_pa, PAGE_SIZE,
864 if (dma_mapping_error(ab->dev, msi_addr_iova))
867 ab->pci.msi.addr_lo = lower_32_bits(msi_addr_iova);
868 ab->pci.msi.addr_hi = upper_32_bits(msi_addr_iova);
870 ret = of_property_read_u32_index(ab->dev->of_node, "interrupts", 1, &int_prop);
874 ab->pci.msi.ep_base_data = int_prop + 32;
876 for (i = 0; i < ab->pci.msi.config->total_vectors; i++) {
877 ret = platform_get_irq(pdev, i);
881 ab->pci.msi.irqs[i] = ret;
884 set_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags);
889 static int ath11k_ahb_setup_smp2p_handle(struct ath11k_base *ab)
891 struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
893 if (!ab->hw_params.smp2p_wow_exit)
896 ab_ahb->smp2p_info.smem_state = qcom_smem_state_get(ab->dev, "wlan-smp2p-out",
897 &ab_ahb->smp2p_info.smem_bit);
898 if (IS_ERR(ab_ahb->smp2p_info.smem_state)) {
899 ath11k_err(ab, "failed to fetch smem state: %ld\n",
900 PTR_ERR(ab_ahb->smp2p_info.smem_state));
901 return PTR_ERR(ab_ahb->smp2p_info.smem_state);
907 static void ath11k_ahb_release_smp2p_handle(struct ath11k_base *ab)
909 struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
911 if (!ab->hw_params.smp2p_wow_exit)
914 qcom_smem_state_put(ab_ahb->smp2p_info.smem_state);
917 static int ath11k_ahb_setup_resources(struct ath11k_base *ab)
919 struct platform_device *pdev = ab->pdev;
920 struct resource *mem_res;
923 if (ab->hw_params.hybrid_bus_type)
924 return ath11k_ahb_setup_msi_resources(ab);
926 mem = devm_platform_get_and_ioremap_resource(pdev, 0, &mem_res);
928 dev_err(&pdev->dev, "ioremap error\n");
933 ab->mem_len = resource_size(mem_res);
938 static int ath11k_ahb_setup_msa_resources(struct ath11k_base *ab)
940 struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
941 struct device *dev = ab->dev;
942 struct device_node *node;
946 node = of_parse_phandle(dev->of_node, "memory-region", 0);
950 ret = of_address_to_resource(node, 0, &r);
953 dev_err(dev, "failed to resolve msa fixed region\n");
957 ab_ahb->fw.msa_paddr = r.start;
958 ab_ahb->fw.msa_size = resource_size(&r);
960 node = of_parse_phandle(dev->of_node, "memory-region", 1);
964 ret = of_address_to_resource(node, 0, &r);
967 dev_err(dev, "failed to resolve ce fixed region\n");
971 ab_ahb->fw.ce_paddr = r.start;
972 ab_ahb->fw.ce_size = resource_size(&r);
977 static int ath11k_ahb_fw_resources_init(struct ath11k_base *ab)
979 struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
980 struct device *host_dev = ab->dev;
981 struct platform_device_info info = {0};
982 struct iommu_domain *iommu_dom;
983 struct platform_device *pdev;
984 struct device_node *node;
987 /* Chipsets not requiring MSA need not initialize
988 * MSA resources, return success in such cases.
990 if (!ab->hw_params.fixed_fw_mem)
993 ret = ath11k_ahb_setup_msa_resources(ab);
995 ath11k_err(ab, "failed to setup msa resources\n");
999 node = of_get_child_by_name(host_dev->of_node, "wifi-firmware");
1001 ab_ahb->fw.use_tz = true;
1005 info.fwnode = &node->fwnode;
1006 info.parent = host_dev;
1007 info.name = node->name;
1008 info.dma_mask = DMA_BIT_MASK(32);
1010 pdev = platform_device_register_full(&info);
1013 return PTR_ERR(pdev);
1016 ret = of_dma_configure(&pdev->dev, node, true);
1018 ath11k_err(ab, "dma configure fail: %d\n", ret);
1019 goto err_unregister;
1022 ab_ahb->fw.dev = &pdev->dev;
1024 iommu_dom = iommu_domain_alloc(&platform_bus_type);
1026 ath11k_err(ab, "failed to allocate iommu domain\n");
1028 goto err_unregister;
1031 ret = iommu_attach_device(iommu_dom, ab_ahb->fw.dev);
1033 ath11k_err(ab, "could not attach device: %d\n", ret);
1034 goto err_iommu_free;
1037 ret = iommu_map(iommu_dom, ab_ahb->fw.msa_paddr,
1038 ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size,
1039 IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
1041 ath11k_err(ab, "failed to map firmware region: %d\n", ret);
1042 goto err_iommu_detach;
1045 ret = iommu_map(iommu_dom, ab_ahb->fw.ce_paddr,
1046 ab_ahb->fw.ce_paddr, ab_ahb->fw.ce_size,
1047 IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
1049 ath11k_err(ab, "failed to map firmware CE region: %d\n", ret);
1050 goto err_iommu_unmap;
1053 ab_ahb->fw.use_tz = false;
1054 ab_ahb->fw.iommu_domain = iommu_dom;
1060 iommu_unmap(iommu_dom, ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size);
1063 iommu_detach_device(iommu_dom, ab_ahb->fw.dev);
1066 iommu_domain_free(iommu_dom);
1069 platform_device_unregister(pdev);
1075 static int ath11k_ahb_fw_resource_deinit(struct ath11k_base *ab)
1077 struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
1078 struct iommu_domain *iommu;
1079 size_t unmapped_size;
1081 /* Chipsets not requiring MSA would have not initialized
1082 * MSA resources, return success in such cases.
1084 if (!ab->hw_params.fixed_fw_mem)
1087 if (ab_ahb->fw.use_tz)
1090 iommu = ab_ahb->fw.iommu_domain;
1092 unmapped_size = iommu_unmap(iommu, ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size);
1093 if (unmapped_size != ab_ahb->fw.msa_size)
1094 ath11k_err(ab, "failed to unmap firmware: %zu\n",
1097 unmapped_size = iommu_unmap(iommu, ab_ahb->fw.ce_paddr, ab_ahb->fw.ce_size);
1098 if (unmapped_size != ab_ahb->fw.ce_size)
1099 ath11k_err(ab, "failed to unmap firmware CE memory: %zu\n",
1102 iommu_detach_device(iommu, ab_ahb->fw.dev);
1103 iommu_domain_free(iommu);
1105 platform_device_unregister(to_platform_device(ab_ahb->fw.dev));
1110 static int ath11k_ahb_probe(struct platform_device *pdev)
1112 struct ath11k_base *ab;
1113 const struct of_device_id *of_id;
1114 const struct ath11k_hif_ops *hif_ops;
1115 const struct ath11k_pci_ops *pci_ops;
1116 enum ath11k_hw_rev hw_rev;
1119 of_id = of_match_device(ath11k_ahb_of_match, &pdev->dev);
1121 dev_err(&pdev->dev, "failed to find matching device tree id\n");
1125 hw_rev = (enum ath11k_hw_rev)of_id->data;
1128 case ATH11K_HW_IPQ8074:
1129 case ATH11K_HW_IPQ6018_HW10:
1130 case ATH11K_HW_IPQ5018_HW10:
1131 hif_ops = &ath11k_ahb_hif_ops_ipq8074;
1134 case ATH11K_HW_WCN6750_HW10:
1135 hif_ops = &ath11k_ahb_hif_ops_wcn6750;
1136 pci_ops = &ath11k_ahb_pci_ops_wcn6750;
1139 dev_err(&pdev->dev, "unsupported device type %d\n", hw_rev);
1143 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1145 dev_err(&pdev->dev, "failed to set 32-bit consistent dma\n");
1149 ab = ath11k_core_alloc(&pdev->dev, sizeof(struct ath11k_ahb),
1152 dev_err(&pdev->dev, "failed to allocate ath11k base\n");
1156 ab->hif.ops = hif_ops;
1158 ab->hw_rev = hw_rev;
1159 ab->fw_mode = ATH11K_FIRMWARE_MODE_NORMAL;
1160 platform_set_drvdata(pdev, ab);
1162 ret = ath11k_pcic_register_pci_ops(ab, pci_ops);
1164 ath11k_err(ab, "failed to register PCI ops: %d\n", ret);
1168 ret = ath11k_core_pre_init(ab);
1172 ret = ath11k_ahb_setup_resources(ab);
1176 ab->mem_ce = ab->mem;
1178 if (ab->hw_params.ce_remap) {
1179 const struct ce_remap *ce_remap = ab->hw_params.ce_remap;
1180 /* ce register space is moved out of wcss unlike ipq8074 or ipq6018
1181 * and the space is not contiguous, hence remapping the CE registers
1182 * to a new space for accessing them.
1184 ab->mem_ce = ioremap(ce_remap->base, ce_remap->size);
1186 dev_err(&pdev->dev, "ce ioremap error\n");
1192 ret = ath11k_ahb_fw_resources_init(ab);
1196 ret = ath11k_ahb_setup_smp2p_handle(ab);
1200 ret = ath11k_hal_srng_init(ab);
1202 goto err_release_smp2p_handle;
1204 ret = ath11k_ce_alloc_pipes(ab);
1206 ath11k_err(ab, "failed to allocate ce pipes: %d\n", ret);
1207 goto err_hal_srng_deinit;
1210 ath11k_ahb_init_qmi_ce_config(ab);
1212 ret = ath11k_core_get_rproc(ab);
1214 ath11k_err(ab, "failed to get rproc: %d\n", ret);
1218 ret = ath11k_core_init(ab);
1220 ath11k_err(ab, "failed to init core: %d\n", ret);
1224 ret = ath11k_ahb_config_irq(ab);
1226 ath11k_err(ab, "failed to configure irq: %d\n", ret);
1230 ath11k_ahb_fwreset_from_cold_boot(ab);
1235 ath11k_ce_free_pipes(ab);
1237 err_hal_srng_deinit:
1238 ath11k_hal_srng_deinit(ab);
1240 err_release_smp2p_handle:
1241 ath11k_ahb_release_smp2p_handle(ab);
1244 ath11k_ahb_fw_resource_deinit(ab);
1247 ath11k_core_free(ab);
1248 platform_set_drvdata(pdev, NULL);
1253 static void ath11k_ahb_remove_prepare(struct ath11k_base *ab)
1257 if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags)) {
1258 left = wait_for_completion_timeout(&ab->driver_recovery,
1259 ATH11K_AHB_RECOVERY_TIMEOUT);
1261 ath11k_warn(ab, "failed to receive recovery response completion\n");
1264 set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags);
1265 cancel_work_sync(&ab->restart_work);
1266 cancel_work_sync(&ab->qmi.event_work);
1269 static void ath11k_ahb_free_resources(struct ath11k_base *ab)
1271 struct platform_device *pdev = ab->pdev;
1273 ath11k_ahb_free_irq(ab);
1274 ath11k_hal_srng_deinit(ab);
1275 ath11k_ahb_release_smp2p_handle(ab);
1276 ath11k_ahb_fw_resource_deinit(ab);
1277 ath11k_ce_free_pipes(ab);
1279 if (ab->hw_params.ce_remap)
1280 iounmap(ab->mem_ce);
1282 ath11k_core_free(ab);
1283 platform_set_drvdata(pdev, NULL);
1286 static int ath11k_ahb_remove(struct platform_device *pdev)
1288 struct ath11k_base *ab = platform_get_drvdata(pdev);
1290 if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
1291 ath11k_ahb_power_down(ab);
1292 ath11k_debugfs_soc_destroy(ab);
1293 ath11k_qmi_deinit_service(ab);
1297 ath11k_ahb_remove_prepare(ab);
1298 ath11k_core_deinit(ab);
1301 ath11k_ahb_free_resources(ab);
1306 static void ath11k_ahb_shutdown(struct platform_device *pdev)
1308 struct ath11k_base *ab = platform_get_drvdata(pdev);
1310 /* platform shutdown() & remove() are mutually exclusive.
1311 * remove() is invoked during rmmod & shutdown() during
1312 * system reboot/shutdown.
1314 ath11k_ahb_remove_prepare(ab);
1316 if (!(test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags)))
1317 goto free_resources;
1319 ath11k_core_deinit(ab);
1322 ath11k_ahb_free_resources(ab);
1325 static struct platform_driver ath11k_ahb_driver = {
1328 .of_match_table = ath11k_ahb_of_match,
1330 .probe = ath11k_ahb_probe,
1331 .remove = ath11k_ahb_remove,
1332 .shutdown = ath11k_ahb_shutdown,
1335 static int ath11k_ahb_init(void)
1337 return platform_driver_register(&ath11k_ahb_driver);
1339 module_init(ath11k_ahb_init);
1341 static void ath11k_ahb_exit(void)
1343 platform_driver_unregister(&ath11k_ahb_driver);
1345 module_exit(ath11k_ahb_exit);
1347 MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11ax WLAN AHB devices");
1348 MODULE_LICENSE("Dual BSD/GPL");