/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/gfp.h>

#include "iwl-drv.h"
#include "iwl-trans.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-agn-hw.h"
#include "internal.h"
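/*
 * Select the NIC power source: VAUX if requested and the device can wake
 * from D3cold via PME, otherwise VMAIN.
 */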
static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
{
        if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
                iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
                                       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
                                       ~APMG_PS_CTRL_MSK_PWR_SRC);
        else
                iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
                                       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
                                       ~APMG_PS_CTRL_MSK_PWR_SRC);
}
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT	0x041
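/*
 * Apply the L0S/L1 workaround described below and record whether ASPM
 * power management (trans->pm_support) and LTR (trans->ltr_enabled) are
 * available on this link.
 */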
static void iwl_pcie_apm_config(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        u16 lctl;
        u16 cap;

        /*
         * HW bug W/A for instability in PCIe bus L0S->L1 transition.
         * Check if BIOS (or OS) enabled L1-ASPM on this device.
         * If so (likely), disable L0S, so device moves directly L0->L1;
         *    costs negligible amount of power savings.
         * If not (unlikely), enable L0S, so there is at least some
         *    power savings, even without L1.
         */
        pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
        if (lctl & PCI_EXP_LNKCTL_ASPM_L1)
                iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
        else
                iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
        trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);

        pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
        trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
        dev_info(trans->dev, "L1 %sabled - LTR %sabled\n",
                 (lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
                 trans->ltr_enabled ? "En" : "Dis");
}
/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
 * NOTE:  This does not load uCode nor start the embedded processor
 */
static int iwl_pcie_apm_init(struct iwl_trans *trans)
{
        int ret = 0;

        IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

        /*
         * Use "set_bit" below rather than "write", to preserve any hardware
         * bits already set by default after reset.
         */

        /* Disable L0S exit timer (platform NMI Work/Around) */
        iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
                    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

        /*
         * Disable L0s without affecting L1;
         * don't wait for ICH L0s (ICH bug W/A)
         */
        iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
                    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

        /* Set FH wait threshold to maximum (HW error during stress W/A) */
        iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

        /*
         * Enable HAP INTA (interrupt from management bus) to
         * wake device's PCI Express link L1a -> L0s
         */
        iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
                    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

        iwl_pcie_apm_config(trans);

        /* Configure analog phase-lock-loop before activating to D0A */
        if (trans->cfg->base_params->pll_cfg_val)
                iwl_set_bit(trans, CSR_ANA_PLL_CFG,
                            trans->cfg->base_params->pll_cfg_val);

        /*
         * Set "initialization complete" bit to move adapter from
         * D0U* --> D0A* (powered-up active) state.
         */
        iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

        /*
         * Wait for clock stabilization; once stabilized, access to
         * device-internal resources is supported, e.g. iwl_write_prph()
         * and accesses to uCode SRAM.
         */
        ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
                           CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
                           CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
        if (ret < 0) {
                IWL_DEBUG_INFO(trans, "Failed to init the card\n");
                goto out;
        }

        if (trans->cfg->host_interrupt_operation_mode) {
                /*
                 * This is a bit of an abuse - this is needed for 7260 / 3160
                 * only, but we check host_interrupt_operation_mode even if
                 * this is not related to host_interrupt_operation_mode.
                 *
                 * Enable the oscillator to count wake up time for L1 exit. This
                 * consumes slightly more power (100uA) - but allows to be sure
                 * that we wake up from L1 on time.
                 *
                 * This looks weird: read twice the same register, discard the
                 * value, set a bit, and yet again, read that same register
                 * just to discard the value. But that's the way the hardware
                 * seems to like it.
                 */
                iwl_read_prph(trans, OSC_CLK);
                iwl_read_prph(trans, OSC_CLK);
                iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL);
                iwl_read_prph(trans, OSC_CLK);
                iwl_read_prph(trans, OSC_CLK);
        }

        /*
         * Enable DMA clock and wait for it to stabilize.
         *
         * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
         * do not disable clocks.  This preserves any hardware bits already
         * set by default in "CLK_CTRL_REG" after reset.
         */
        iwl_write_prph(trans, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
        udelay(20);

        /* Disable L1-Active */
        iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
                          APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

        /* Clear the interrupt in APMG if the NIC is in RFKILL */
        iwl_write_prph(trans, APMG_RTC_INT_STT_REG, APMG_RTC_INT_STT_RFKILL);

        set_bit(STATUS_DEVICE_ENABLED, &trans->status);

out:
        return ret;
}
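/*
 * Ask the device to stop its bus-master DMA activity and wait (up to
 * 100 usec) for the master-disabled bit to latch.
 */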
static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
{
        int ret = 0;

        /* stop device's busmaster DMA activity */
        iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

        ret = iwl_poll_bit(trans, CSR_RESET,
                           CSR_RESET_REG_FLAG_MASTER_DISABLED,
                           CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
        if (ret < 0)
                IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");

        IWL_DEBUG_INFO(trans, "stop master\n");

        return ret;
}
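/* Stop the device's bus-master DMA, reset it and drop it back to D0U. */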
static void iwl_pcie_apm_stop(struct iwl_trans *trans)
{
        IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

        clear_bit(STATUS_DEVICE_ENABLED, &trans->status);

        /* Stop device's DMA activity */
        iwl_pcie_apm_stop_master(trans);

        /* Reset the entire device */
        iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

        udelay(10);

        /*
         * Clear "initialization complete" bit to move adapter from
         * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
         */
        iwl_clear_bit(trans, CSR_GP_CNTRL,
                      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
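/*
 * Bring the NIC to an operational state: start the APM, set the power
 * source, let the op_mode configure the hardware, and (re)initialize the
 * Rx and Tx/command queues.
 */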
static int iwl_pcie_nic_init(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        spin_lock(&trans_pcie->irq_lock);
        iwl_pcie_apm_init(trans);

        spin_unlock(&trans_pcie->irq_lock);

        iwl_pcie_set_pwr(trans, false);

        iwl_op_mode_nic_config(trans->op_mode);

        /* Allocate the RX queue, or reset if it is already allocated */
        iwl_pcie_rx_init(trans);

        /* Allocate or reset and init all Tx and Command queues */
        if (iwl_pcie_tx_init(trans))
                return -ENOMEM;

        if (trans->cfg->base_params->shadow_reg_enable) {
                /* enable shadow regs in HW */
                iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
                IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
        }

        return 0;
}
#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
{
        int ret;

        iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
                    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

        /* See if we got it */
        ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
                           CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
                           CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
                           HW_READY_TIMEOUT);

        IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");

        return ret;
}
/* Note: returns standard 0/-ERROR code */
static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
{
        int ret;
        int t = 0;
        int iter;

        IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

        ret = iwl_pcie_set_hw_ready(trans);
        /* If the card is ready, exit 0 */
        if (ret >= 0)
                return 0;

        for (iter = 0; iter < 10; iter++) {
                /* If HW is not ready, prepare the conditions to check again */
                iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
                            CSR_HW_IF_CONFIG_REG_PREPARE);

                do {
                        ret = iwl_pcie_set_hw_ready(trans);
                        if (ret >= 0)
                                return 0;

                        usleep_range(200, 1000);
                        t += 200;
                } while (t < 150000);
                msleep(25);
        }

        IWL_DEBUG_INFO(trans, "got NIC after %d iterations\n", iter);

        return ret;
}

/*
 * ucode
 */
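/*
 * DMA one firmware chunk to device SRAM over the FH service channel and
 * wait (up to 5 seconds) for the "ucode write complete" interrupt.
 */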
static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr,
                                        dma_addr_t phy_addr, u32 byte_cnt)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        int ret;

        trans_pcie->ucode_write_complete = false;

        iwl_write_direct32(trans,
                           FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
                           FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

        iwl_write_direct32(trans,
                           FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
                           dst_addr);

        iwl_write_direct32(trans,
                           FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
                           phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

        iwl_write_direct32(trans,
                           FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
                           (iwl_get_dma_hi_addr(phy_addr)
                                << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

        iwl_write_direct32(trans,
                           FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
                           1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
                           1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
                           FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

        iwl_write_direct32(trans,
                           FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
                           FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
                           FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
                           FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

        ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
                                 trans_pcie->ucode_write_complete, 5 * HZ);
        if (!ret) {
                IWL_ERR(trans, "Failed to load firmware chunk!\n");
                return -ETIMEDOUT;
        }

        return 0;
}
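/*
 * Load one firmware section: copy it into a coherent DMA bounce buffer
 * (falling back to PAGE_SIZE chunks if a large allocation fails) and push
 * it chunk by chunk with iwl_pcie_load_firmware_chunk().
 */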
static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
                                 const struct fw_desc *section)
{
        u8 *v_addr;
        dma_addr_t p_addr;
        u32 offset, chunk_sz = section->len;
        int ret = 0;

        IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
                     section_num);

        v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr,
                                    GFP_KERNEL | __GFP_NOWARN);
        if (!v_addr) {
                IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n");
                chunk_sz = PAGE_SIZE;
                v_addr = dma_alloc_coherent(trans->dev, chunk_sz,
                                            &p_addr, GFP_KERNEL);
                if (!v_addr)
                        return -ENOMEM;
        }

        for (offset = 0; offset < section->len; offset += chunk_sz) {
                u32 copy_size;

                copy_size = min_t(u32, chunk_sz, section->len - offset);

                memcpy(v_addr, (u8 *)section->data + offset, copy_size);
                ret = iwl_pcie_load_firmware_chunk(trans,
                                                   section->offset + offset,
                                                   p_addr, copy_size);
                if (ret) {
                        IWL_ERR(trans,
                                "Could not load the [%d] uCode section\n",
                                section_num);
                        break;
                }
        }

        dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr);
        return ret;
}
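/*
 * Kick off the secure-boot flow for the given CPU: mark the image as
 * loaded in CSR_UCODE_LOAD_STATUS_ADDR and poll the secure-boot status
 * register until verification starts and then completes.
 */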
static int iwl_pcie_secure_set(struct iwl_trans *trans, int cpu)
{
        int shift_param;
        u32 address;
        int ret = 0;

        if (cpu == 1) {
                shift_param = 0;
                address = CSR_SECURE_BOOT_CPU1_STATUS_ADDR;
        } else {
                shift_param = 16;
                address = CSR_SECURE_BOOT_CPU2_STATUS_ADDR;
        }

        /* set CPU to started */
        iwl_trans_set_bits_mask(trans,
                                CSR_UCODE_LOAD_STATUS_ADDR,
                                CSR_CPU_STATUS_LOADING_STARTED << shift_param,
                                1);

        /* set last complete descriptor number */
        iwl_trans_set_bits_mask(trans,
                                CSR_UCODE_LOAD_STATUS_ADDR,
                                CSR_CPU_STATUS_NUM_OF_LAST_COMPLETED
                                << shift_param,
                                1);

        /* set last loaded block */
        iwl_trans_set_bits_mask(trans,
                                CSR_UCODE_LOAD_STATUS_ADDR,
                                CSR_CPU_STATUS_NUM_OF_LAST_LOADED_BLOCK
                                << shift_param,
                                1);

        /* image loading complete */
        iwl_trans_set_bits_mask(trans,
                                CSR_UCODE_LOAD_STATUS_ADDR,
                                CSR_CPU_STATUS_LOADING_COMPLETED
                                << shift_param,
                                1);

        /* set FH_TCSR_0_REG */
        iwl_trans_set_bits_mask(trans, FH_TCSR_0_REG0, 0x00400000, 1);

        /* verify image verification started */
        ret = iwl_poll_bit(trans, address,
                           CSR_SECURE_BOOT_CPU_STATUS_VERF_STATUS,
                           CSR_SECURE_BOOT_CPU_STATUS_VERF_STATUS,
                           CSR_SECURE_TIME_OUT);
        if (ret < 0) {
                IWL_ERR(trans, "secure boot process didn't start\n");
                return ret;
        }

        /* wait for image verification to complete */
        ret = iwl_poll_bit(trans, address,
                           CSR_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED,
                           CSR_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED,
                           CSR_SECURE_TIME_OUT);
        if (ret < 0) {
                IWL_ERR(trans, "Time out on secure boot process\n");
                return ret;
        }

        return 0;
}
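/*
 * Load all sections of the given firmware image into the device, handling
 * single/dual-CPU images and the secure-boot flow when the image is
 * marked as secured.
 */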
static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
                                     const struct fw_img *image)
{
        int i, ret = 0;

        IWL_DEBUG_FW(trans,
                     "working with %s image\n",
                     image->is_secure ? "Secured" : "Non Secured");
        IWL_DEBUG_FW(trans,
                     "working with %s CPU\n",
                     image->is_dual_cpus ? "Dual" : "Single");

        /* configure the ucode to be ready to get the secured image */
        if (image->is_secure) {
                /* set secure boot inspector addresses */
                iwl_write32(trans, CSR_SECURE_INSPECTOR_CODE_ADDR, 0);
                iwl_write32(trans, CSR_SECURE_INSPECTOR_DATA_ADDR, 0);

                /* release CPU1 reset if secure inspector image burned in OTP */
                iwl_write32(trans, CSR_RESET, 0);
        }

        /* load to FW the binary sections of CPU1 */
        IWL_DEBUG_INFO(trans, "Loading CPU1\n");
        for (i = 0;
             i < IWL_UCODE_FIRST_SECTION_OF_SECOND_CPU;
             i++) {
                if (!image->sec[i].data)
                        break;
                ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
                if (ret)
                        return ret;
        }

        /* configure the ucode to start secure process on CPU1 */
        if (image->is_secure) {
                /* config CPU1 to start secure protocol */
                ret = iwl_pcie_secure_set(trans, 1);
                if (ret)
                        return ret;
        } else {
                /* Remove all resets to allow NIC to operate */
                iwl_write32(trans, CSR_RESET, 0);
        }

        if (image->is_dual_cpus) {
                /* load to FW the binary sections of CPU2 */
                IWL_DEBUG_INFO(trans, "working w/ DUAL CPUs - Loading CPU2\n");
                for (i = IWL_UCODE_FIRST_SECTION_OF_SECOND_CPU;
                     i < IWL_UCODE_SECTION_MAX; i++) {
                        if (!image->sec[i].data)
                                break;
                        ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
                        if (ret)
                                return ret;
                }

                if (image->is_secure) {
                        /* set CPU2 for secure protocol */
                        ret = iwl_pcie_secure_set(trans, 2);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}
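/*
 * Bring the hardware up, check the RF-kill state and initialize the NIC,
 * then load the firmware image; interrupts are enabled before the load so
 * the "ucode write complete" interrupt can be delivered.
 */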
static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
                                   const struct fw_img *fw, bool run_in_rfkill)
{
        int ret;
        bool hw_rfkill;

        /* This may fail if AMT took ownership of the device */
        if (iwl_pcie_prepare_card_hw(trans)) {
                IWL_WARN(trans, "Exit HW not ready\n");
                return -EIO;
        }

        iwl_enable_rfkill_int(trans);

        /* If platform's RF_KILL switch is NOT set to KILL */
        hw_rfkill = iwl_is_rfkill_set(trans);
        if (hw_rfkill)
                set_bit(STATUS_RFKILL, &trans->status);
        else
                clear_bit(STATUS_RFKILL, &trans->status);
        iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
        if (hw_rfkill && !run_in_rfkill)
                return -ERFKILL;

        iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

        ret = iwl_pcie_nic_init(trans);
        if (ret) {
                IWL_ERR(trans, "Unable to init nic\n");
                return ret;
        }

        /* make sure rfkill handshake bits are cleared */
        iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
        iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
                    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

        /* clear (again), then enable host interrupts */
        iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
        iwl_enable_interrupts(trans);

        /* really make sure rfkill handshake bits are cleared */
        iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
        iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

        /* Load the given image to the HW */
        return iwl_pcie_load_given_ucode(trans, fw);
}
static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
        iwl_pcie_reset_ict(trans);
        iwl_pcie_tx_start(trans, scd_addr);
}
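/*
 * Stop the device: disable interrupts and the ICT table, stop Tx/Rx and
 * the APM, reset the on-board processor, clear the status bits and
 * re-check the RF-kill state.
 */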
static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        bool hw_rfkill, was_hw_rfkill;

        was_hw_rfkill = iwl_is_rfkill_set(trans);

        /* tell the device to stop sending interrupts */
        spin_lock(&trans_pcie->irq_lock);
        iwl_disable_interrupts(trans);
        spin_unlock(&trans_pcie->irq_lock);

        /* device going down, Stop using ICT table */
        iwl_pcie_disable_ict(trans);

        /*
         * If a HW restart happens during firmware loading,
         * then the firmware loading might call this function
         * and later it might be called again due to the
         * restart. So don't process again if the device is
         * already dead.
         */
        if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
                iwl_pcie_tx_stop(trans);
                iwl_pcie_rx_stop(trans);

                /* Power-down device's busmaster DMA clocks */
                iwl_write_prph(trans, APMG_CLK_DIS_REG,
                               APMG_CLK_VAL_DMA_CLK_RQT);
                udelay(5);
        }

        /* Make sure (redundant) we've released our request to stay awake */
        iwl_clear_bit(trans, CSR_GP_CNTRL,
                      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

        /* Stop the device, and put it in low power state */
        iwl_pcie_apm_stop(trans);

        /* Upon stop, the APM issues an interrupt if HW RF kill is set.
         * Clean again the interrupt here
         */
        spin_lock(&trans_pcie->irq_lock);
        iwl_disable_interrupts(trans);
        spin_unlock(&trans_pcie->irq_lock);

        /* stop and reset the on-board processor */
        iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

        /* clear all status bits */
        clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
        clear_bit(STATUS_INT_ENABLED, &trans->status);
        clear_bit(STATUS_DEVICE_ENABLED, &trans->status);
        clear_bit(STATUS_TPOWER_PMI, &trans->status);
        clear_bit(STATUS_RFKILL, &trans->status);

        /*
         * Even if we stop the HW, we still want the RF kill
         * interrupt
         */
        iwl_enable_rfkill_int(trans);

        /*
         * Check again since the RF kill state may have changed while
         * all the interrupts were disabled, in this case we couldn't
         * receive the RF kill interrupt and update the state in the
         * op_mode.
         * Don't call the op_mode if the rfkill state hasn't changed.
         * This allows the op_mode to call stop_device from the rfkill
         * notification without endless recursion. Under very rare
         * circumstances, we might have a small recursion if the rfkill
         * state changed exactly now while we were called from stop_device.
         * This is very unlikely but can happen and is supported.
         */
        hw_rfkill = iwl_is_rfkill_set(trans);
        if (hw_rfkill)
                set_bit(STATUS_RFKILL, &trans->status);
        else
                clear_bit(STATUS_RFKILL, &trans->status);
        if (hw_rfkill != was_hw_rfkill)
                iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
}
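/*
 * Put the device into D3 (WoWLAN) suspend: in test mode the host stays
 * awake, otherwise drop MAC access, reset the Tx queues and switch the
 * power source to VAUX.
 */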
static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
{
        iwl_disable_interrupts(trans);

        /*
         * in testing mode, the host stays awake and the
         * hardware won't be reset (not even partially)
         */
        if (test)
                return;

        iwl_pcie_disable_ict(trans);

        iwl_clear_bit(trans, CSR_GP_CNTRL,
                      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
        iwl_clear_bit(trans, CSR_GP_CNTRL,
                      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

        /*
         * reset TX queues -- some of their registers reset during S3
         * so if we don't reset everything here the D3 image would try
         * to execute some invalid memory upon resume
         */
        iwl_trans_pcie_tx_reset(trans);

        iwl_pcie_set_pwr(trans, true);
}
static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
                                    enum iwl_d3_status *status,
                                    bool test)
{
        u32 val;
        int ret;

        if (test) {
                iwl_enable_interrupts(trans);
                *status = IWL_D3_STATUS_ALIVE;
                return 0;
        }

        iwl_pcie_set_pwr(trans, false);

        val = iwl_read32(trans, CSR_RESET);
        if (val & CSR_RESET_REG_FLAG_NEVO_RESET) {
                *status = IWL_D3_STATUS_RESET;
                return 0;
        }

        /*
         * Also enables interrupts - none will happen as the device doesn't
         * know we're waking it up, only when the opmode actually tells it
         * after this resumes.
         */
        iwl_pcie_reset_ict(trans);

        iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
        iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

        ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
                           CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
                           CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
                           25000);
        if (ret < 0) {
                IWL_ERR(trans, "Failed to resume the device (mac ready)\n");
                return ret;
        }

        iwl_trans_pcie_tx_reset(trans);

        ret = iwl_pcie_rx_init(trans);
        if (ret) {
                IWL_ERR(trans, "Failed to resume the device (RX reset)\n");
                return ret;
        }

        *status = IWL_D3_STATUS_ALIVE;
        return 0;
}
static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
        bool hw_rfkill;
        int err;

        err = iwl_pcie_prepare_card_hw(trans);
        if (err) {
                IWL_ERR(trans, "Error while preparing HW: %d\n", err);
                return err;
        }

        /* Reset the entire device */
        iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

        usleep_range(10, 15);

        iwl_pcie_apm_init(trans);

        /* From now on, the op_mode will be kept updated about RF kill state */
        iwl_enable_rfkill_int(trans);

        hw_rfkill = iwl_is_rfkill_set(trans);
        if (hw_rfkill)
                set_bit(STATUS_RFKILL, &trans->status);
        else
                clear_bit(STATUS_RFKILL, &trans->status);
        iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);

        return 0;
}
static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        /* disable interrupts - don't enable HW RF kill interrupt */
        spin_lock(&trans_pcie->irq_lock);
        iwl_disable_interrupts(trans);
        spin_unlock(&trans_pcie->irq_lock);

        iwl_pcie_apm_stop(trans);

        spin_lock(&trans_pcie->irq_lock);
        iwl_disable_interrupts(trans);
        spin_unlock(&trans_pcie->irq_lock);

        iwl_pcie_disable_ict(trans);
}
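/* Raw register accessors into the device's memory-mapped PCI BAR. */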
static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
        writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
        writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
{
        return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
{
        iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR,
                               ((reg & 0x000FFFFF) | (3 << 24)));
        return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
}

static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
                                      u32 val)
{
        iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
                               ((addr & 0x000FFFFF) | (3 << 24)));
        iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
}
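/* Copy the transport configuration supplied by the op_mode into trans_pcie. */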
static void iwl_trans_pcie_configure(struct iwl_trans *trans,
                                     const struct iwl_trans_config *trans_cfg)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        trans_pcie->cmd_queue = trans_cfg->cmd_queue;
        trans_pcie->cmd_fifo = trans_cfg->cmd_fifo;
        if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
                trans_pcie->n_no_reclaim_cmds = 0;
        else
                trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
        if (trans_pcie->n_no_reclaim_cmds)
                memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
                       trans_pcie->n_no_reclaim_cmds * sizeof(u8));

        trans_pcie->rx_buf_size_8k = trans_cfg->rx_buf_size_8k;
        if (trans_pcie->rx_buf_size_8k)
                trans_pcie->rx_page_order = get_order(8 * 1024);
        else
                trans_pcie->rx_page_order = get_order(4 * 1024);

        trans_pcie->wd_timeout =
                msecs_to_jiffies(trans_cfg->queue_watchdog_timeout);

        trans_pcie->command_names = trans_cfg->command_names;
        trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
}
void iwl_trans_pcie_free(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        synchronize_irq(trans_pcie->pci_dev->irq);

        iwl_pcie_tx_free(trans);
        iwl_pcie_rx_free(trans);

        free_irq(trans_pcie->pci_dev->irq, trans);
        iwl_pcie_free_ict(trans);

        pci_disable_msi(trans_pcie->pci_dev);
        iounmap(trans_pcie->hw_base);
        pci_release_regions(trans_pcie->pci_dev);
        pci_disable_device(trans_pcie->pci_dev);
        kmem_cache_destroy(trans->dev_cmd_pool);

        kfree(trans);
}
static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
{
        if (state)
                set_bit(STATUS_TPOWER_PMI, &trans->status);
        else
                clear_bit(STATUS_TPOWER_PMI, &trans->status);
}
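/*
 * Wake the NIC (MAC_ACCESS_REQ) and hold reg_lock so that a series of
 * register accesses can be made safely; returns false (and warns unless
 * silent) if the hardware never became accessible.
 */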
static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
                                           unsigned long *flags)
{
        int ret;
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        spin_lock_irqsave(&trans_pcie->reg_lock, *flags);

        if (trans_pcie->cmd_in_flight)
                goto out;

        /* this bit wakes up the NIC */
        __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
                                 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

        /*
         * These bits say the device is running, and should keep running for
         * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
         * but they do not indicate that embedded SRAM is restored yet;
         * 3945 and 4965 have volatile SRAM, and must save/restore contents
         * to/from host DRAM when sleeping/waking for power-saving.
         * Each direction takes approximately 1/4 millisecond; with this
         * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
         * series of register accesses are expected (e.g. reading Event Log),
         * to keep device from sleeping.
         *
         * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
         * SRAM is okay/restored.  We don't check that here because this call
         * is just for hardware register access; but GP1 MAC_SLEEP check is a
         * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
         *
         * 5000 series and later (including 1000 series) have non-volatile SRAM,
         * and do not save/restore SRAM when power cycling.
         */
        ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
                           CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
                           (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
                            CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
        if (unlikely(ret < 0)) {
                iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
                if (!silent) {
                        u32 val = iwl_read32(trans, CSR_GP_CNTRL);

                        WARN_ONCE(1,
                                  "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
                                  val);
                        spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
                        return false;
                }
        }

out:
        /*
         * Fool sparse by faking we release the lock - sparse will
         * track nic_access anyway.
         */
        __release(&trans_pcie->reg_lock);
        return true;
}
static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
                                              unsigned long *flags)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        lockdep_assert_held(&trans_pcie->reg_lock);

        /*
         * Fool sparse by faking we acquire the lock - sparse will
         * track nic_access anyway.
         */
        __acquire(&trans_pcie->reg_lock);

        if (trans_pcie->cmd_in_flight)
                goto out;

        __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
                                   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
        /*
         * Above we read the CSR_GP_CNTRL register, which will flush
         * any previous writes, but we need the write that clears the
         * MAC_ACCESS_REQ bit to be performed before any other writes
         * scheduled on different CPUs (after we drop reg_lock).
         */
        mmiowb();

out:
        spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
}
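/*
 * Read/write a block of 32-bit words from/to device memory through the
 * HBUS indirect-access window; fails with -EBUSY if the NIC cannot be
 * woken for access.
 */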
static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
                                   void *buf, int dwords)
{
        unsigned long flags;
        int offs, ret = 0;
        u32 *vals = buf;

        if (iwl_trans_grab_nic_access(trans, false, &flags)) {
                iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
                for (offs = 0; offs < dwords; offs++)
                        vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
                iwl_trans_release_nic_access(trans, &flags);
        } else {
                ret = -EBUSY;
        }
        return ret;
}

static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
                                    const void *buf, int dwords)
{
        unsigned long flags;
        int offs, ret = 0;
        const u32 *vals = buf;

        if (iwl_trans_grab_nic_access(trans, false, &flags)) {
                iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
                for (offs = 0; offs < dwords; offs++)
                        iwl_write32(trans, HBUS_TARG_MEM_WDAT,
                                    vals ? vals[offs] : 0);
                iwl_trans_release_nic_access(trans, &flags);
        } else {
                ret = -EBUSY;
        }
        return ret;
}
#define IWL_FLUSH_WAIT_MS	2000
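/*
 * Wait (up to IWL_FLUSH_WAIT_MS per queue) for all Tx queues except the
 * command queue to drain; on timeout, dump the scheduler and FH state of
 * the stuck queue for debugging.
 */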
static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_txq *txq;
        struct iwl_queue *q;
        int cnt;
        unsigned long now = jiffies;
        u32 scd_sram_addr;
        u8 buf[16];
        int ret = 0;

        /* waiting for all the tx frames complete might take a while */
        for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
                if (cnt == trans_pcie->cmd_queue)
                        continue;
                txq = &trans_pcie->txq[cnt];
                q = &txq->q;
                while (q->read_ptr != q->write_ptr && !time_after(jiffies,
                       now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
                        msleep(1);

                if (q->read_ptr != q->write_ptr) {
                        IWL_ERR(trans,
                                "fail to flush all tx fifo queues Q %d\n", cnt);
                        ret = -ETIMEDOUT;
                        break;
                }
        }

        if (!ret)
                return 0;

        IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
                txq->q.read_ptr, txq->q.write_ptr);

        scd_sram_addr = trans_pcie->scd_base_addr +
                        SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
        iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));

        iwl_print_hex_error(trans, buf, sizeof(buf));

        for (cnt = 0; cnt < FH_TCSR_CHNL_NUM; cnt++)
                IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", cnt,
                        iwl_read_direct32(trans, FH_TX_TRB_REG(cnt)));

        for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
                u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(cnt));
                u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
                bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
                u32 tbl_dw =
                        iwl_trans_read_mem32(trans, trans_pcie->scd_base_addr +
                                             SCD_TRANS_TBL_OFFSET_QUEUE(cnt));

                if (cnt & 0x1)
                        tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
                else
                        tbl_dw = tbl_dw & 0x0000FFFF;

                IWL_ERR(trans,
                        "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
                        cnt, active ? "" : "in", fifo, tbl_dw,
                        iwl_read_prph(trans,
                                      SCD_QUEUE_RDPTR(cnt)) & (txq->q.n_bd - 1),
                        iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt)));
        }

        return ret;
}
static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
                                         u32 mask, u32 value)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        unsigned long flags;

        spin_lock_irqsave(&trans_pcie->reg_lock, flags);
        __iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
        spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
}
static const char *get_csr_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
        switch (cmd) {
        IWL_CMD(CSR_HW_IF_CONFIG_REG);
        IWL_CMD(CSR_INT_COALESCING);
        IWL_CMD(CSR_INT);
        IWL_CMD(CSR_INT_MASK);
        IWL_CMD(CSR_FH_INT_STATUS);
        IWL_CMD(CSR_GPIO_IN);
        IWL_CMD(CSR_RESET);
        IWL_CMD(CSR_GP_CNTRL);
        IWL_CMD(CSR_HW_REV);
        IWL_CMD(CSR_EEPROM_REG);
        IWL_CMD(CSR_EEPROM_GP);
        IWL_CMD(CSR_OTP_GP_REG);
        IWL_CMD(CSR_GIO_REG);
        IWL_CMD(CSR_GP_UCODE_REG);
        IWL_CMD(CSR_GP_DRIVER_REG);
        IWL_CMD(CSR_UCODE_DRV_GP1);
        IWL_CMD(CSR_UCODE_DRV_GP2);
        IWL_CMD(CSR_LED_REG);
        IWL_CMD(CSR_DRAM_INT_TBL_REG);
        IWL_CMD(CSR_GIO_CHICKEN_BITS);
        IWL_CMD(CSR_ANA_PLL_CFG);
        IWL_CMD(CSR_HW_REV_WA_REG);
        IWL_CMD(CSR_DBG_HPET_MEM_REG);
        default:
                return "UNKNOWN";
        }
#undef IWL_CMD
}
void iwl_pcie_dump_csr(struct iwl_trans *trans)
{
        int i;
        static const u32 csr_tbl[] = {
                CSR_HW_IF_CONFIG_REG,
                CSR_INT_COALESCING,
                CSR_INT,
                CSR_INT_MASK,
                CSR_FH_INT_STATUS,
                CSR_GPIO_IN,
                CSR_RESET,
                CSR_GP_CNTRL,
                CSR_HW_REV,
                CSR_EEPROM_REG,
                CSR_EEPROM_GP,
                CSR_OTP_GP_REG,
                CSR_GIO_REG,
                CSR_GP_UCODE_REG,
                CSR_GP_DRIVER_REG,
                CSR_UCODE_DRV_GP1,
                CSR_UCODE_DRV_GP2,
                CSR_LED_REG,
                CSR_DRAM_INT_TBL_REG,
                CSR_GIO_CHICKEN_BITS,
                CSR_ANA_PLL_CFG,
                CSR_HW_REV_WA_REG,
                CSR_DBG_HPET_MEM_REG
        };
        IWL_ERR(trans, "CSR values:\n");
        IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
                "CSR_INT_PERIODIC_REG)\n");
        for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
                IWL_ERR(trans, "  %25s: 0X%08x\n",
                        get_csr_string(csr_tbl[i]),
                        iwl_read32(trans, csr_tbl[i]));
        }
}
#ifdef CONFIG_IWLWIFI_DEBUGFS
/* create and remove of files */
#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
	if (!debugfs_create_file(#name, mode, parent, trans,		\
				 &iwl_dbgfs_##name##_ops))		\
		goto err;						\
} while (0)

/* file operation */
#define DEBUGFS_READ_FILE_OPS(name)					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_WRITE_FILE_OPS(name)					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_READ_WRITE_FILE_OPS(name)				\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};
static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
                                       char __user *user_buf,
                                       size_t count, loff_t *ppos)
{
        struct iwl_trans *trans = file->private_data;
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_txq *txq;
        struct iwl_queue *q;
        char *buf;
        int pos = 0;
        int cnt;
        int ret;
        size_t bufsz;

        bufsz = sizeof(char) * 64 * trans->cfg->base_params->num_of_queues;

        if (!trans_pcie->txq)
                return -EAGAIN;

        buf = kzalloc(bufsz, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
                txq = &trans_pcie->txq[cnt];
                q = &txq->q;
                pos += scnprintf(buf + pos, bufsz - pos,
                                 "hwq %.2d: read=%u write=%u use=%d stop=%d\n",
                                 cnt, q->read_ptr, q->write_ptr,
                                 !!test_bit(cnt, trans_pcie->queue_used),
                                 !!test_bit(cnt, trans_pcie->queue_stopped));
        }
        ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
        kfree(buf);
        return ret;
}
static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
                                       char __user *user_buf,
                                       size_t count, loff_t *ppos)
{
        struct iwl_trans *trans = file->private_data;
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
        char buf[256];
        int pos = 0;
        const size_t bufsz = sizeof(buf);

        pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
                         rxq->read);
        pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
                         rxq->write);
        pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
                         rxq->free_count);
        if (rxq->rb_stts) {
                pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
                                 le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
        } else {
                pos += scnprintf(buf + pos, bufsz - pos,
                                 "closed_rb_num: Not Allocated\n");
        }
        return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
                                        char __user *user_buf,
                                        size_t count, loff_t *ppos)
{
        struct iwl_trans *trans = file->private_data;
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

        int pos = 0;
        char *buf;
        int bufsz = 24 * 64; /* 24 items * 64 char per item */
        ssize_t ret;

        buf = kzalloc(bufsz, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        pos += scnprintf(buf + pos, bufsz - pos,
                         "Interrupt Statistics Report:\n");

        pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
                         isr_stats->hw);
        pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
                         isr_stats->sw);
        if (isr_stats->sw || isr_stats->hw) {
                pos += scnprintf(buf + pos, bufsz - pos,
                                 "\tLast Restarting Code: 0x%X\n",
                                 isr_stats->err_code);
        }
#ifdef CONFIG_IWLWIFI_DEBUG
        pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
                         isr_stats->sch);
        pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
                         isr_stats->alive);
#endif
        pos += scnprintf(buf + pos, bufsz - pos,
                         "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);

        pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
                         isr_stats->ctkill);

        pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
                         isr_stats->wakeup);

        pos += scnprintf(buf + pos, bufsz - pos,
                         "Rx command responses:\t\t %u\n", isr_stats->rx);

        pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
                         isr_stats->tx);

        pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
                         isr_stats->unhandled);

        ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
        kfree(buf);
        return ret;
}
static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
                                         const char __user *user_buf,
                                         size_t count, loff_t *ppos)
{
        struct iwl_trans *trans = file->private_data;
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

        char buf[8];
        int buf_size;
        u32 reset_flag;

        memset(buf, 0, sizeof(buf));
        buf_size = min(count, sizeof(buf) - 1);
        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;
        if (sscanf(buf, "%x", &reset_flag) != 1)
                return -EFAULT;
        if (reset_flag == 0)
                memset(isr_stats, 0, sizeof(*isr_stats));

        return count;
}
static ssize_t iwl_dbgfs_csr_write(struct file *file,
                                   const char __user *user_buf,
                                   size_t count, loff_t *ppos)
{
        struct iwl_trans *trans = file->private_data;
        char buf[8];
        int buf_size;
        int csr;

        memset(buf, 0, sizeof(buf));
        buf_size = min(count, sizeof(buf) - 1);
        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;
        if (sscanf(buf, "%d", &csr) != 1)
                return -EFAULT;

        iwl_pcie_dump_csr(trans);

        return count;
}
static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
                                     char __user *user_buf,
                                     size_t count, loff_t *ppos)
{
        struct iwl_trans *trans = file->private_data;
        char *buf = NULL;
        int pos = 0;
        ssize_t ret = -EFAULT;

        ret = pos = iwl_dump_fh(trans, &buf);
        if (buf) {
                ret = simple_read_from_buffer(user_buf,
                                              count, ppos, buf, pos);
                kfree(buf);
        }

        return ret;
}
DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_READ_FILE_OPS(tx_queue);
DEBUGFS_WRITE_FILE_OPS(csr);

/*
 * Create the debugfs files and directories
 */
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
                                         struct dentry *dir)
{
        DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
        DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
        DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
        DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
        DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
        return 0;

err:
        IWL_ERR(trans, "failed to create the trans debugfs entry\n");
        return -ENOMEM;
}
#else
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
                                         struct dentry *dir)
{
        return 0;
}
#endif /* CONFIG_IWLWIFI_DEBUGFS */
static const struct iwl_trans_ops trans_ops_pcie = {
        .start_hw = iwl_trans_pcie_start_hw,
        .op_mode_leave = iwl_trans_pcie_op_mode_leave,
        .fw_alive = iwl_trans_pcie_fw_alive,
        .start_fw = iwl_trans_pcie_start_fw,
        .stop_device = iwl_trans_pcie_stop_device,

        .d3_suspend = iwl_trans_pcie_d3_suspend,
        .d3_resume = iwl_trans_pcie_d3_resume,

        .send_cmd = iwl_trans_pcie_send_hcmd,

        .tx = iwl_trans_pcie_tx,
        .reclaim = iwl_trans_pcie_reclaim,

        .txq_disable = iwl_trans_pcie_txq_disable,
        .txq_enable = iwl_trans_pcie_txq_enable,

        .dbgfs_register = iwl_trans_pcie_dbgfs_register,

        .wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,

        .write8 = iwl_trans_pcie_write8,
        .write32 = iwl_trans_pcie_write32,
        .read32 = iwl_trans_pcie_read32,
        .read_prph = iwl_trans_pcie_read_prph,
        .write_prph = iwl_trans_pcie_write_prph,
        .read_mem = iwl_trans_pcie_read_mem,
        .write_mem = iwl_trans_pcie_write_mem,
        .configure = iwl_trans_pcie_configure,
        .set_pmi = iwl_trans_pcie_set_pmi,
        .grab_nic_access = iwl_trans_pcie_grab_nic_access,
        .release_nic_access = iwl_trans_pcie_release_nic_access,
        .set_bits_mask = iwl_trans_pcie_set_bits_mask,
};
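/*
 * Allocate and set up the PCIe transport: enable the PCI device, map
 * BAR0, configure DMA masks and MSI, and register the interrupt handler.
 * Returns an ERR_PTR() on failure.
 */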
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
                                       const struct pci_device_id *ent,
                                       const struct iwl_cfg *cfg)
{
        struct iwl_trans_pcie *trans_pcie;
        struct iwl_trans *trans;
        u16 pci_cmd;
        int err;

        trans = kzalloc(sizeof(struct iwl_trans) +
                        sizeof(struct iwl_trans_pcie), GFP_KERNEL);
        if (!trans) {
                err = -ENOMEM;
                goto out;
        }

        trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        trans->ops = &trans_ops_pcie;
        trans->cfg = cfg;
        trans_lockdep_init(trans);
        trans_pcie->trans = trans;
        spin_lock_init(&trans_pcie->irq_lock);
        spin_lock_init(&trans_pcie->reg_lock);
        init_waitqueue_head(&trans_pcie->ucode_write_waitq);

        err = pci_enable_device(pdev);
        if (err)
                goto out_no_pci;

        if (!cfg->base_params->pcie_l1_allowed) {
                /*
                 * W/A - seems to solve weird behavior. We need to remove this
                 * if we don't want to stay in L1 all the time. This wastes a
                 * lot of power.
                 */
                pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
                                       PCIE_LINK_STATE_L1 |
                                       PCIE_LINK_STATE_CLKPM);
        }

        pci_set_master(pdev);

        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
        if (!err)
                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
        if (err) {
                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (!err)
                        err = pci_set_consistent_dma_mask(pdev,
                                                          DMA_BIT_MASK(32));
                /* both attempts failed: */
                if (err) {
                        dev_err(&pdev->dev, "No suitable DMA available\n");
                        goto out_pci_disable_device;
                }
        }

        err = pci_request_regions(pdev, DRV_NAME);
        if (err) {
                dev_err(&pdev->dev, "pci_request_regions failed\n");
                goto out_pci_disable_device;
        }

        trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
        if (!trans_pcie->hw_base) {
                dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
                err = -ENODEV;
                goto out_pci_release_regions;
        }

        /* We disable the RETRY_TIMEOUT register (0x41) to keep
         * PCI Tx retries from interfering with C3 CPU state */
        pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

        trans->dev = &pdev->dev;
        trans_pcie->pci_dev = pdev;
        iwl_disable_interrupts(trans);

        err = pci_enable_msi(pdev);
        if (err) {
                dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
                /* enable rfkill interrupt: hw bug w/a */
                pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
                if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
                        pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
                        pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
                }
        }

        trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
        trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
        snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
                 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);

        /* Initialize the wait queue for commands */
        init_waitqueue_head(&trans_pcie->wait_command_queue);

        snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
                 "iwl_cmd_pool:%s", dev_name(trans->dev));

        trans->dev_cmd_headroom = 0;
        trans->dev_cmd_pool =
                kmem_cache_create(trans->dev_cmd_pool_name,
                                  sizeof(struct iwl_device_cmd)
                                  + trans->dev_cmd_headroom,
                                  sizeof(void *),
                                  SLAB_HWCACHE_ALIGN,
                                  NULL);

        if (!trans->dev_cmd_pool) {
                err = -ENOMEM;
                goto out_pci_disable_msi;
        }

        if (iwl_pcie_alloc_ict(trans))
                goto out_free_cmd_pool;

        err = request_threaded_irq(pdev->irq, iwl_pcie_isr,
                                   iwl_pcie_irq_handler,
                                   IRQF_SHARED, DRV_NAME, trans);
        if (err) {
                IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
                goto out_free_ict;
        }

        trans_pcie->inta_mask = CSR_INI_SET_MASK;

        return trans;

out_free_ict:
        iwl_pcie_free_ict(trans);
out_free_cmd_pool:
        kmem_cache_destroy(trans->dev_cmd_pool);
out_pci_disable_msi:
        pci_disable_msi(pdev);
out_pci_release_regions:
        pci_release_regions(pdev);
out_pci_disable_device:
        pci_disable_device(pdev);
out_no_pci:
        kfree(trans);
out:
        return ERR_PTR(err);
}