} __packed; /* PERIPH_SCRATCH_CONTROL_S */
/*
- * struct iwl_prph_scratch_pnvm_cfg - ror config
+ * struct iwl_prph_scratch_pnvm_cfg - PNVM scratch
* @pnvm_base_addr: PNVM start address
- * @pnvm_size: PNVM size in DWs
+ * @pnvm_size: the size of the PNVM image in bytes
* @reserved: reserved
*/
struct iwl_prph_scratch_pnvm_cfg {
/*
* struct iwl_prph_scratch_uefi_cfg - prph scratch reduce power table
* @base_addr: reduce power table address
- * @size: table size in dwords
+ * @size: the size of the entire power table image
*/
struct iwl_prph_scratch_uefi_cfg {
__le64 base_addr;
const struct iwl_ucode_capabilities *capa);
void iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans,
const struct iwl_ucode_capabilities *capa);
-int iwl_trans_pcie_ctx_info_gen3_load_reduce_power
- (struct iwl_trans *trans,
- const struct iwl_pnvm_image *payloads);
-void iwl_trans_pcie_ctx_info_gen3_set_reduce_power(struct iwl_trans *trans);
+int
+iwl_trans_pcie_ctx_info_gen3_load_reduce_power(struct iwl_trans *trans,
+ const struct iwl_pnvm_image *payloads,
+ const struct iwl_ucode_capabilities *capa);
+void
+iwl_trans_pcie_ctx_info_gen3_set_reduce_power(struct iwl_trans *trans,
+ const struct iwl_ucode_capabilities *capa);
int iwl_trans_pcie_ctx_info_gen3_set_step(struct iwl_trans *trans,
u32 mbx_addr_0_step, u32 mbx_addr_1_step);
#endif /* __iwl_context_info_file_gen3_h__ */
void (*set_pnvm)(struct iwl_trans *trans,
const struct iwl_ucode_capabilities *capa);
int (*load_reduce_power)(struct iwl_trans *trans,
- const struct iwl_pnvm_image *payloads);
- void (*set_reduce_power)(struct iwl_trans *trans);
+ const struct iwl_pnvm_image *payloads,
+ const struct iwl_ucode_capabilities *capa);
+ void (*set_reduce_power)(struct iwl_trans *trans,
+ const struct iwl_ucode_capabilities *capa);
void (*interrupts)(struct iwl_trans *trans, bool enable);
int (*imr_dma_data)(struct iwl_trans *trans,
int size;
};
+/**
+ * struct iwl_dram_regions - DRAM regions container structure
+ * @drams: array of several DRAM areas that contain the pnvm and power
+ * reduction table payloads.
+ * @n_regions: number of DRAM regions that were allocated
+ * @prph_scratch_mem_desc: points to a structure allocated in DRAM,
+ * 	designed to show the FW where all the payloads are.
+ */
+struct iwl_dram_regions {
+ struct iwl_dram_data drams[IPC_DRAM_MAP_ENTRY_NUM_MAX];
+ struct iwl_dram_data prph_scratch_mem_desc;
+ u8 n_regions;
+};
+
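For illustration only, a minimal sketch (function name hypothetical) of how this container is meant to be consumed: each allocated payload region's DMA address is published through the prph_scratch_mem_desc block, mirroring what iwl_pcie_load_payloads_segments() does further down in this patch.

static void iwl_dram_regions_publish_sketch(struct iwl_dram_regions *regions)
{
	/* the descriptor block holds one little-endian DMA address per payload */
	__le64 *addrs = regions->prph_scratch_mem_desc.block;
	int i;

	for (i = 0; i < regions->n_regions; i++)
		addrs[i] = cpu_to_le64(regions->drams[i].physical);
}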
/**
* struct iwl_fw_mon - fw monitor per allocation id
* @num_frags: number of fragments
static inline int iwl_trans_load_reduce_power
(struct iwl_trans *trans,
- const struct iwl_pnvm_image *payloads)
+ const struct iwl_pnvm_image *payloads,
+ const struct iwl_ucode_capabilities *capa)
{
- return trans->ops->load_reduce_power(trans, payloads);
+ return trans->ops->load_reduce_power(trans, payloads, capa);
}
-static inline void iwl_trans_set_reduce_power(struct iwl_trans *trans)
+static inline void
+iwl_trans_set_reduce_power(struct iwl_trans *trans,
+ const struct iwl_ucode_capabilities *capa)
{
if (trans->ops->set_reduce_power)
- trans->ops->set_reduce_power(trans);
+ trans->ops->set_reduce_power(trans, capa);
}
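A hypothetical caller sketch (not a call site from this patch) showing how the new capa argument threads through both transport helpers: load the payloads into DRAM first, then hand their location to the FW.

static int iwl_reduce_power_setup_sketch(struct iwl_trans *trans,
					 const struct iwl_pnvm_image *payloads,
					 const struct iwl_ucode_capabilities *capa)
{
	int ret = iwl_trans_load_reduce_power(trans, payloads, capa);

	if (ret)
		return ret;

	iwl_trans_set_reduce_power(trans, capa);
	return 0;
}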
static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2018-2022 Intel Corporation
+ * Copyright (C) 2018-2023 Intel Corporation
*/
#include "iwl-trans.h"
#include "iwl-fh.h"
static int iwl_pcie_load_payloads_segments
(struct iwl_trans *trans,
+ struct iwl_dram_regions *dram_regions,
const struct iwl_pnvm_image *pnvm_data)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_dram_data *cur_pnvm_dram = &trans_pcie->pnvm_dram[0],
- *desc_dram = &trans_pcie->pnvm_regions_desc_array;
+ struct iwl_dram_data *cur_payload_dram = &dram_regions->drams[0];
+ struct iwl_dram_data *desc_dram = &dram_regions->prph_scratch_mem_desc;
struct iwl_prph_scrath_mem_desc_addr_array *addresses;
const void *data;
u32 len;
memset(desc_dram->block, 0, len);
/* allocate DRAM region for each payload */
- trans_pcie->n_pnvm_regions = 0;
+ dram_regions->n_regions = 0;
for (i = 0; i < pnvm_data->n_chunks; i++) {
len = pnvm_data->chunks[i].len;
data = pnvm_data->chunks[i].data;
- if (iwl_pcie_ctxt_info_alloc_dma(trans, data, len,
- cur_pnvm_dram)) {
- iwl_trans_pcie_free_pnvm_dram(trans_pcie, trans->dev);
+ if (iwl_pcie_ctxt_info_alloc_dma(trans,
+ data,
+ len,
+ cur_payload_dram)) {
+ iwl_trans_pcie_free_pnvm_dram_regions(dram_regions,
+ trans->dev);
return -ENOMEM;
}
- trans_pcie->n_pnvm_regions++;
- cur_pnvm_dram++;
+ dram_regions->n_regions++;
+ cur_payload_dram++;
}
/* fill desc with the DRAM payloads addresses */
addresses = desc_dram->block;
-
for (i = 0; i < pnvm_data->n_chunks; i++) {
addresses->mem_descs[i] =
- cpu_to_le64(trans_pcie->pnvm_dram[i].physical);
+ cpu_to_le64(dram_regions->drams[i].physical);
}
- trans->pnvm_loaded = true;
return 0;
}
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
&trans_pcie->prph_scratch->ctrl_cfg;
- struct iwl_dram_data *dram = &trans_pcie->pnvm_dram[0];
+ struct iwl_dram_regions *dram_regions = &trans_pcie->pnvm_data;
int ret = 0;
/* only allocate the DRAM if not allocated yet */
return -EINVAL;
}
- /* allocate several DRAM sections */
- if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG))
- return iwl_pcie_load_payloads_segments(trans, pnvm_payloads);
-
- /* allocate one DRAM section */
- ret = iwl_pcie_load_payloads_continuously(trans, pnvm_payloads, dram);
- if (!ret) {
- trans_pcie->n_pnvm_regions = 1;
- trans->pnvm_loaded = true;
+ /* save payloads in several DRAM sections */
+ if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG)) {
+ ret = iwl_pcie_load_payloads_segments(trans,
+ dram_regions,
+ pnvm_payloads);
+ if (!ret)
+ trans->pnvm_loaded = true;
+ } else {
+ /* save only in one DRAM section */
+ ret = iwl_pcie_load_payloads_continuously
+ (trans,
+ pnvm_payloads,
+ &dram_regions->drams[0]);
+ if (!ret) {
+ dram_regions->n_regions = 1;
+ trans->pnvm_loaded = true;
+ }
}
return ret;
}
+static inline size_t
+iwl_dram_regions_size(const struct iwl_dram_regions *dram_regions)
+{
+ size_t total_size = 0;
+ int i;
+
+ for (i = 0; i < dram_regions->n_regions; i++)
+ total_size += dram_regions->drams[i].size;
+
+ return total_size;
+}
+
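A small illustration (hypothetical sizes, not part of the patch) of the helper's contract: it returns the total payload byte count, which is what the functions below program into the PRPH scratch size fields.

static void iwl_dram_regions_size_example(void)
{
	/* pretend two payload chunks of 4096 and 2048 bytes were loaded */
	static const struct iwl_dram_regions regions = {
		.drams = { { .size = 4096 }, { .size = 2048 } },
		.n_regions = 2,
	};

	/* 6144 bytes end up in pnvm_cfg.pnvm_size / reduce_power_cfg.size */
	WARN_ON(iwl_dram_regions_size(&regions) != 6144);
}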
static void iwl_pcie_set_pnvm_segments(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
&trans_pcie->prph_scratch->ctrl_cfg;
+ struct iwl_dram_regions *dram_regions = &trans_pcie->pnvm_data;
prph_sc_ctrl->pnvm_cfg.pnvm_base_addr =
- cpu_to_le64(trans_pcie->pnvm_regions_desc_array.physical);
+ cpu_to_le64(dram_regions->prph_scratch_mem_desc.physical);
+ prph_sc_ctrl->pnvm_cfg.pnvm_size =
+ cpu_to_le32(iwl_dram_regions_size(dram_regions));
}
static void iwl_pcie_set_continuous_pnvm(struct iwl_trans *trans)
&trans_pcie->prph_scratch->ctrl_cfg;
prph_sc_ctrl->pnvm_cfg.pnvm_base_addr =
- cpu_to_le64(trans_pcie->pnvm_dram[0].physical);
+ cpu_to_le64(trans_pcie->pnvm_data.drams[0].physical);
prph_sc_ctrl->pnvm_cfg.pnvm_size =
- cpu_to_le32(trans_pcie->pnvm_dram[0].size);
+ cpu_to_le32(trans_pcie->pnvm_data.drams[0].size);
}
void iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans,
}
int iwl_trans_pcie_ctx_info_gen3_load_reduce_power(struct iwl_trans *trans,
- const struct iwl_pnvm_image *payloads)
+ const struct iwl_pnvm_image *payloads,
+ const struct iwl_ucode_capabilities *capa)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
&trans_pcie->prph_scratch->ctrl_cfg;
- struct iwl_dram_data *dram = &trans_pcie->reduce_power_dram;
+ struct iwl_dram_regions *dram_regions = &trans_pcie->reduced_tables_data;
+ int ret = 0;
+
+ /* only allocate the DRAM if not allocated yet */
+ if (trans->reduce_power_loaded)
+ return 0;
if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
return 0;
if (WARN_ON(prph_sc_ctrl->reduce_power_cfg.size))
return -EBUSY;
- /* only allocate the DRAM if not allocated yet */
- if (!trans->reduce_power_loaded)
- return iwl_pcie_load_payloads_continuously(trans,
- payloads,
- dram);
- return 0;
+ if (!payloads->n_chunks) {
+ IWL_DEBUG_FW(trans, "no payloads\n");
+ return -EINVAL;
+ }
+
+ /* save payloads in several DRAM sections */
+ if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG)) {
+ ret = iwl_pcie_load_payloads_segments(trans,
+ dram_regions,
+ payloads);
+ if (!ret)
+ trans->reduce_power_loaded = true;
+ } else {
+ /* save only in one DRAM section */
+ ret = iwl_pcie_load_payloads_continuously
+ (trans,
+ payloads,
+ &dram_regions->drams[0]);
+ if (!ret) {
+ dram_regions->n_regions = 1;
+ trans->reduce_power_loaded = true;
+ }
+ }
+
+ return ret;
}
-void iwl_trans_pcie_ctx_info_gen3_set_reduce_power(struct iwl_trans *trans)
+static void iwl_pcie_set_reduce_power_segments(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
&trans_pcie->prph_scratch->ctrl_cfg;
+ struct iwl_dram_regions *dram_regions = &trans_pcie->reduced_tables_data;
- if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
- return;
+ prph_sc_ctrl->reduce_power_cfg.base_addr =
+ cpu_to_le64(dram_regions->prph_scratch_mem_desc.physical);
+ prph_sc_ctrl->reduce_power_cfg.size =
+ cpu_to_le32(iwl_dram_regions_size(dram_regions));
+}
+
+static void iwl_pcie_set_continuous_reduce_power(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
+ &trans_pcie->prph_scratch->ctrl_cfg;
prph_sc_ctrl->reduce_power_cfg.base_addr =
- cpu_to_le64(trans_pcie->reduce_power_dram.physical);
+ cpu_to_le64(trans_pcie->reduced_tables_data.drams[0].physical);
prph_sc_ctrl->reduce_power_cfg.size =
- cpu_to_le32(trans_pcie->reduce_power_dram.size);
+ cpu_to_le32(trans_pcie->reduced_tables_data.drams[0].size);
+}
+
+void
+iwl_trans_pcie_ctx_info_gen3_set_reduce_power(struct iwl_trans *trans,
+ const struct iwl_ucode_capabilities *capa)
+{
+ if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+ return;
+
+ if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG))
+ iwl_pcie_set_reduce_power_segments(trans);
+ else
+ iwl_pcie_set_continuous_reduce_power(trans);
}
trans_pcie->fw_reset_handshake = trans_cfg->fw_reset_handshake;
}
-void iwl_trans_pcie_free_pnvm_dram(struct iwl_trans_pcie *trans_pcie,
- struct device *dev)
+void iwl_trans_pcie_free_pnvm_dram_regions(struct iwl_dram_regions *dram_regions,
+ struct device *dev)
{
u8 i;
- struct iwl_dram_data *desc_dram = &trans_pcie->pnvm_regions_desc_array;
+ struct iwl_dram_data *desc_dram = &dram_regions->prph_scratch_mem_desc;
- for (i = 0; i < trans_pcie->n_pnvm_regions; i++) {
- dma_free_coherent(dev, trans_pcie->pnvm_dram[i].size,
- trans_pcie->pnvm_dram[i].block,
- trans_pcie->pnvm_dram[i].physical);
+ /* free DRAM payloads */
+ for (i = 0; i < dram_regions->n_regions; i++) {
+ dma_free_coherent(dev, dram_regions->drams[i].size,
+ dram_regions->drams[i].block,
+ dram_regions->drams[i].physical);
}
- trans_pcie->n_pnvm_regions = 0;
+ dram_regions->n_regions = 0;
+ /* free DRAM addresses array */
if (desc_dram->block) {
dma_free_coherent(dev, desc_dram->size,
desc_dram->block,
desc_dram->physical);
}
- desc_dram->block = NULL;
+ memset(desc_dram, 0, sizeof(*desc_dram));
}
void iwl_trans_pcie_free(struct iwl_trans *trans)
iwl_pcie_free_fw_monitor(trans);
- iwl_trans_pcie_free_pnvm_dram(trans_pcie, trans->dev);
-
- if (trans_pcie->reduce_power_dram.size)
- dma_free_coherent(trans->dev,
- trans_pcie->reduce_power_dram.size,
- trans_pcie->reduce_power_dram.block,
- trans_pcie->reduce_power_dram.physical);
+ iwl_trans_pcie_free_pnvm_dram_regions(&trans_pcie->pnvm_data,
+ trans->dev);
+ iwl_trans_pcie_free_pnvm_dram_regions(&trans_pcie->reduced_tables_data,
+ trans->dev);
mutex_destroy(&trans_pcie->mutex);
iwl_trans_free(trans);