if (switch_mmu_context &&
gpu->sec_mode == ETNA_SEC_KERNEL) {
- unsigned short id =
- etnaviv_iommuv2_get_pta_id(gpu->mmu_context);
+ unsigned short id;
+
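+ /* make sure this context's PTA entry is up to date before its index is loaded */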
+ etnaviv_iommuv2_update_pta_entry(gpu->mmu_context);
+
+ id = etnaviv_iommuv2_get_pta_id(gpu->mmu_context);
CMD_LOAD_STATE(buffer,
VIVS_MMUv2_PTA_CONFIG,
VIVS_MMUv2_PTA_CONFIG_INDEX(id));
[...]
gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE);
}
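+/*
+ * Write this context's MTLB address and the 4K page table mode into its
+ * PTA slot, so the entry is current before the hardware loads it.
+ */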
+void etnaviv_iommuv2_update_pta_entry(struct etnaviv_iommu_context *context)
+{
+ struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
+
+ context->global->v2.pta_cpu[v2_context->id] = v2_context->mtlb_dma |
+ VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K;
+}
+
static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu,
struct etnaviv_iommu_context *context)
{
[...]
VIVS_MMUv2_SAFE_ADDRESS_CONFIG_SEC_SAFE_ADDR_HIGH(
upper_32_bits(context->global->bad_page_dma)));
- context->global->v2.pta_cpu[v2_context->id] = v2_context->mtlb_dma |
- VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K;
+ etnaviv_iommuv2_update_pta_entry(context);
/* trigger a PTA load through the FE */
prefetch = etnaviv_buffer_config_pta(gpu, v2_context->id);
[...]
return v2_context->id;
}
+
static void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu,
struct etnaviv_iommu_context *context)
{
[...]
if (!v2_context)
return NULL;
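+ /* PTA slot allocation is disabled below; every context uses entry 0 for now */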
+ v2_context->id = 0;
+#if 0
mutex_lock(&global->lock);
v2_context->id = find_first_zero_bit(global->v2.pta_alloc,
ETNAVIV_PTA_ENTRIES);
[...]
goto out_free;
}
mutex_unlock(&global->lock);
+#endif
v2_context->mtlb_cpu = dma_alloc_wc(global->dev, SZ_4K,
&v2_context->mtlb_dma, GFP_KERNEL);
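
For the call in the buffer hunk above to compile, the new helper presumably also needs a declaration in the shared header, next to the existing etnaviv_iommuv2_get_pta_id() prototype; a minimal sketch (placement is an assumption, the signature follows the definition above):

+void etnaviv_iommuv2_update_pta_entry(struct etnaviv_iommu_context *context);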