#endif
.gpu_state_get = a3xx_gpu_state_get,
.gpu_state_put = adreno_gpu_state_put,
- .create_address_space = adreno_iommu_create_address_space,
+ .create_address_space = adreno_create_address_space,
.get_rptr = a3xx_get_rptr,
},
};
#endif
.gpu_state_get = a4xx_gpu_state_get,
.gpu_state_put = adreno_gpu_state_put,
- .create_address_space = adreno_iommu_create_address_space,
+ .create_address_space = adreno_create_address_space,
.get_rptr = a4xx_get_rptr,
},
.get_timestamp = a4xx_get_timestamp,
.gpu_busy = a5xx_gpu_busy,
.gpu_state_get = a5xx_gpu_state_get,
.gpu_state_put = a5xx_gpu_state_put,
- .create_address_space = adreno_iommu_create_address_space,
+ .create_address_space = adreno_create_address_space,
.get_rptr = a5xx_get_rptr,
},
.get_timestamp = a5xx_get_timestamp,
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
- struct iommu_domain_geometry *geometry;
- struct msm_mmu *mmu;
- struct msm_gem_address_space *aspace;
- u64 start, size;
unsigned long quirks = 0;
/*
 * This allows GPU to set the bus attributes required to use system
 * cache on behalf of the iommu page table walker.
 */
if (!IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice))
quirks |= IO_PGTABLE_QUIRK_ARM_OUTER_WBWA;
- mmu = msm_iommu_new(&pdev->dev, quirks);
- if (IS_ERR_OR_NULL(mmu))
- return ERR_CAST(mmu);
-
- geometry = msm_iommu_get_geometry(mmu);
- if (IS_ERR(geometry))
- return ERR_CAST(geometry);
-
- /*
- * Use the aperture start or SZ_16M, whichever is greater. This will
- * ensure that we align with the allocated pagetable range while still
- * allowing room in the lower 32 bits for GMEM and whatnot
- */
- start = max_t(u64, SZ_16M, geometry->aperture_start);
- size = geometry->aperture_end - start + 1;
-
- aspace = msm_gem_address_space_create(mmu, "gpu",
- start & GENMASK_ULL(48, 0), size);
-
- if (IS_ERR(aspace) && !IS_ERR(mmu))
- mmu->funcs->destroy(mmu);
-
- return aspace;
+ return adreno_iommu_create_address_space(gpu, pdev, quirks);
}
static struct msm_gem_address_space *
return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid);
}
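+/*
+ * Default helper for targets with no io-pgtable quirks: simply wraps
+ * adreno_iommu_create_address_space() with quirks = 0.
+ */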
+struct msm_gem_address_space *
+adreno_create_address_space(struct msm_gpu *gpu,
+ struct platform_device *pdev)
+{
+ return adreno_iommu_create_address_space(gpu, pdev, 0);
+}
+
struct msm_gem_address_space *
adreno_iommu_create_address_space(struct msm_gpu *gpu,
- struct platform_device *pdev)
+ struct platform_device *pdev,
+ unsigned long quirks)
{
struct iommu_domain_geometry *geometry;
struct msm_mmu *mmu;
struct msm_gem_address_space *aspace;
u64 start, size;
- mmu = msm_iommu_new(&pdev->dev, 0);
+ mmu = msm_iommu_new(&pdev->dev, quirks);
if (IS_ERR_OR_NULL(mmu))
return ERR_CAST(mmu);
* attached targets
*/
struct msm_gem_address_space *
+adreno_create_address_space(struct msm_gpu *gpu,
+ struct platform_device *pdev);
+
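+/*
+ * Variant that lets a target pass io-pgtable quirks (e.g.
+ * IO_PGTABLE_QUIRK_ARM_OUTER_WBWA on a6xx) when the address space is created.
+ */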
+struct msm_gem_address_space *
adreno_iommu_create_address_space(struct msm_gpu *gpu,
- struct platform_device *pdev);
+ struct platform_device *pdev,
+ unsigned long quirks);
int adreno_read_speedbin(struct device *dev, u32 *speedbin);