	if (gart->active_domain && gart->active_domain != domain) {
		ret = -EBUSY;
-	} else if (dev->archdata.iommu != domain) {
-		dev->archdata.iommu = domain;
+	} else if (dev_iommu_priv_get(dev) != domain) {
+		dev_iommu_priv_set(dev, domain);
		gart->active_domain = domain;
		gart->active_devices++;
	}
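For reference, dev_iommu_priv_get() and dev_iommu_priv_set() are thin inline accessors around the generic per-device IOMMU data, replacing the arch-specific dev->archdata.iommu field. A minimal sketch of the helpers, assuming the form they had in include/linux/iommu.h when this conversion landed (around v5.8):

	/* Sketch of the include/linux/iommu.h helpers, v5.8-era form. */
	static inline void *dev_iommu_priv_get(struct device *dev)
	{
		/* Driver-private pointer, kept in the generic struct dev_iommu. */
		return dev->iommu->priv;
	}

	static inline void dev_iommu_priv_set(struct device *dev, void *priv)
	{
		dev->iommu->priv = priv;
	}

Because the helpers only move the pointer from dev->archdata.iommu to dev->iommu->priv, each hunk in this patch is a mechanical one-for-one substitution.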
	spin_lock(&gart->dom_lock);

-	if (dev->archdata.iommu == domain) {
-		dev->archdata.iommu = NULL;
+	if (dev_iommu_priv_get(dev) == domain) {
+		dev_iommu_priv_set(dev, NULL);

		if (--gart->active_devices == 0)
			gart->active_domain = NULL;
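The hunk above is truncated mid-function; for context, a sketch of the whole detach path as it reads after the change, reconstructed from the tegra-gart.c of that era (the GART is a single hardware instance, hence the global handle and the single active domain):

	static void gart_iommu_detach_dev(struct iommu_domain *domain,
					  struct device *dev)
	{
		struct gart_device *gart = gart_handle;

		spin_lock(&gart->dom_lock);

		/* Only detach if this device is actually attached to @domain. */
		if (dev_iommu_priv_get(dev) == domain) {
			dev_iommu_priv_set(dev, NULL);

			/* Last device gone: the single GART domain becomes free. */
			if (--gart->active_devices == 0)
				gart->active_domain = NULL;
		}

		spin_unlock(&gart->dom_lock);
	}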
static int tegra_smmu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
-	struct tegra_smmu *smmu = dev->archdata.iommu;
+	struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
			 * supported by the Linux kernel, so abort after the
			 * first match.
			 */
-			dev->archdata.iommu = smmu;
+			dev_iommu_priv_set(dev, smmu);

			break;
		}
static void tegra_smmu_release_device(struct device *dev)
{
-	dev->archdata.iommu = NULL;
+	dev_iommu_priv_set(dev, NULL);
}

static const struct tegra_smmu_group_soc *
static struct iommu_group *tegra_smmu_device_group(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
-	struct tegra_smmu *smmu = dev->archdata.iommu;
+	struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
	struct iommu_group *group;

	group = tegra_smmu_group_get(smmu, fwspec->ids[0]);
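Taken together, the hunks follow the standard lifecycle for per-device IOMMU driver data: publish the pointer when the device is probed (or attached, for the GART), read it back in later callbacks such as device_group, and clear it on release or detach. A condensed, hypothetical skeleton of that pairing; the foo_* names are illustrative, not from this patch:

	/* Hypothetical driver; struct foo embeds a struct iommu_device "iommu". */
	static struct iommu_device *foo_probe_device(struct device *dev)
	{
		struct foo *priv = foo_lookup(dev);	/* hypothetical driver lookup */

		if (!priv)
			return ERR_PTR(-ENODEV);

		dev_iommu_priv_set(dev, priv);		/* publish per-device data */
		return &priv->iommu;
	}

	static void foo_release_device(struct device *dev)
	{
		dev_iommu_priv_set(dev, NULL);		/* clear it on teardown */
	}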