struct kobject kobj;
struct kobject *devices_kobj;
struct list_head devices;
+ struct xarray pasid_array;
struct mutex mutex;
void *iommu_data;
void (*iommu_data_release)(void *iommu_data);
mutex_init(&group->mutex);
INIT_LIST_HEAD(&group->devices);
INIT_LIST_HEAD(&group->entry);
+ xa_init(&group->pasid_array);
ret = ida_alloc(&iommu_group_ida, GFP_KERNEL);
if (ret < 0) {
mutex_lock(&group->mutex);
if (group->owner_cnt) {
- if (group->owner || !iommu_is_default_domain(group)) {
+ if (group->owner || !iommu_is_default_domain(group) ||
+ !xa_empty(&group->pasid_array)) {
ret = -EBUSY;
goto unlock_out;
}
return;
mutex_lock(&group->mutex);
- if (!WARN_ON(!group->owner_cnt))
+ if (!WARN_ON(!group->owner_cnt || !xa_empty(&group->pasid_array)))
group->owner_cnt--;
mutex_unlock(&group->mutex);
ret = -EPERM;
goto unlock_out;
} else {
- if (group->domain && group->domain != group->default_domain) {
+ if ((group->domain && group->domain != group->default_domain) ||
+ !xa_empty(&group->pasid_array)) {
ret = -EBUSY;
goto unlock_out;
}
int ret;
mutex_lock(&group->mutex);
- if (WARN_ON(!group->owner_cnt || !group->owner))
+ if (WARN_ON(!group->owner_cnt || !group->owner ||
+ !xa_empty(&group->pasid_array)))
goto unlock_out;
group->owner_cnt = 0;
return user;
}
EXPORT_SYMBOL_GPL(iommu_group_dma_owner_claimed);
+
+static int __iommu_set_group_pasid(struct iommu_domain *domain,
+ struct iommu_group *group, ioasid_t pasid)
+{
+ struct group_device *device;
+ int ret = 0;
+
+ list_for_each_entry(device, &group->devices, list) {
+ ret = domain->ops->set_dev_pasid(domain, device->dev, pasid);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static void __iommu_remove_group_pasid(struct iommu_group *group,
+ ioasid_t pasid)
+{
+ struct group_device *device;
+ const struct iommu_ops *ops;
+
+ list_for_each_entry(device, &group->devices, list) {
+ ops = dev_iommu_ops(device->dev);
+ ops->remove_dev_pasid(device->dev, pasid);
+ }
+}
+
+/**
+ * iommu_attach_device_pasid() - Attach a domain to a pasid of the device
+ * @domain: the iommu domain.
+ * @dev: the attached device.
+ * @pasid: the pasid of the device.
+ *
+ * Return: 0 on success, or an error.
+ */
+int iommu_attach_device_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid)
+{
+ struct iommu_group *group;
+ void *curr;
+ int ret;
+
+ if (!domain->ops->set_dev_pasid)
+ return -EOPNOTSUPP;
+
+ group = iommu_group_get(dev);
+ if (!group)
+ return -ENODEV;
+
+ mutex_lock(&group->mutex);
+ curr = xa_cmpxchg(&group->pasid_array, pasid, NULL, domain, GFP_KERNEL);
+ if (curr) {
+ ret = xa_err(curr) ? : -EBUSY;
+ goto out_unlock;
+ }
+
+ ret = __iommu_set_group_pasid(domain, group, pasid);
+ if (ret) {
+ __iommu_remove_group_pasid(group, pasid);
+ xa_erase(&group->pasid_array, pasid);
+ }
+out_unlock:
+ mutex_unlock(&group->mutex);
+ iommu_group_put(group);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_attach_device_pasid);
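As a usage sketch (editor's illustration, not part of the patch): a caller
allocates an unmanaged domain and binds it to a PASID it has already obtained
for the device. example_setup_pasid() is a hypothetical name.

    #include <linux/err.h>
    #include <linux/iommu.h>

    /* Attach a freshly allocated unmanaged domain to @pasid of @dev. */
    static struct iommu_domain *
    example_setup_pasid(struct device *dev, ioasid_t pasid)
    {
    	struct iommu_domain *domain;
    	int ret;

    	domain = iommu_domain_alloc(dev->bus);
    	if (!domain)
    		return ERR_PTR(-ENOMEM);

    	ret = iommu_attach_device_pasid(domain, dev, pasid);
    	if (ret) {
    		/* e.g. -EBUSY if the pasid already has a domain */
    		iommu_domain_free(domain);
    		return ERR_PTR(ret);
    	}

    	return domain;
    }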
+
+/**
+ * iommu_detach_device_pasid() - Detach the domain from a pasid of the device
+ * @domain: the iommu domain.
+ * @dev: the attached device.
+ * @pasid: the pasid of the device.
+ *
+ * The @domain must have been attached to @pasid of the @dev with
+ * iommu_attach_device_pasid().
+ */
+void iommu_detach_device_pasid(struct iommu_domain *domain, struct device *dev,
+ ioasid_t pasid)
+{
+ struct iommu_group *group = iommu_group_get(dev);
+
+ mutex_lock(&group->mutex);
+ __iommu_remove_group_pasid(group, pasid);
+ WARN_ON(xa_erase(&group->pasid_array, pasid) != domain);
+ mutex_unlock(&group->mutex);
+
+ iommu_group_put(group);
+}
+EXPORT_SYMBOL_GPL(iommu_detach_device_pasid);
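The matching teardown (again a hypothetical illustration): the domain must be
detached from the pasid before it is freed.

    #include <linux/iommu.h>

    /* Undo example_setup_pasid(): detach first, then free the domain. */
    static void example_teardown_pasid(struct iommu_domain *domain,
    				   struct device *dev, ioasid_t pasid)
    {
    	iommu_detach_device_pasid(domain, dev, pasid);
    	iommu_domain_free(domain);
    }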
+
+/**
+ * iommu_get_domain_for_dev_pasid() - Retrieve domain for @pasid of @dev
+ * @dev: the queried device
+ * @pasid: the pasid of the device
+ * @type: matched domain type, 0 for any match
+ *
+ * This is a variant of iommu_get_domain_for_dev(). It returns the existing
+ * domain attached to @pasid of @dev. Callers must hold a lock around this
+ * function, and around both iommu_attach/detach_dev_pasid(), whenever a
+ * domain of @type is being manipulated. This API does not internally
+ * resolve races with attach/detach.
+ *
+ * Return: the attached domain on success, NULL if no domain is attached,
+ * or ERR_PTR(-EBUSY) if a domain of a different type is attached.
+ */
+struct iommu_domain *iommu_get_domain_for_dev_pasid(struct device *dev,
+ ioasid_t pasid,
+ unsigned int type)
+{
+ struct iommu_domain *domain;
+ struct iommu_group *group;
+
+ group = iommu_group_get(dev);
+ if (!group)
+ return NULL;
+
+ xa_lock(&group->pasid_array);
+ domain = xa_load(&group->pasid_array, pasid);
+ if (type && domain && domain->type != type)
+ domain = ERR_PTR(-EBUSY);
+ xa_unlock(&group->pasid_array);
+ iommu_group_put(group);
+
+ return domain;
+}
+EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev_pasid);
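A lookup sketch (hypothetical caller, e.g. an I/O page fault path), showing
the NULL/ERR_PTR distinction and the @type filter:

    #include <linux/err.h>
    #include <linux/iommu.h>

    /* Fetch the domain bound to @pasid, insisting on an unmanaged one.
     * Serializing against attach/detach is the caller's responsibility,
     * per the kernel-doc above. */
    static struct iommu_domain *
    example_find_pasid_domain(struct device *dev, ioasid_t pasid)
    {
    	struct iommu_domain *domain;

    	domain = iommu_get_domain_for_dev_pasid(dev, pasid,
    						IOMMU_DOMAIN_UNMANAGED);
    	if (IS_ERR_OR_NULL(domain))
    		return NULL;	/* nothing attached, or wrong type */

    	return domain;
    }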
* - IOMMU_DOMAIN_DMA: must use a dma domain
* - 0: use the default setting
* @default_domain_ops: the default ops for domains
+ * @remove_dev_pasid: Remove any translation configurations of a specific
+ * pasid, so that any DMA transactions with this pasid
+ * will be blocked by the hardware.
* @pgsize_bitmap: bitmap of all possible supported page sizes
* @owner: Driver module providing these ops
*/
struct iommu_page_response *msg);
int (*def_domain_type)(struct device *dev);
+ void (*remove_dev_pasid)(struct device *dev, ioasid_t pasid);
const struct iommu_domain_ops *default_domain_ops;
unsigned long pgsize_bitmap;
* struct iommu_domain_ops - domain specific operations
* @attach_dev: attach an iommu domain to a device
* @detach_dev: detach an iommu domain from a device
+ * @set_dev_pasid: set an iommu domain to a pasid of the device
* @map: map a physically contiguous memory region to an iommu domain
* @map_pages: map a physically contiguous set of pages of the same size to
* an iommu domain.
struct iommu_domain_ops {
int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
+ int (*set_dev_pasid)(struct iommu_domain *domain, struct device *dev,
+ ioasid_t pasid);
int (*map)(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
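On the driver side, a hypothetical skeleton of the two new callbacks (the
my_*() helpers are invented stand-ins for hardware PASID-table programming):

    #include <linux/iommu.h>

    /* Install @domain's translation in @dev's PASID table entry. */
    static int my_set_dev_pasid(struct iommu_domain *domain,
    			    struct device *dev, ioasid_t pasid)
    {
    	return my_hw_install_pasid_entry(dev, pasid, domain);
    }

    /* Clear the entry so DMA tagged with @pasid is blocked by hardware,
     * whatever domain (if any) was attached. */
    static void my_remove_dev_pasid(struct device *dev, ioasid_t pasid)
    {
    	my_hw_clear_pasid_entry(dev, pasid);
    }

These would then be wired into struct iommu_domain_ops (.set_dev_pasid) and
struct iommu_ops (.remove_dev_pasid) respectively; remove_dev_pasid sits in
iommu_ops because it must work regardless of which domain is attached.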
void iommu_group_release_dma_owner(struct iommu_group *group);
bool iommu_group_dma_owner_claimed(struct iommu_group *group);
+int iommu_attach_device_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid);
+void iommu_detach_device_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid);
+struct iommu_domain *
+iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid,
+ unsigned int type);
#else /* CONFIG_IOMMU_API */
struct iommu_ops {};
{
return false;
}
+
+static inline int iommu_attach_device_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid)
+{
+ return -ENODEV;
+}
+
+static inline void iommu_detach_device_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid)
+{
+}
+
+static inline struct iommu_domain *
+iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid,
+ unsigned int type)
+{
+ return NULL;
+}
#endif /* CONFIG_IOMMU_API */
/**