MSI_DESC_ASSOCIATED,
};
+
+/**
+ * struct msi_dev_domain - The internals of MSI domain info per device
+ * @store: Xarray for storing MSI descriptor pointers
+ */
+struct msi_dev_domain {
+ struct xarray store;
+};
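For context, a minimal sketch of how a per-domain store is reached, assuming MSI_DEFAULT_DOMAIN and MSI_MAX_DEVICE_IRQDOMAINS are members of a domain-id enum introduced elsewhere in this series; the helper name is hypothetical and not part of this patch:

    /* Hypothetical helper: map a domain id to its descriptor store.
     * Callers must guarantee domid < MSI_MAX_DEVICE_IRQDOMAINS. */
    static inline struct xarray *msi_domain_store(struct msi_device_data *md,
                                                  unsigned int domid)
    {
        return &md->__domains[domid].store;
    }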
+
/**
* struct msi_device_data - MSI per device data

* @properties: MSI properties which are interesting to drivers
* @platform_data: Platform-MSI specific data
* @mutex: Mutex protecting the MSI descriptor store
- * @__store: Xarray for storing MSI descriptor pointers
+ * @__domains: Internal data for per device MSI domains
* @__iter_idx: Index to search the next entry for iterators
*/
struct msi_device_data {
unsigned long properties;
struct platform_msi_priv_data *platform_data;
struct mutex mutex;
- struct xarray __store;
+ struct msi_dev_domain __domains[MSI_MAX_DEVICE_IRQDOMAINS];
unsigned long __iter_idx;
};
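To illustrate the resulting layout, a sketch (assuming the struct above) that walks every per-domain store under the device mutex, per the locking rule the kernel-doc states for @mutex:

    /* Illustration only: dump every descriptor in every per-device domain. */
    static void msi_dump_all_domains(struct msi_device_data *md)
    {
        struct msi_desc *desc;
        unsigned long idx;
        int i;

        mutex_lock(&md->mutex);
        for (i = 0; i < MSI_MAX_DEVICE_IRQDOMAINS; i++) {
            xa_for_each(&md->__domains[i].store, idx, desc)
                pr_debug("domain %d: descriptor at index %lu\n", i, idx);
        }
        mutex_unlock(&md->mutex);
    }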
static int msi_insert_desc(struct msi_device_data *md, struct msi_desc *desc, unsigned int index)
{
+ struct xarray *xa = &md->__domains[MSI_DEFAULT_DOMAIN].store;
int ret;
desc->msi_index = index;
- ret = xa_insert(&md->__store, index, desc, GFP_KERNEL);
+ ret = xa_insert(xa, index, desc, GFP_KERNEL);
if (ret)
msi_free_desc(desc);
return ret;
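Note the ownership rule above: on any xa_insert() failure (-EBUSY for an already occupied index, -ENOMEM for an allocation failure) the descriptor is freed before returning, so a caller only propagates the error code. A hypothetical call site under that assumption, where msi_alloc_desc() is taken to be this file's internal allocator:

    /* Hypothetical caller: populate indices 0..nvec-1 with fresh descriptors. */
    static int msi_add_simple_descs(struct device *dev, unsigned int nvec)
    {
        unsigned int i;
        int ret;

        for (i = 0; i < nvec; i++) {
            struct msi_desc *desc = msi_alloc_desc(dev, 1, NULL);

            if (!desc)
                return -ENOMEM;
            /* On failure, desc was already freed by msi_insert_desc(). */
            ret = msi_insert_desc(dev->msi.data, desc, i);
            if (ret)
                return ret;
        }
        return 0;
    }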
void msi_free_msi_descs_range(struct device *dev, unsigned int first_index,
unsigned int last_index)
{
- struct xarray *xa = &dev->msi.data->__store;
+ struct xarray *xa = &dev->msi.data->__domains[MSI_DEFAULT_DOMAIN].store;
struct msi_desc *desc;
unsigned long idx;
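The loop body is elided in this excerpt; given the declarations above it presumably walks the requested range, erasing and freeing each descriptor. A sketch using xa_for_each_range() and this file's msi_free_desc():

    /* Sketch of the elided body: erase and free each descriptor in range. */
    xa_for_each_range(xa, idx, desc, first_index, last_index) {
        xa_erase(xa, idx);
        msi_free_desc(desc);
    }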
static void msi_device_data_release(struct device *dev, void *res)
{
struct msi_device_data *md = res;
+ int i;
- WARN_ON_ONCE(!xa_empty(&md->__store));
- xa_destroy(&md->__store);
+ for (i = 0; i < MSI_MAX_DEVICE_IRQDOMAINS; i++) {
+ WARN_ON_ONCE(!xa_empty(&md->__domains[i].store));
+ xa_destroy(&md->__domains[i].store);
+ }
dev->msi.data = NULL;
}
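The WARN/destroy pairing relies on standard xarray semantics: xa_destroy() frees the array's internal nodes but not the stored entries, so any msi_desc still present here would leak. A trivial standalone illustration:

    /* Illustration only: destroying an empty xarray is safe and cheap. */
    struct xarray xa;

    xa_init(&xa);
    WARN_ON_ONCE(!xa_empty(&xa));  /* freshly initialised, no warning */
    xa_destroy(&xa);               /* no entries, nothing leaks */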
int msi_setup_device_data(struct device *dev)
{
struct msi_device_data *md;
- int ret;
+ int ret, i;
if (dev->msi.data)
return 0;
return ret;
}
- xa_init(&md->__store);
+ for (i = 0; i < MSI_MAX_DEVICE_IRQDOMAINS; i++)
+ xa_init(&md->__domains[i].store);
+
mutex_init(&md->mutex);
dev->msi.data = md;
devres_add(dev, md);
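For reference, a hypothetical probe-time caller: msi_setup_device_data() is idempotent (it returns 0 when dev->msi.data already exists) and the allocation is device-managed via devres, so the driver needs no explicit teardown:

    /* Hypothetical driver probe: set up per-device MSI data once. */
    static int foo_probe(struct device *dev)
    {
        int ret = msi_setup_device_data(dev);

        if (ret)
            return ret;
        /* Descriptors may now be inserted under msi_lock_descs(dev). */
        return 0;
    }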
*/
void msi_unlock_descs(struct device *dev)
{
- /* Invalidate the index wich was cached by the iterator */
+ /* Invalidate the index which was cached by the iterator */
dev->msi.data->__iter_idx = MSI_MAX_INDEX;
mutex_unlock(&dev->msi.data->mutex);
}
static struct msi_desc *msi_find_desc(struct msi_device_data *md, enum msi_desc_filter filter)
{
+ struct xarray *xa = &md->__domains[MSI_DEFAULT_DOMAIN].store;
struct msi_desc *desc;
- xa_for_each_start(&md->__store, md->__iter_idx, desc, md->__iter_idx) {
+ xa_for_each_start(xa, md->__iter_idx, desc, md->__iter_idx) {
if (msi_desc_match(desc, filter))
return desc;
}
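Consumer-visible behavior is unchanged: __iter_idx is only meaningful between msi_lock_descs() and msi_unlock_descs(), which resets it to MSI_MAX_INDEX. A sketch of the existing iteration pattern built on top of this search function:

    /* Illustration only: walk the associated descriptors of a device. */
    struct msi_desc *desc;

    msi_lock_descs(dev);
    msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED)
        pr_debug("index %u -> virq %u\n", desc->msi_index, desc->irq);
    msi_unlock_descs(dev);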
{
struct msi_desc *desc;
unsigned int ret = 0;
+ struct xarray *xa;
bool pcimsi;
if (!dev->msi.data)
return 0;

pcimsi = dev_is_pci(dev) ? to_pci_dev(dev)->msi_enabled : false;
msi_lock_descs(dev);
- desc = xa_load(&dev->msi.data->__store, pcimsi ? 0 : index);
+ xa = &dev->msi.data->__domains[MSI_DEFAULT_DOMAIN].store;
+ desc = xa_load(xa, pcimsi ? 0 : index);
if (desc && desc->irq) {
/*
* PCI-MSI has only one descriptor for multiple interrupts.
struct msi_domain_info *info = domain->host_data;
struct msi_domain_ops *ops = info->ops;
struct msi_desc *desc;
+ struct xarray *xa;
int ret, virq;
msi_lock_descs(dev);
if (ret)
goto unlock;
+ xa = &dev->msi.data->__domains[MSI_DEFAULT_DOMAIN].store;
+
for (virq = virq_base; virq < virq_base + nvec; virq++) {
- desc = xa_load(&dev->msi.data->__store, virq);
+ desc = xa_load(xa, virq);
desc->irq = virq;
ops->set_desc(arg, desc);
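One invariant worth making explicit: this loop indexes the store by Linux interrupt number and relies on the descriptors having been populated just beforehand, so xa_load() cannot return NULL here. A defensive variant, illustration only, reusing the function's existing unlock label:

    /* Illustration only: fail loudly if a descriptor is missing. */
    desc = xa_load(xa, virq);
    if (WARN_ON_ONCE(!desc)) {
        ret = -ENOENT;
        goto unlock;
    }
    desc->irq = virq;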