i915_engines_cleanup(dev_priv);
}
-static int i915_mmio_setup(struct drm_i915_private *dev_priv)
-{
- struct pci_dev *pdev = dev_priv->drm.pdev;
- int mmio_bar;
- int mmio_size;
-
- mmio_bar = IS_GEN(dev_priv, 2) ? 1 : 0;
- /*
- * Before gen4, the registers and the GTT are behind different BARs.
- * However, from gen4 onwards, the registers and the GTT are shared
- * in the same BAR, so we want to restrict this ioremap from
- * clobbering the GTT which we want ioremap_wc instead. Fortunately,
- * the register BAR remains the same size for all the earlier
- * generations up to Ironlake.
- */
- if (INTEL_GEN(dev_priv) < 5)
- mmio_size = 512 * 1024;
- else
- mmio_size = 2 * 1024 * 1024;
- dev_priv->regs = pci_iomap(pdev, mmio_bar, mmio_size);
- if (dev_priv->regs == NULL) {
- DRM_ERROR("failed to map registers\n");
-
- return -EIO;
- }
-
- /* Try to make sure MCHBAR is enabled before poking at it */
- intel_setup_mchbar(dev_priv);
-
- return 0;
-}
-
-static void i915_mmio_cleanup(struct drm_i915_private *dev_priv)
-{
- struct pci_dev *pdev = dev_priv->drm.pdev;
-
- intel_teardown_mchbar(dev_priv);
- pci_iounmap(pdev, dev_priv->regs);
-}
-
/**
* i915_driver_init_mmio - setup device MMIO
* @dev_priv: device private
if (i915_get_bridge_dev(dev_priv))
return -EIO;
- ret = i915_mmio_setup(dev_priv);
+ ret = intel_uncore_init(&dev_priv->uncore);
if (ret < 0)
goto err_bridge;
- intel_uncore_init(&dev_priv->uncore);
+ /* Try to make sure MCHBAR is enabled before poking at it */
+ intel_setup_mchbar(dev_priv);
intel_device_info_init_mmio(dev_priv);
return 0;
err_uncore:
+ intel_teardown_mchbar(dev_priv);
intel_uncore_fini(&dev_priv->uncore);
- i915_mmio_cleanup(dev_priv);
err_bridge:
pci_dev_put(dev_priv->bridge_dev);
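For reference, i915_driver_init_mmio() now reads roughly as follows once the hunks above are applied (a sketch: the init steps between intel_device_info_init_mmio() and the error labels are elided here, just as they are in the diff):

static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
{
	int ret;

	if (i915_get_bridge_dev(dev_priv))
		return -EIO;

	ret = intel_uncore_init(&dev_priv->uncore);
	if (ret < 0)
		goto err_bridge;

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev_priv);

	intel_device_info_init_mmio(dev_priv);

	/* ... elided steps; on failure they unwind via err_uncore ... */

	return 0;

err_uncore:
	intel_teardown_mchbar(dev_priv);
	intel_uncore_fini(&dev_priv->uncore);
err_bridge:
	pci_dev_put(dev_priv->bridge_dev);

	return ret;
}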
*/
static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
{
+ intel_teardown_mchbar(dev_priv);
intel_uncore_fini(&dev_priv->uncore);
- i915_mmio_cleanup(dev_priv);
pci_dev_put(dev_priv->bridge_dev);
}
*/
resource_size_t stolen_usable_size; /* Total size minus reserved ranges */
- void __iomem *regs;
-
struct intel_uncore uncore;
struct i915_virtual_gpu vgpu;
static inline uint##x##_t __raw_i915_read##x(const struct drm_i915_private *dev_priv, \
i915_reg_t reg) \
{ \
- return read##s(dev_priv->regs + i915_mmio_reg_offset(reg)); \
+ return read##s(dev_priv->uncore.regs + i915_mmio_reg_offset(reg)); \
}
#define __raw_write(x, s) \
static inline void __raw_i915_write##x(const struct drm_i915_private *dev_priv, \
i915_reg_t reg, uint##x##_t val) \
{ \
- write##s(val, dev_priv->regs + i915_mmio_reg_offset(reg)); \
+ write##s(val, dev_priv->uncore.regs + i915_mmio_reg_offset(reg)); \
}
__raw_read(8, b)
__raw_read(16, w)
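To make the macro plumbing concrete: assuming the usual 32-bit instantiation __raw_read(32, l), the accessor now expands to a read through the uncore mapping rather than the old dev_priv->regs pointer:

static inline uint32_t __raw_i915_read32(const struct drm_i915_private *dev_priv,
					 i915_reg_t reg)
{
	return readl(dev_priv->uncore.regs + i915_mmio_reg_offset(reg));
}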
const unsigned int bank,
const unsigned int bit)
{
- void __iomem * const regs = i915->regs;
+ void __iomem * const regs = i915->uncore.regs;
u32 dw;
lockdep_assert_held(&i915->irq_lock);
static void gen8_gt_irq_ack(struct drm_i915_private *i915,
u32 master_ctl, u32 gt_iir[4])
{
- void __iomem * const regs = i915->regs;
+ void __iomem * const regs = i915->uncore.regs;
#define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \
GEN8_GT_BCS_IRQ | \
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
struct drm_i915_private *dev_priv = to_i915(arg);
- void __iomem * const regs = dev_priv->regs;
+ void __iomem * const regs = dev_priv->uncore.regs;
u32 master_ctl;
u32 gt_iir[4];
gen11_gt_engine_identity(struct drm_i915_private * const i915,
const unsigned int bank, const unsigned int bit)
{
- void __iomem * const regs = i915->regs;
+ void __iomem * const regs = i915->uncore.regs;
u32 timeout_ts;
u32 ident;
gen11_gt_bank_handler(struct drm_i915_private * const i915,
const unsigned int bank)
{
- void __iomem * const regs = i915->regs;
+ void __iomem * const regs = i915->uncore.regs;
unsigned long intr_dw;
unsigned int bit;
static u32
gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl)
{
- void __iomem * const regs = dev_priv->regs;
+ void __iomem * const regs = dev_priv->uncore.regs;
u32 iir;
if (!(master_ctl & GEN11_GU_MISC_IRQ))
static irqreturn_t gen11_irq_handler(int irq, void *arg)
{
struct drm_i915_private * const i915 = to_i915(arg);
- void __iomem * const regs = i915->regs;
+ void __iomem * const regs = i915->uncore.regs;
u32 master_ctl;
u32 gu_misc_iir;
struct drm_i915_private *dev_priv = to_i915(dev);
int pipe;
- gen8_master_intr_disable(dev_priv->regs);
+ gen8_master_intr_disable(dev_priv->uncore.regs);
gen8_gt_irq_reset(dev_priv);
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
- gen11_master_intr_disable(dev_priv->regs);
+ gen11_master_intr_disable(dev_priv->uncore.regs);
gen11_gt_irq_reset(dev_priv);
if (HAS_PCH_SPLIT(dev_priv))
ibx_irq_postinstall(dev);
- gen8_master_intr_enable(dev_priv->regs);
+ gen8_master_intr_enable(dev_priv->uncore.regs);
return 0;
}
I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
- gen11_master_intr_enable(dev_priv->regs);
+ gen11_master_intr_enable(dev_priv->uncore.regs);
POSTING_READ(GEN11_GFX_MSTR_IRQ);
return 0;
intel_engine_init_workarounds(engine);
if (HAS_LOGICAL_RING_ELSQ(i915)) {
- execlists->submit_reg = i915->regs +
+ execlists->submit_reg = i915->uncore.regs +
i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(engine));
- execlists->ctrl_reg = i915->regs +
+ execlists->ctrl_reg = i915->uncore.regs +
i915_mmio_reg_offset(RING_EXECLIST_CONTROL(engine));
} else {
- execlists->submit_reg = i915->regs +
+ execlists->submit_reg = i915->uncore.regs +
i915_mmio_reg_offset(RING_ELSP(engine));
}
i915_reg_t reg_ack)
{
struct intel_uncore_forcewake_domain *d;
- struct drm_i915_private *i915 = uncore_to_i915(uncore);
if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
return;
WARN_ON(!i915_mmio_reg_valid(reg_ack));
d->wake_count = 0;
- d->reg_set = i915->regs + i915_mmio_reg_offset(reg_set);
- d->reg_ack = i915->regs + i915_mmio_reg_offset(reg_ack);
+ d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set);
+ d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack);
d->id = domain_id;
return NOTIFY_OK;
}
-void intel_uncore_init(struct intel_uncore *uncore)
+static int uncore_mmio_setup(struct intel_uncore *uncore)
{
struct drm_i915_private *i915 = uncore_to_i915(uncore);
+ struct pci_dev *pdev = i915->drm.pdev;
+ int mmio_bar;
+ int mmio_size;
+
+ mmio_bar = IS_GEN(i915, 2) ? 1 : 0;
+ /*
+ * Before gen4, the registers and the GTT are behind different BARs.
+ * However, from gen4 onwards, the registers and the GTT are shared
+ * in the same BAR, so we restrict this ioremap to the register range
+ * to avoid clobbering the GTT, which we want to map with ioremap_wc
+ * instead. Fortunately, the register BAR remains the same size for
+ * all the earlier generations up to Ironlake.
+ */
+ if (INTEL_GEN(i915) < 5)
+ mmio_size = 512 * 1024;
+ else
+ mmio_size = 2 * 1024 * 1024;
+ uncore->regs = pci_iomap(pdev, mmio_bar, mmio_size);
+ if (uncore->regs == NULL) {
+ DRM_ERROR("failed to map registers\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void uncore_mmio_cleanup(struct intel_uncore *uncore)
+{
+ struct drm_i915_private *i915 = uncore_to_i915(uncore);
+ struct pci_dev *pdev = i915->drm.pdev;
+
+ pci_iounmap(pdev, uncore->regs);
+}
+
+int intel_uncore_init(struct intel_uncore *uncore)
+{
+ struct drm_i915_private *i915 = uncore_to_i915(uncore);
+ int ret;
+
+ ret = uncore_mmio_setup(uncore);
+ if (ret)
+ return ret;
i915_check_vgpu(i915);
}
iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
+
+ return 0;
}
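Concretely, the BAR selection and mapping size in uncore_mmio_setup() resolve as follows:

	platform		BAR	mapped size
	gen2			1	512 KiB
	gen3/gen4		0	512 KiB
	gen5 (Ironlake)+	0	2 MiB

And since intel_uncore_init() now returns an error code while intel_uncore_fini() picks up the matching uncore_mmio_cleanup() call, a caller only needs to check the init return value and call fini on teardown. A minimal sketch of that contract (hypothetical call site, mirroring the i915_driver_init_mmio() change above):

	ret = intel_uncore_init(&dev_priv->uncore);
	if (ret)
		return ret;	/* pci_iomap() failed; nothing to unwind */
	/* ... */
	intel_uncore_fini(&dev_priv->uncore);	/* also unmaps uncore->regs */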
/*
&uncore->pmic_bus_access_nb);
intel_uncore_forcewake_reset(uncore);
iosf_mbi_punit_release();
+ uncore_mmio_cleanup(uncore);
}
static const struct reg_whitelist {
};
struct intel_uncore {
+ void __iomem *regs;
+
spinlock_t lock; /** lock is also taken in irq contexts. */
const struct intel_forcewake_range *fw_domains_table;
}
void intel_uncore_sanitize(struct drm_i915_private *dev_priv);
-void intel_uncore_init(struct intel_uncore *uncore);
+int intel_uncore_init(struct intel_uncore *uncore);
void intel_uncore_prune(struct intel_uncore *uncore);
bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv);
bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv);
for_each_engine(engine, i915, id) {
i915_reg_t mmio = _MMIO(engine->mmio_base + r->offset);
- u32 __iomem *reg = i915->regs + engine->mmio_base + r->offset;
+ u32 __iomem *reg = uncore->regs + engine->mmio_base + r->offset;
enum forcewake_domains fw_domains;
u32 val;