/* Compatibility shim for kernels older than 2.6.25: these Intel GMCH
 * host-bridge PCI device IDs are missing from older kernel headers,
 * so define them locally. */
3 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
8 #define PCI_DEVICE_ID_INTEL_82946GZ_HB 0x2970
9 #define PCI_DEVICE_ID_INTEL_82965G_1_HB 0x2980
10 #define PCI_DEVICE_ID_INTEL_82965Q_HB 0x2990
11 #define PCI_DEVICE_ID_INTEL_82965G_HB 0x29A0
12 #define PCI_DEVICE_ID_INTEL_82965GM_HB 0x2A00
13 #define PCI_DEVICE_ID_INTEL_82965GME_HB 0x2A10
14 #define PCI_DEVICE_ID_INTEL_82945GME_HB 0x27AC
15 #define PCI_DEVICE_ID_INTEL_G33_HB 0x29C0
16 #define PCI_DEVICE_ID_INTEL_Q35_HB 0x29B0
17 #define PCI_DEVICE_ID_INTEL_Q33_HB 0x29D0
/* PCI config-space offsets of the chipset flush-page address register
 * ("IFP"): a 32-bit register on i915-class parts, a 64-bit pair
 * (lo at +0, hi at +4) on i965/G33-class parts — see the setup
 * functions below, which read/write exactly these offsets. */
19 #define I915_IFPADDR 0x60
20 #define I965_IFPADDR 0x70
/* upper_32_bits() only exists from 2.6.21 onward; provide a fallback. */
22 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)
23 #define upper_32_bits(_val) (((u64)(_val)) >> 32)
/* File-scope compat state for i9xx chipset flushing: the ioremapped
 * flush page plus the MMIO resource that backs it.  The struct is
 * truncated in this excerpt (a resource_valid flag is used by the
 * functions below; remaining members/instance name not visible). */
26 static struct _i9xx_private_compat {
27 void __iomem *flush_page;
29 struct resource ifp_resource;
/* Companion state for i8xx parts: holds an uncached RAM page used for
 * flushing (members not visible in this excerpt; the functions below
 * reference i8xx_private.page and i8xx_private.flush_page). */
32 static struct _i8xx_private_compat {
/* Alignment callback handed to pci_bus_alloc_resource() below; return
 * type and body fall outside this excerpt — TODO confirm against the
 * full file. */
38 intel_compat_align_resource(void *data, struct resource *res,
39 resource_size_t size, resource_size_t align)
/* Allocate one page-aligned page of PCI bus MMIO space (at or above
 * PCIBIOS_MIN_MEM) for the chipset flush page, recorded in
 * i9xx_private.ifp_resource.  Error handling of `ret` is not visible
 * in this excerpt. */
45 static int intel_alloc_chipset_flush_resource(struct pci_dev *pdev)
48 ret = pci_bus_alloc_resource(pdev->bus, &i9xx_private.ifp_resource, PAGE_SIZE,
49 PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
50 intel_compat_align_resource, pdev);
/* Program the i915-class 32-bit IFP (flush page) register.  Reads the
 * current value; the branch structure is not visible in this excerpt,
 * but the two visible paths are: (a) allocate a fresh page and write
 * its address with the enable bit (bit 0) set, or (b) adopt the
 * already-programmed address and reserve it in the iomem tree. */
57 static void intel_i915_setup_chipset_flush(struct pci_dev *pdev)
62 pci_read_config_dword(pdev, I915_IFPADDR, &temp);
/* Path (a): no usable IFP yet — allocate our own page and enable it. */
64 intel_alloc_chipset_flush_resource(pdev);
65 i9xx_private.resource_valid = 1;
66 pci_write_config_dword(pdev, I915_IFPADDR, (i9xx_private.ifp_resource.start & 0xffffffff) | 0x1);
/* Path (b): IFP already programmed — claim the existing page so nothing
 * else maps it. */
70 i9xx_private.resource_valid = 1;
71 i9xx_private.ifp_resource.start = temp;
72 i9xx_private.ifp_resource.end = temp + PAGE_SIZE;
73 ret = request_resource(&iomem_resource, &i9xx_private.ifp_resource);
/* On failure, flushing is disabled by clearing resource_valid.
 * NOTE(review): printk lacks a KERN_ log level. */
75 i9xx_private.resource_valid = 0;
76 printk("Failed inserting resource into tree\n");
/* Program the i965/G33-class IFP register, which is a 64-bit address
 * split across two config dwords (lo at I965_IFPADDR, hi at +4).
 * Same two paths as the i915 variant: allocate-and-enable, or adopt
 * the BIOS-programmed page and reserve it. */
81 static void intel_i965_g33_setup_chipset_flush(struct pci_dev *pdev)
86 pci_read_config_dword(pdev, I965_IFPADDR + 4, &temp_hi);
87 pci_read_config_dword(pdev, I965_IFPADDR, &temp_lo);
/* Bit 0 of the low dword is the enable bit. */
89 if (!(temp_lo & 0x1)) {
/* Not enabled: allocate a page and program both halves of the address,
 * setting the enable bit in the low dword. */
91 intel_alloc_chipset_flush_resource(pdev);
93 i9xx_private.resource_valid = 1;
94 pci_write_config_dword(pdev, I965_IFPADDR + 4,
95 upper_32_bits(i9xx_private.ifp_resource.start));
96 pci_write_config_dword(pdev, I965_IFPADDR, (i9xx_private.ifp_resource.start & 0xffffffff) | 0x1);
/* Already enabled: reassemble the 64-bit address and reserve the page.
 * (Stripping of the enable bit from temp_lo is not visible in this
 * excerpt — TODO confirm in the full file.) */
101 l64 = ((u64)temp_hi << 32) | temp_lo;
103 i9xx_private.resource_valid = 1;
104 i9xx_private.ifp_resource.start = l64;
105 i9xx_private.ifp_resource.end = l64 + PAGE_SIZE;
106 ret = request_resource(&iomem_resource, &i9xx_private.ifp_resource);
/* On failure, disable flushing.  NOTE(review): printk lacks KERN_ level. */
108 i9xx_private.resource_valid = 0;
109 printk("Failed inserting resource into tree\n");
/* Tear down the i8xx flush page: drop the kernel mapping, restore the
 * page's cached attributes via the AGP helpers, then free it. */
114 static void intel_i8xx_fini_flush(struct drm_device *dev)
116 kunmap(i8xx_private.page);
117 i8xx_private.flush_page = NULL;
118 unmap_page_from_agp(i8xx_private.page);
119 flush_agp_mappings();
121 __free_page(i8xx_private.page);
/* i8xx parts have no IFP register; flushing is done by hammering writes
 * at an uncached page of ordinary RAM.  Allocate and map that page. */
124 static void intel_i8xx_setup_flush(struct drm_device *dev)
/* Page must be zeroed and below 4GB (GFP_DMA32). */
127 i8xx_private.page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
128 if (!i8xx_private.page) {
132 /* make page uncached */
133 map_page_into_agp(i8xx_private.page);
134 flush_agp_mappings();
/* Map it for CPU writes; on failure, unwind everything done above. */
136 i8xx_private.flush_page = kmap(i8xx_private.page);
137 if (!i8xx_private.flush_page)
138 intel_i8xx_fini_flush(dev);
/* Flush on i8xx: write repeatedly into the uncached page to push the
 * GMCH write buffers out (loop body not visible in this excerpt). */
142 static void intel_i8xx_flush_page(struct drm_device *dev)
144 unsigned int *pg = i8xx_private.flush_page;
/* 256 writes of `unsigned int` — presumably covering one 1KB-per-pass
 * pattern or a full stride of the page; confirm against the full file. */
148 for (i = 0; i < 256; i++)
/* Set up chipset flushing for i9xx parts: program the IFP register via
 * the AGP bridge's PCI device, then ioremap the flush page uncached so
 * a single CPU write can trigger a flush. */
154 static void intel_i9xx_setup_flush(struct drm_device *dev)
156 struct pci_dev *agp_dev = dev->agp->agp_info.device;
158 i9xx_private.ifp_resource.name = "GMCH IFPBAR";
159 i9xx_private.ifp_resource.flags = IORESOURCE_MEM;
/* Pick the register layout by generation: 64-bit IFP on i965/G33,
 * 32-bit IFP on i915-class (else-branch lines not visible here). */
162 if (IS_I965G(dev) || IS_G33(dev)) {
163 intel_i965_g33_setup_chipset_flush(agp_dev);
165 intel_i915_setup_chipset_flush(agp_dev);
/* Only map if a flush page was actually set up above. */
168 if (i9xx_private.ifp_resource.start) {
169 i9xx_private.flush_page = ioremap_nocache(i9xx_private.ifp_resource.start, PAGE_SIZE);
170 if (!i9xx_private.flush_page)
/* NOTE(review): printk lacks a KERN_ level and a trailing newline. */
171 printk("unable to ioremap flush page - no chipset flushing");
/* Undo intel_i9xx_setup_flush(): unmap the flush page and release the
 * IFP iomem resource if we successfully reserved it. */
175 static void intel_i9xx_fini_flush(struct drm_device *dev)
177 iounmap(i9xx_private.flush_page);
178 if (i9xx_private.resource_valid)
179 release_resource(&i9xx_private.ifp_resource);
180 i9xx_private.resource_valid = 0;
/* Flush on i9xx: a single write to the mapped IFP page triggers the
 * chipset flush; silently a no-op if the page was never mapped. */
183 static void intel_i9xx_flush_page(struct drm_device *dev)
185 if (i9xx_private.flush_page)
186 writel(1, i9xx_private.flush_page);
/* Public init entry: choose the flush scheme for this device — IFP page
 * on i9xx, uncached RAM page on i8xx (the selecting branch is not
 * visible in this excerpt). */
189 void intel_init_chipset_flush_compat(struct drm_device *dev)
191 /* i9xx: IFP flush page; i8xx fallback: uncached RAM page */
193 intel_i9xx_setup_flush(dev);
195 intel_i8xx_setup_flush(dev);
/* Public teardown entry: dispatch to the matching fini routine (the
 * selecting branch is not visible in this excerpt). */
199 void intel_fini_chipset_flush_compat(struct drm_device *dev)
201 /* i9xx vs i8xx teardown, mirroring the init path */
203 intel_i9xx_fini_flush(dev);
205 intel_i8xx_fini_flush(dev);
/* Public flush entry used by the DRM core: dispatch to the
 * per-generation flush routine (dispatch condition and any trailing
 * lines fall outside this excerpt). */
208 void drm_agp_chipset_flush(struct drm_device *dev)
211 intel_i9xx_flush_page(dev);
213 intel_i8xx_flush_page(dev);