2 * Copyright © 2008 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Eric Anholt <eric@anholt.net>
33 /** @file i915_gem_tiling.c
35 * Support for managing tiling state of buffer objects.
37 * The idea behind tiling is to increase cache hit rates by rearranging
38 * pixel data so that a group of pixel accesses are in the same cacheline.
39 * Performance improvement from doing this on the back/depth buffer are on
42 * Intel architectures make this somewhat more complicated, though, by
43 * adjustments made to addressing of data when the memory is in interleaved
44 * mode (matched pairs of DIMMS) to improve memory bandwidth.
45 * For interleaved memory, the CPU sends every sequential 64 bytes
46 * to an alternate memory channel so it can get the bandwidth from both.
48 * The GPU also rearranges its accesses for increased bandwidth to interleaved
49 * memory, and it matches what the CPU does for non-tiled. However, when tiled
50 * it does it a little differently, since one walks addresses not just in the
51 * X direction but also Y. So, along with alternating channels when bit
52 * 6 of the address flips, it also alternates when other bits flip -- Bits 9
53 * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
54 * are common to both the 915 and 965-class hardware.
56 * The CPU also sometimes XORs in higher bits as well, to improve
57 * bandwidth doing strided access like we do so frequently in graphics. This
58 * is called "Channel XOR Randomization" in the MCH documentation. The result
59 * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
62 * All of this bit 6 XORing has an effect on our memory management,
63 * as we need to make sure that the 3d driver can correctly address object
66 * If we don't have interleaved memory, all tiling is safe and no swizzling is
69 * When bit 17 is XORed in, we simply refuse to tile at all. Bit
70 * 17 is not just a page offset, so as we page an object out and back in,
71 * individual pages in it will have different bit 17 addresses, resulting in
72 * each 64 bytes being swapped with its neighbor!
74 * Otherwise, if interleaved, we have to tell the 3d driver what the address
75 * swizzling it needs to do is, since it's writing with the CPU to the pages
76 * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
77 * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
78 * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
79 * to match what the GPU expects.
83 * Detects bit 6 swizzling of address lookup between IGD access and CPU
84 * access through main memory.
/*
 * NOTE(review): this extract has lines elided (the fused original line
 * numbers jump, e.g. 97 -> 99, 187 -> 190).  The missing lines almost
 * certainly include the `if (bridge == NULL)` / `if (ret != 0)` error
 * checks and returns after each failing call below, the `else` lines of
 * several branches, the switch `break;` statements, and closing braces.
 * Do not assume the error paths are absent in the real file -- confirm
 * against the full source before acting on this review.
 */
87 i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
89 struct drm_i915_private *dev_priv = dev->dev_private;
90 struct pci_dev *bridge;
/* Default to UNKNOWN: callers treat UNKNOWN swizzle as "refuse to tile". */
91 uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
92 uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
/* The memory controller (MCH) lives at PCI device 0:0.0 on these chipsets. */
97 bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
99 DRM_ERROR("Couldn't get bridge device\n");
103 ret = pci_enable_device(bridge);
105 DRM_ERROR("pci_enable_device failed: %d\n", ret);
/*
 * MCHBAR config-space offset differs by generation (0x48 vs 0x44);
 * the selecting `if`/`else` condition lines are elided from this
 * extract -- presumably keyed on IS_I965G() or similar; verify.
 */
110 mchbar_offset = 0x48;
112 mchbar_offset = 0x44;
114 /* Use resource 2 for our BAR that's stashed in a nonstandard location,
115 * since the bridge would only ever use standard BARs 0-1 (though it
118 ret = pci_read_base(bridge, pci_bar_mem64, &bridge->resource[2],
121 DRM_ERROR("pci_read_base failed: %d\n", ret);
/* Map the MCH's MMIO window so we can read its DRAM-configuration regs. */
125 mchbar = ioremap(pci_resource_start(bridge, 2),
126 pci_resource_len(bridge, 2));
127 if (mchbar == NULL) {
128 DRM_ERROR("Couldn't map MCHBAR to determine tile swizzling\n");
/* Desktop 965 (not the mobile GM965): interleave is reported by CHDECMISC. */
132 if (IS_I965G(dev) && !IS_I965GM(dev)) {
135 /* On the 965, channel interleave appears to be determined by
136 * the flex bit. If flex is set, then the ranks (sides of a
137 * DIMM) of memory will be "stacked" (physical addresses walk
138 * through one rank then move on to the next, flipping channels
139 * or not depending on rank configuration). The GPU in this
140 * case does exactly the same addressing as the CPU.
142 * Unlike the 945, channel randomization based does not
143 * appear to be available.
145 * XXX: While the G965 doesn't appear to do any interleaving
146 * when the DIMMs are not exactly matched, the G4x chipsets
147 * might be for "L-shaped" configurations, and will need to be
150 * L-shaped configuration:
154 * |DIMM2| <-- non-interleaved
158 * |DIMM0| |DIMM1| <-- interleaved area
161 chdecmisc = readb(mchbar + CHDECMISC);
/* An all-ones read from MMIO typically means the read failed; leave
 * swizzle UNKNOWN so tiling stays disabled. */
163 if (chdecmisc == 0xff) {
164 DRM_ERROR("Couldn't read from MCHBAR. "
165 "Disabling tiling.\n");
166 } else if (chdecmisc & CHDECMISC_FLEXMEMORY) {
/* Flex/stacked ranks: GPU addressing matches CPU, no swizzle needed. */
167 swizzle_x = I915_BIT_6_SWIZZLE_NONE;
168 swizzle_y = I915_BIT_6_SWIZZLE_NONE;
/* (elided `else`): interleaved -- GPU XORs bits 9/10 into bit 6. */
170 swizzle_x = I915_BIT_6_SWIZZLE_9_10;
171 swizzle_y = I915_BIT_6_SWIZZLE_9;
173 } else if (IS_I9XX(dev)) {
176 /* On 915-945 and GM965, channel interleave by the CPU is
177 * determined by DCC. The CPU will alternate based on bit 6
178 * in interleaved mode, and the GPU will then also alternate
179 * on bit 6, 9, and 10 for X, but the CPU may also optionally
180 * alternate based on bit 17 (XOR not disabled and XOR
183 dcc = readl(mchbar + DCC);
184 switch (dcc & DCC_ADDRESSING_MODE_MASK) {
185 case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
186 case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
/* Single channel / asymmetric: no channel interleave, no swizzle.
 * (The `break;` after this case is elided from this extract.) */
187 swizzle_x = I915_BIT_6_SWIZZLE_NONE;
188 swizzle_y = I915_BIT_6_SWIZZLE_NONE;
190 case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
/* 915-class parts, or XOR randomization disabled: only the GPU's
 * bit 9/10 swizzling applies. */
191 if (IS_I915G(dev) || IS_I915GM(dev) ||
192 dcc & DCC_CHANNEL_XOR_DISABLE) {
193 swizzle_x = I915_BIT_6_SWIZZLE_9_10;
194 swizzle_y = I915_BIT_6_SWIZZLE_9;
195 } else if (IS_I965GM(dev)) {
196 /* GM965 only does bit 11-based channel
199 swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
200 swizzle_y = I915_BIT_6_SWIZZLE_9_11;
/* (elided `else`): possible bit-17 randomization -- per the file
 * header, tiling must be refused entirely in that case. */
202 /* Bit 17 or perhaps other swizzling */
203 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
204 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
/* All-ones readl => MMIO read failure; force UNKNOWN to disable tiling. */
208 if (dcc == 0xffffffff) {
209 DRM_ERROR("Couldn't read from MCHBAR. "
210 "Disabling tiling.\n");
211 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
212 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
/* (elided `else`): pre-9xx hardware (e.g. 865). */
215 /* As far as we know, the 865 doesn't have these bit 6
218 swizzle_x = I915_BIT_6_SWIZZLE_NONE;
219 swizzle_y = I915_BIT_6_SWIZZLE_NONE;
/* Publish the detected swizzle modes for the set/get_tiling ioctls. */
224 dev_priv->mm.bit_6_swizzle_x = swizzle_x;
225 dev_priv->mm.bit_6_swizzle_y = swizzle_y;
229 * Sets the tiling mode of an object, returning the required swizzling of
230 * bit 6 of addresses in the object.
/*
 * Ioctl handler: sets the tiling mode of a GEM object and reports back
 * the bit-6 swizzling userspace must apply when accessing it with the CPU.
 * If the detected swizzle is UNKNOWN (e.g. bit-17 randomization), the
 * request is downgraded to untiled rather than risking corruption.
 *
 * NOTE(review): lines are elided from this extract -- presumably the
 * NULL check after the object lookup (line 242-243), `else` lines, the
 * closing braces, and the final `return`; confirm in the full source.
 */
233 i915_gem_set_tiling(struct drm_device *dev, void *data,
234 struct drm_file *file_priv)
236 struct drm_i915_gem_set_tiling *args = data;
237 struct drm_i915_private *dev_priv = dev->dev_private;
238 struct drm_gem_object *obj;
239 struct drm_i915_gem_object *obj_priv;
/* Takes a reference on the object; dropped at the bottom. */
241 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
244 obj_priv = obj->driver_private;
246 mutex_lock(&dev->struct_mutex);
248 if (args->tiling_mode == I915_TILING_NONE) {
249 obj_priv->tiling_mode = I915_TILING_NONE;
250 args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
/* (elided `else`): tiled request -- pick the swizzle detected at init. */
252 if (args->tiling_mode == I915_TILING_X)
253 args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
255 args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
256 /* If we can't handle the swizzling, make it untiled. */
257 if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
258 args->tiling_mode = I915_TILING_NONE;
259 args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
/* args->tiling_mode may have been downgraded above. */
262 obj_priv->tiling_mode = args->tiling_mode;
264 mutex_unlock(&dev->struct_mutex);
266 drm_gem_object_unreference(obj);
272 * Returns the current tiling mode and required bit 6 swizzling for the object.
/*
 * Ioctl handler: reports an object's current tiling mode and the bit-6
 * swizzling userspace must apply for CPU access, taken from the per-axis
 * swizzle values detected at driver init.
 *
 * NOTE(review): lines are elided from this extract -- presumably the
 * NULL check after the lookup, the I915_TILING_X / I915_TILING_Y case
 * labels and `break;` statements, closing braces, and the final
 * `return`; confirm in the full source.
 */
275 i915_gem_get_tiling(struct drm_device *dev, void *data,
276 struct drm_file *file_priv)
278 struct drm_i915_gem_get_tiling *args = data;
279 struct drm_i915_private *dev_priv = dev->dev_private;
280 struct drm_gem_object *obj;
281 struct drm_i915_gem_object *obj_priv;
/* Takes a reference on the object; dropped at the bottom. */
283 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
286 obj_priv = obj->driver_private;
288 mutex_lock(&dev->struct_mutex);
290 args->tiling_mode = obj_priv->tiling_mode;
291 switch (obj_priv->tiling_mode) {
/* (elided case label: I915_TILING_X) */
293 args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
/* (elided case label: I915_TILING_Y) */
296 args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
298 case I915_TILING_NONE:
299 args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
/* (elided `default:`) -- should be unreachable; tiling_mode is driver-set. */
302 DRM_ERROR("unknown tiling mode\n");
305 mutex_unlock(&dev->struct_mutex);
307 drm_gem_object_unreference(obj);