[intel] Quirk away MSI support on 945G/GM.
[platform/upstream/libdrm.git] / linux-core / xgi_cmdlist.c
1 /****************************************************************************
2  * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
3  *
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining
7  * a copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation on the rights to use, copy, modify, merge,
10  * publish, distribute, sublicense, and/or sell copies of the Software,
11  * and to permit persons to whom the Software is furnished to do so,
12  * subject to the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial
16  * portions of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
21  * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
23  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
24  * DEALINGS IN THE SOFTWARE.
25  ***************************************************************************/
26
27 #include "xgi_drv.h"
28 #include "xgi_regs.h"
29 #include "xgi_misc.h"
30 #include "xgi_cmdlist.h"
31
32 static void xgi_emit_flush(struct xgi_info * info, bool stop);
33 static void xgi_emit_nop(struct xgi_info * info);
34 static unsigned int get_batch_command(enum xgi_batch_type type);
35 static void triggerHWCommandList(struct xgi_info * info);
36 static void xgi_cmdlist_reset(struct xgi_info * info);
37
38
/**
 * Graphic engine register (2d/3d) accessing interface
 */
/* Write one 32-bit value to a graphics engine (2d/3d) MMIO register.
 * @map:  the MMIO register mapping (info->mmio_map at all call sites)
 * @addr: byte offset of the register within the mapping
 * @data: value to write
 */
static inline void dwWriteReg(struct drm_map * map, u32 addr, u32 data)
{
#ifdef XGI_MMIO_DEBUG
	/* Compile-time-enabled trace of every register write. */
	DRM_INFO("mmio_map->handle = 0x%p, addr = 0x%x, data = 0x%x\n",
		 map->handle, addr, data);
#endif
	DRM_WRITE32(map, addr, data);
}
50
51
52 int xgi_cmdlist_initialize(struct xgi_info * info, size_t size,
53                            struct drm_file * filp)
54 {
55         struct xgi_mem_alloc mem_alloc = {
56                 .location = XGI_MEMLOC_NON_LOCAL,
57                 .size = size,
58         };
59         int err;
60
61         err = xgi_alloc(info, &mem_alloc, filp);
62         if (err) {
63                 return err;
64         }
65
66         info->cmdring.ptr = xgi_find_pcie_virt(info, mem_alloc.hw_addr);
67         info->cmdring.size = mem_alloc.size;
68         info->cmdring.ring_hw_base = mem_alloc.hw_addr;
69         info->cmdring.last_ptr = NULL;
70         info->cmdring.ring_offset = 0;
71
72         return 0;
73 }
74
75
76 /**
77  * get_batch_command - Get the command ID for the current begin type.
78  * @type: Type of the current batch
79  *
80  * See section 3.2.2 "Begin" (page 15) of the 3D SPG.
81  *
82  * This function assumes that @type is on the range [0,3].
83  */
84 unsigned int get_batch_command(enum xgi_batch_type type)
85 {
86         static const unsigned int ports[4] = {
87                 0x30 >> 2, 0x40 >> 2, 0x50 >> 2, 0x20 >> 2
88         };
89
90         return ports[type];
91 }
92
93
94 int xgi_submit_cmdlist(struct drm_device * dev, void * data,
95                        struct drm_file * filp)
96 {
97         struct xgi_info *const info = dev->dev_private;
98         const struct xgi_cmd_info *const pCmdInfo =
99                 (struct xgi_cmd_info *) data;
100         const unsigned int cmd = get_batch_command(pCmdInfo->type);
101         u32 begin[4];
102
103
104         begin[0] = (cmd << 24) | BEGIN_VALID_MASK
105                 | (BEGIN_BEGIN_IDENTIFICATION_MASK & info->next_sequence);
106         begin[1] = BEGIN_LINK_ENABLE_MASK | pCmdInfo->size;
107         begin[2] = pCmdInfo->hw_addr >> 4;
108         begin[3] = 0;
109
110         if (info->cmdring.last_ptr == NULL) {
111                 const unsigned int portOffset = BASE_3D_ENG + (cmd << 2);
112
113
114                 /* Enable PCI Trigger Mode
115                  */
116                 dwWriteReg(info->mmio_map,
117                            BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS,
118                            (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) |
119                            M2REG_CLEAR_COUNTERS_MASK | 0x08 |
120                            M2REG_PCI_TRIGGER_MODE_MASK);
121
122                 dwWriteReg(info->mmio_map,
123                            BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS,
124                            (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) | 0x08 |
125                            M2REG_PCI_TRIGGER_MODE_MASK);
126
127
128                 /* Send PCI begin command
129                  */
130                 dwWriteReg(info->mmio_map, portOffset,      begin[0]);
131                 dwWriteReg(info->mmio_map, portOffset +  4, begin[1]);
132                 dwWriteReg(info->mmio_map, portOffset +  8, begin[2]);
133                 dwWriteReg(info->mmio_map, portOffset + 12, begin[3]);
134         } else {
135                 DRM_DEBUG("info->cmdring.last_ptr != NULL\n");
136
137                 if (pCmdInfo->type == BTYPE_3D) {
138                         xgi_emit_flush(info, FALSE);
139                 }
140
141                 info->cmdring.last_ptr[1] = cpu_to_le32(begin[1]);
142                 info->cmdring.last_ptr[2] = cpu_to_le32(begin[2]);
143                 info->cmdring.last_ptr[3] = cpu_to_le32(begin[3]);
144                 DRM_WRITEMEMORYBARRIER();
145                 info->cmdring.last_ptr[0] = cpu_to_le32(begin[0]);
146
147                 triggerHWCommandList(info);
148         }
149
150         info->cmdring.last_ptr = xgi_find_pcie_virt(info, pCmdInfo->hw_addr);
151         drm_fence_flush_old(info->dev, 0, info->next_sequence);
152         return 0;
153 }
154
155
156 /*
157     state:      0 - console
158                 1 - graphic
159                 2 - fb
160                 3 - logout
161 */
162 int xgi_state_change(struct xgi_info * info, unsigned int to,
163                      unsigned int from)
164 {
165 #define STATE_CONSOLE   0
166 #define STATE_GRAPHIC   1
167 #define STATE_FBTERM    2
168 #define STATE_LOGOUT    3
169 #define STATE_REBOOT    4
170 #define STATE_SHUTDOWN  5
171
172         if ((from == STATE_GRAPHIC) && (to == STATE_CONSOLE)) {
173                 DRM_INFO("Leaving graphical mode (probably VT switch)\n");
174         } else if ((from == STATE_CONSOLE) && (to == STATE_GRAPHIC)) {
175                 DRM_INFO("Entering graphical mode (probably VT switch)\n");
176                 xgi_cmdlist_reset(info);
177         } else if ((from == STATE_GRAPHIC)
178                    && ((to == STATE_LOGOUT)
179                        || (to == STATE_REBOOT)
180                        || (to == STATE_SHUTDOWN))) {
181                 DRM_INFO("Leaving graphical mode (probably X shutting down)\n");
182         } else {
183                 DRM_ERROR("Invalid state change.\n");
184                 return -EINVAL;
185         }
186
187         return 0;
188 }
189
190
191 int xgi_state_change_ioctl(struct drm_device * dev, void * data,
192                            struct drm_file * filp)
193 {
194         struct xgi_state_info *const state =
195                 (struct xgi_state_info *) data;
196         struct xgi_info *info = dev->dev_private;
197
198
199         return xgi_state_change(info, state->_toState, state->_fromState);
200 }
201
202
203 void xgi_cmdlist_reset(struct xgi_info * info)
204 {
205         info->cmdring.last_ptr = NULL;
206         info->cmdring.ring_offset = 0;
207 }
208
209
210 void xgi_cmdlist_cleanup(struct xgi_info * info)
211 {
212         if (info->cmdring.ring_hw_base != 0) {
213                 /* If command lists have been issued, terminate the command
214                  * list chain with a flush command.
215                  */
216                 if (info->cmdring.last_ptr != NULL) {
217                         xgi_emit_flush(info, FALSE);
218                         xgi_emit_nop(info);
219                 }
220
221                 xgi_waitfor_pci_idle(info);
222
223                 (void) memset(&info->cmdring, 0, sizeof(info->cmdring));
224         }
225 }
226
227 static void triggerHWCommandList(struct xgi_info * info)
228 {
229         static unsigned int s_triggerID = 1;
230
231         dwWriteReg(info->mmio_map,
232                    BASE_3D_ENG + M2REG_PCI_TRIGGER_REGISTER_ADDRESS,
233                    0x05000000 + (0x0ffff & s_triggerID++));
234 }
235
236
237 /**
238  * Emit a flush to the CRTL command stream.
239  * @info XGI info structure
240  *
241  * This function assumes info->cmdring.ptr is non-NULL.
242  */
243 void xgi_emit_flush(struct xgi_info * info, bool stop)
244 {
245         const u32 flush_command[8] = {
246                 ((0x10 << 24)
247                  | (BEGIN_BEGIN_IDENTIFICATION_MASK & info->next_sequence)),
248                 BEGIN_LINK_ENABLE_MASK | (0x00004),
249                 0x00000000, 0x00000000,
250
251                 /* Flush the 2D engine with the default 32 clock delay.
252                  */
253                 M2REG_FLUSH_ENGINE_COMMAND | M2REG_FLUSH_2D_ENGINE_MASK,
254                 M2REG_FLUSH_ENGINE_COMMAND | M2REG_FLUSH_2D_ENGINE_MASK,
255                 M2REG_FLUSH_ENGINE_COMMAND | M2REG_FLUSH_2D_ENGINE_MASK,
256                 M2REG_FLUSH_ENGINE_COMMAND | M2REG_FLUSH_2D_ENGINE_MASK,
257         };
258         const unsigned int flush_size = sizeof(flush_command);
259         u32 *batch_addr;
260         u32 hw_addr;
261         unsigned int i;
262
263
264         /* check buf is large enough to contain a new flush batch */
265         if ((info->cmdring.ring_offset + flush_size) >= info->cmdring.size) {
266                 info->cmdring.ring_offset = 0;
267         }
268
269         hw_addr = info->cmdring.ring_hw_base
270                 + info->cmdring.ring_offset;
271         batch_addr = info->cmdring.ptr
272                 + (info->cmdring.ring_offset / 4);
273
274         for (i = 0; i < (flush_size / 4); i++) {
275                 batch_addr[i] = cpu_to_le32(flush_command[i]);
276         }
277
278         if (stop) {
279                 *batch_addr |= cpu_to_le32(BEGIN_STOP_STORE_CURRENT_POINTER_MASK);
280         }
281
282         info->cmdring.last_ptr[1] = cpu_to_le32(BEGIN_LINK_ENABLE_MASK | (flush_size / 4));
283         info->cmdring.last_ptr[2] = cpu_to_le32(hw_addr >> 4);
284         info->cmdring.last_ptr[3] = 0;
285         DRM_WRITEMEMORYBARRIER();
286         info->cmdring.last_ptr[0] = cpu_to_le32((get_batch_command(BTYPE_CTRL) << 24)
287                 | (BEGIN_VALID_MASK));
288
289         triggerHWCommandList(info);
290
291         info->cmdring.ring_offset += flush_size;
292         info->cmdring.last_ptr = batch_addr;
293 }
294
295
296 /**
297  * Emit an empty command to the CRTL command stream.
298  * @info XGI info structure
299  *
300  * This function assumes info->cmdring.ptr is non-NULL.  In addition, since
301  * this function emits a command that does not have linkage information,
302  * it sets info->cmdring.ptr to NULL.
303  */
304 void xgi_emit_nop(struct xgi_info * info)
305 {
306         info->cmdring.last_ptr[1] = cpu_to_le32(BEGIN_LINK_ENABLE_MASK
307                 | (BEGIN_BEGIN_IDENTIFICATION_MASK & info->next_sequence));
308         info->cmdring.last_ptr[2] = 0;
309         info->cmdring.last_ptr[3] = 0;
310         DRM_WRITEMEMORYBARRIER();
311         info->cmdring.last_ptr[0] = cpu_to_le32((get_batch_command(BTYPE_CTRL) << 24)
312                 | (BEGIN_VALID_MASK));
313
314         triggerHWCommandList(info);
315
316         info->cmdring.last_ptr = NULL;
317 }
318
319
320 void xgi_emit_irq(struct xgi_info * info)
321 {
322         if (info->cmdring.last_ptr == NULL)
323                 return;
324
325         xgi_emit_flush(info, TRUE);
326 }