2 /****************************************************************************
3 * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
5 * All Rights Reserved. *
7 * Permission is hereby granted, free of charge, to any person obtaining
8 * a copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation on the rights to use, copy, modify, merge,
11 * publish, distribute, sublicense, and/or sell copies of the Software,
12 * and to permit persons to whom the Software is furnished to do so,
13 * subject to the following conditions:
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial
17 * portions of the Software.
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
20 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
22 * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR
23 * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
24 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
25 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26 * DEALINGS IN THE SOFTWARE.
27 ***************************************************************************/
32 #include "xgi_cmdlist.h"
/* Global bookkeeping for the single command-list ring buffer.
 * Written by xgi_cmdlist_initialize()/xgi_cmdlist_cleanup() and updated on
 * every batch submission; no locking is visible in this file.
 * NOTE(review): confirm the caller serializes submissions — TODO. */
struct xgi_cmdring_info s_cmdring;

/* Internal helpers, defined below. */
static void addFlush2D(struct xgi_info * info);
static unsigned int get_batch_command(enum xgi_batch_type type);
static void triggerHWCommandList(struct xgi_info * info);
static void xgi_cmdlist_reset(void);
41 int xgi_cmdlist_initialize(struct xgi_info * info, size_t size)
43 struct xgi_mem_alloc mem_alloc = {
49 err = xgi_pcie_alloc(info, &mem_alloc, 0);
54 s_cmdring._cmdRingSize = mem_alloc.size;
55 s_cmdring._cmdRingBuffer = mem_alloc.hw_addr;
56 s_cmdring._cmdRingAllocOffset = mem_alloc.offset;
57 s_cmdring._lastBatchStartAddr = 0;
58 s_cmdring._cmdRingOffset = 0;
65 * get_batch_command - Get the command ID for the current begin type.
66 * @type: Type of the current batch
68 * See section 3.2.2 "Begin" (page 15) of the 3D SPG.
70 * This function assumes that @type is on the range [0,3].
72 unsigned int get_batch_command(enum xgi_batch_type type)
74 static const unsigned int ports[4] = {
75 0x30 >> 2, 0x40 >> 2, 0x50 >> 2, 0x20 >> 2
82 static void xgi_submit_cmdlist(struct xgi_info * info,
83 const struct xgi_cmd_info * pCmdInfo)
85 const unsigned int cmd = get_batch_command(pCmdInfo->type);
89 begin[0] = (cmd << 24) | BEGIN_VALID_MASK
90 | (BEGIN_BEGIN_IDENTIFICATION_MASK & pCmdInfo->id);
91 begin[1] = BEGIN_LINK_ENABLE_MASK | pCmdInfo->size;
92 begin[2] = pCmdInfo->hw_addr >> 4;
95 if (s_cmdring._lastBatchStartAddr == 0) {
96 const unsigned int portOffset = BASE_3D_ENG + (cmd << 2);
99 /* Enable PCI Trigger Mode
101 DRM_INFO("Enable PCI Trigger Mode \n");
103 dwWriteReg(info->mmio_map,
104 BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS,
105 (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) |
106 M2REG_CLEAR_COUNTERS_MASK | 0x08 |
107 M2REG_PCI_TRIGGER_MODE_MASK);
109 dwWriteReg(info->mmio_map,
110 BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS,
111 (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) | 0x08 |
112 M2REG_PCI_TRIGGER_MODE_MASK);
115 /* Send PCI begin command
117 DRM_INFO("portOffset=%d, beginPort=%d\n",
118 portOffset, cmd << 2);
120 dwWriteReg(info->mmio_map, portOffset, begin[0]);
121 dwWriteReg(info->mmio_map, portOffset + 4, begin[1]);
122 dwWriteReg(info->mmio_map, portOffset + 8, begin[2]);
123 dwWriteReg(info->mmio_map, portOffset + 12, begin[3]);
125 u32 *lastBatchVirtAddr;
127 DRM_INFO("s_cmdring._lastBatchStartAddr != 0\n");
129 if (pCmdInfo->type == BTYPE_3D) {
134 xgi_find_pcie_virt(info,
135 s_cmdring._lastBatchStartAddr);
137 lastBatchVirtAddr[1] = begin[1];
138 lastBatchVirtAddr[2] = begin[2];
139 lastBatchVirtAddr[3] = begin[3];
141 lastBatchVirtAddr[0] = begin[0];
143 triggerHWCommandList(info);
146 s_cmdring._lastBatchStartAddr = pCmdInfo->hw_addr;
147 DRM_INFO("%s: exit\n", __func__);
151 int xgi_submit_cmdlist_ioctl(DRM_IOCTL_ARGS)
154 struct xgi_cmd_info cmd_list;
155 struct xgi_info *info = dev->dev_private;
157 DRM_COPY_FROM_USER_IOCTL(cmd_list,
158 (struct xgi_cmd_info __user *) data,
161 if (cmd_list.type > BTYPE_CTRL) {
162 return DRM_ERR(EINVAL);
165 xgi_submit_cmdlist(info, &cmd_list);
176 int xgi_state_change(struct xgi_info * info, unsigned int to,
179 #define STATE_CONSOLE 0
180 #define STATE_GRAPHIC 1
181 #define STATE_FBTERM 2
182 #define STATE_LOGOUT 3
183 #define STATE_REBOOT 4
184 #define STATE_SHUTDOWN 5
186 if ((from == STATE_GRAPHIC) && (to == STATE_CONSOLE)) {
187 DRM_INFO("[kd] I see, now is to leaveVT\n");
188 // stop to received batch
189 } else if ((from == STATE_CONSOLE) && (to == STATE_GRAPHIC)) {
190 DRM_INFO("[kd] I see, now is to enterVT\n");
192 } else if ((from == STATE_GRAPHIC)
193 && ((to == STATE_LOGOUT)
194 || (to == STATE_REBOOT)
195 || (to == STATE_SHUTDOWN))) {
196 DRM_INFO("[kd] I see, not is to exit from X\n");
197 // stop to received batch
199 DRM_ERROR("[kd] Should not happen\n");
200 return DRM_ERR(EINVAL);
207 int xgi_state_change_ioctl(DRM_IOCTL_ARGS)
210 struct xgi_state_info state;
211 struct xgi_info *info = dev->dev_private;
213 DRM_COPY_FROM_USER_IOCTL(state, (struct xgi_state_info __user *) data,
216 return xgi_state_change(info, state._toState, state._fromState);
220 void xgi_cmdlist_reset(void)
222 s_cmdring._lastBatchStartAddr = 0;
223 s_cmdring._cmdRingOffset = 0;
226 void xgi_cmdlist_cleanup(struct xgi_info * info)
228 if (s_cmdring._cmdRingBuffer != 0) {
229 xgi_pcie_free(info, s_cmdring._cmdRingAllocOffset, NULL);
230 s_cmdring._cmdRingBuffer = 0;
231 s_cmdring._cmdRingOffset = 0;
232 s_cmdring._cmdRingSize = 0;
236 static void triggerHWCommandList(struct xgi_info * info)
238 static unsigned int s_triggerID = 1;
240 dwWriteReg(info->mmio_map,
241 BASE_3D_ENG + M2REG_PCI_TRIGGER_REGISTER_ADDRESS,
242 0x05000000 + (0x0ffff & s_triggerID++));
246 static void addFlush2D(struct xgi_info * info)
248 u32 *flushBatchVirtAddr;
249 u32 flushBatchHWAddr;
250 u32 *lastBatchVirtAddr;
252 /* check buf is large enough to contain a new flush batch */
253 if ((s_cmdring._cmdRingOffset + 0x20) >= s_cmdring._cmdRingSize) {
254 s_cmdring._cmdRingOffset = 0;
257 flushBatchHWAddr = s_cmdring._cmdRingBuffer + s_cmdring._cmdRingOffset;
258 flushBatchVirtAddr = xgi_find_pcie_virt(info, flushBatchHWAddr);
260 /* not using memcpy for I assume the address is discrete */
261 *(flushBatchVirtAddr + 0) = 0x10000000;
262 *(flushBatchVirtAddr + 1) = 0x80000004; /* size = 0x04 dwords */
263 *(flushBatchVirtAddr + 2) = 0x00000000;
264 *(flushBatchVirtAddr + 3) = 0x00000000;
265 *(flushBatchVirtAddr + 4) = FLUSH_2D;
266 *(flushBatchVirtAddr + 5) = FLUSH_2D;
267 *(flushBatchVirtAddr + 6) = FLUSH_2D;
268 *(flushBatchVirtAddr + 7) = FLUSH_2D;
270 // ASSERT(s_cmdring._lastBatchStartAddr != NULL);
272 xgi_find_pcie_virt(info, s_cmdring._lastBatchStartAddr);
274 lastBatchVirtAddr[1] = BEGIN_LINK_ENABLE_MASK + 0x08;
275 lastBatchVirtAddr[2] = flushBatchHWAddr >> 4;
276 lastBatchVirtAddr[3] = 0;
278 lastBatchVirtAddr[0] = (get_batch_command(BTYPE_CTRL) << 24)
279 | (BEGIN_VALID_MASK);
281 triggerHWCommandList(info);
283 s_cmdring._cmdRingOffset += 0x20;
284 s_cmdring._lastBatchStartAddr = flushBatchHWAddr;