2 /****************************************************************************
3 * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
5 * All Rights Reserved. *
7 * Permission is hereby granted, free of charge, to any person obtaining
8 * a copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation on the rights to use, copy, modify, merge,
11 * publish, distribute, sublicense, and/or sell copies of the Software,
12 * and to permit persons to whom the Software is furnished to do so,
13 * subject to the following conditions:
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial
17 * portions of the Software.
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
20 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
22 * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR
23 * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
24 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
25 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26 * DEALINGS IN THE SOFTWARE.
27 ***************************************************************************/
32 #include "xgi_cmdlist.h"
34 static void addFlush2D(struct xgi_info * info);
35 static unsigned int get_batch_command(enum xgi_batch_type type);
36 static void triggerHWCommandList(struct xgi_info * info);
37 static void xgi_cmdlist_reset(struct xgi_info * info);
/*
 * xgi_cmdlist_initialize - allocate the command-ring buffer from PCIe
 * memory and record its location in info->cmdring.
 * @info: per-device private data
 * @size: requested ring size in bytes
 *
 * NOTE(review): this listing is elided (original lines 40, 42-46, 48-51
 * and the trailing return are missing), so the mem_alloc initializer,
 * the error check on 'err', and the return value are not visible here.
 */
39 int xgi_cmdlist_initialize(struct xgi_info * info, size_t size)
41 struct xgi_mem_alloc mem_alloc = {
47 err = xgi_pcie_alloc(info, &mem_alloc, 0);
/* Cache the allocation: hw_addr is the address the GPU dereferences,
 * offset is kept so xgi_cmdlist_cleanup() can free the region later. */
52 info->cmdring._cmdRingSize = mem_alloc.size;
53 info->cmdring._cmdRingBuffer = mem_alloc.hw_addr;
54 info->cmdring._cmdRingAllocOffset = mem_alloc.offset;
/* Ring starts empty: no batch submitted yet, write offset at zero. */
55 info->cmdring._lastBatchStartAddr = 0;
56 info->cmdring._cmdRingOffset = 0;
63 * get_batch_command - Get the command ID for the current begin type.
64 * @type: Type of the current batch
66 * See section 3.2.2 "Begin" (page 15) of the 3D SPG.
68 * This function assumes that @type is on the range [0,3].
70 unsigned int get_batch_command(enum xgi_batch_type type)
/* Lookup table mapping batch type -> begin-port index. The values are
 * hardware port offsets (0x20..0x50) pre-shifted right by 2 so that
 * callers can recover the byte offset with (cmd << 2); see the
 * portOffset computation in xgi_submit_cmdlist().
 * NOTE(review): the return statement (presumably ports[type]) is on an
 * elided line of this listing. */
72 static const unsigned int ports[4] = {
73 0x30 >> 2, 0x40 >> 2, 0x50 >> 2, 0x20 >> 2
/*
 * xgi_submit_cmdlist - hand one user-built command batch to the engine.
 * @info:     per-device private data
 * @pCmdInfo: batch descriptor (type, id, size, hw_addr)
 *
 * Builds a 4-dword BEGIN header for the batch, then either (a) kick-starts
 * the engine through MMIO if this is the first batch since reset, or
 * (b) links the new batch onto the previously submitted one and triggers
 * the hardware to follow the link.
 *
 * NOTE(review): this listing is elided — the declaration of begin[],
 * the assignment of begin[3], the addFlush2D() call in the BTYPE_3D
 * branch, and the lvalue receiving xgi_find_pcie_virt()'s result are all
 * on missing lines.
 */
80 static void xgi_submit_cmdlist(struct xgi_info * info,
81 const struct xgi_cmd_info * pCmdInfo)
83 const unsigned int cmd = get_batch_command(pCmdInfo->type);
/* Assemble the BEGIN header: command id + valid bit + caller's batch id,
 * then the link-enable bit with the batch size, then the batch's hardware
 * address (16-byte aligned, hence the >> 4). */
87 begin[0] = (cmd << 24) | BEGIN_VALID_MASK
88 | (BEGIN_BEGIN_IDENTIFICATION_MASK & pCmdInfo->id);
89 begin[1] = BEGIN_LINK_ENABLE_MASK | pCmdInfo->size;
90 begin[2] = pCmdInfo->hw_addr >> 4;
/* First batch since reset: no previous batch to link from, so program
 * the engine directly over MMIO. */
93 if (info->cmdring._lastBatchStartAddr == 0) {
94 const unsigned int portOffset = BASE_3D_ENG + (cmd << 2);
97 /* Enable PCI Trigger Mode
99 DRM_INFO("Enable PCI Trigger Mode \n");
/* Two writes to the auto-link setting register: the first also clears
 * the hardware counters, the second leaves trigger mode armed.
 * NOTE(review): the meaning of the raw 0x08 bit is not visible here —
 * confirm against the register spec. */
101 dwWriteReg(info->mmio_map,
102 BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS,
103 (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) |
104 M2REG_CLEAR_COUNTERS_MASK | 0x08 |
105 M2REG_PCI_TRIGGER_MODE_MASK);
107 dwWriteReg(info->mmio_map,
108 BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS,
109 (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) | 0x08 |
110 M2REG_PCI_TRIGGER_MODE_MASK);
113 /* Send PCI begin command
115 DRM_INFO("portOffset=%d, beginPort=%d\n",
116 portOffset, cmd << 2);
/* Write the 4-dword BEGIN header straight into the begin port. */
118 dwWriteReg(info->mmio_map, portOffset, begin[0]);
119 dwWriteReg(info->mmio_map, portOffset + 4, begin[1]);
120 dwWriteReg(info->mmio_map, portOffset + 8, begin[2]);
121 dwWriteReg(info->mmio_map, portOffset + 12, begin[3]);
/* Subsequent batch: patch the previous batch's trailing link slot so the
 * engine chains into this one. */
123 u32 *lastBatchVirtAddr;
125 DRM_INFO("info->cmdring._lastBatchStartAddr != 0\n");
/* NOTE(review): for BTYPE_3D the elided lines presumably insert a 2D
 * flush (addFlush2D) before linking — confirm against full source. */
127 if (pCmdInfo->type == BTYPE_3D) {
/* Translate the previous batch's hardware address back to a CPU pointer.
 * NOTE(review): the assignment target (lastBatchVirtAddr =) is on an
 * elided line of this listing. */
132 xgi_find_pcie_virt(info,
133 info->cmdring._lastBatchStartAddr);
/* Fill words 1..3 first, then word 0 last: word 0 carries the valid bit,
 * so the hardware cannot observe a half-written link. */
135 lastBatchVirtAddr[1] = begin[1];
136 lastBatchVirtAddr[2] = begin[2];
137 lastBatchVirtAddr[3] = begin[3];
139 lastBatchVirtAddr[0] = begin[0];
/* Kick the engine to follow the newly written link. */
141 triggerHWCommandList(info);
/* Remember this batch so the next submission can link onto it. */
144 info->cmdring._lastBatchStartAddr = pCmdInfo->hw_addr;
145 DRM_INFO("%s: exit\n", __func__);
/*
 * xgi_submit_cmdlist_ioctl - DRM ioctl entry: copy the batch descriptor
 * from user space, validate its type, and submit it.
 *
 * NOTE(review): the sizeof argument of the copy and the final return are
 * on elided lines of this listing.
 */
149 int xgi_submit_cmdlist_ioctl(DRM_IOCTL_ARGS)
152 struct xgi_cmd_info cmd_list;
153 struct xgi_info *info = dev->dev_private;
155 DRM_COPY_FROM_USER_IOCTL(cmd_list,
156 (struct xgi_cmd_info __user *) data,
/* Reject out-of-range batch types: get_batch_command() indexes a 4-entry
 * table and assumes type is in [0,3] (BTYPE_CTRL being the last value). */
159 if (cmd_list.type > BTYPE_CTRL) {
160 return DRM_ERR(EINVAL);
163 xgi_submit_cmdlist(info, &cmd_list);
/*
 * xgi_state_change - react to a VT/session state transition reported by
 * user space (console <-> graphic, logout/reboot/shutdown).
 * @info: per-device private data
 * @to:   target state (STATE_* below)
 *
 * NOTE(review): the 'from' parameter and the closing else/return are on
 * elided lines of this listing.
 */
174 int xgi_state_change(struct xgi_info * info, unsigned int to,
177 #define STATE_CONSOLE 0
178 #define STATE_GRAPHIC 1
179 #define STATE_FBTERM 2
180 #define STATE_LOGOUT 3
181 #define STATE_REBOOT 4
182 #define STATE_SHUTDOWN 5
/* Leaving X for the console: nothing to do here beyond logging. */
184 if ((from == STATE_GRAPHIC) && (to == STATE_CONSOLE)) {
185 DRM_INFO("[kd] I see, now is to leaveVT\n");
186 // stop to received batch
/* Re-entering X: the ring state is stale, start from an empty ring. */
187 } else if ((from == STATE_CONSOLE) && (to == STATE_GRAPHIC)) {
188 DRM_INFO("[kd] I see, now is to enterVT\n");
189 xgi_cmdlist_reset(info);
190 } else if ((from == STATE_GRAPHIC)
191 && ((to == STATE_LOGOUT)
192 || (to == STATE_REBOOT)
193 || (to == STATE_SHUTDOWN))) {
/* NOTE(review): "not is" looks like a typo for "now is" in this log
 * message; left unchanged here since it is runtime output. */
194 DRM_INFO("[kd] I see, not is to exit from X\n");
195 // stop to received batch
/* Any other transition is unexpected and rejected. */
197 DRM_ERROR("[kd] Should not happen\n");
198 return DRM_ERR(EINVAL);
/*
 * xgi_state_change_ioctl - DRM ioctl entry: copy the state descriptor
 * from user space and forward it to xgi_state_change().
 *
 * NOTE(review): the sizeof argument of the copy is on an elided line.
 */
205 int xgi_state_change_ioctl(DRM_IOCTL_ARGS)
208 struct xgi_state_info state;
209 struct xgi_info *info = dev->dev_private;
211 DRM_COPY_FROM_USER_IOCTL(state, (struct xgi_state_info __user *) data,
/* Note the argument order: the ioctl struct is (_fromState, _toState)
 * but xgi_state_change takes (to, from). */
214 return xgi_state_change(info, state._toState, state._fromState);
/*
 * xgi_cmdlist_reset - forget all submitted batches and rewind the ring
 * write offset. The buffer itself stays allocated (cf. cleanup below).
 */
218 void xgi_cmdlist_reset(struct xgi_info * info)
220 info->cmdring._lastBatchStartAddr = 0;
221 info->cmdring._cmdRingOffset = 0;
/*
 * xgi_cmdlist_cleanup - release the PCIe ring allocation made by
 * xgi_cmdlist_initialize() and zero the bookkeeping so a later call
 * is a harmless no-op.
 */
224 void xgi_cmdlist_cleanup(struct xgi_info * info)
226 if (info->cmdring._cmdRingBuffer != 0) {
/* Free by the allocation offset recorded at init time, not by the
 * hardware address. */
227 xgi_pcie_free(info, info->cmdring._cmdRingAllocOffset, NULL);
228 info->cmdring._cmdRingBuffer = 0;
229 info->cmdring._cmdRingOffset = 0;
230 info->cmdring._cmdRingSize = 0;
/*
 * triggerHWCommandList - kick the 3D engine to (re)scan the command list.
 *
 * Writes a monotonically increasing 16-bit trigger ID (plus the fixed
 * 0x05000000 command bits) into the PCI trigger register.
 * NOTE(review): s_triggerID is function-static with no locking — callers
 * are presumably serialized by the DRM lock; confirm before relying on it.
 */
234 static void triggerHWCommandList(struct xgi_info * info)
236 static unsigned int s_triggerID = 1;
238 dwWriteReg(info->mmio_map,
239 BASE_3D_ENG + M2REG_PCI_TRIGGER_REGISTER_ADDRESS,
240 0x05000000 + (0x0ffff & s_triggerID++));
/*
 * addFlush2D - append a small 2D-flush batch (0x20 bytes = 8 dwords) to
 * the kernel ring and link the previously submitted batch onto it, so the
 * 2D pipe is flushed before a following 3D batch runs.
 *
 * NOTE(review): this listing is elided — the assignment target of the
 * second xgi_find_pcie_virt() call (lastBatchVirtAddr =) is on a missing
 * line.
 */
244 static void addFlush2D(struct xgi_info * info)
246 u32 *flushBatchVirtAddr;
247 u32 flushBatchHWAddr;
248 u32 *lastBatchVirtAddr;
250 /* check buf is large enough to contain a new flush batch */
/* Simple wrap-around: if fewer than 0x20 bytes remain, restart at the
 * ring head. NOTE(review): this assumes the hardware has consumed the
 * old data at the head — no completion check is visible here. */
251 if ((info->cmdring._cmdRingOffset + 0x20) >= info->cmdring._cmdRingSize) {
252 info->cmdring._cmdRingOffset = 0;
/* CPU-visible pointer for the slot the GPU will read at flushBatchHWAddr. */
255 flushBatchHWAddr = info->cmdring._cmdRingBuffer + info->cmdring._cmdRingOffset;
256 flushBatchVirtAddr = xgi_find_pcie_virt(info, flushBatchHWAddr);
258 /* not using memcpy for I assume the address is discrete */
/* Batch layout: a 4-dword header (command 0x10000000, link-enabled size
 * of 4 dwords, two zero link words) followed by four FLUSH_2D dwords. */
259 *(flushBatchVirtAddr + 0) = 0x10000000;
260 *(flushBatchVirtAddr + 1) = 0x80000004; /* size = 0x04 dwords */
261 *(flushBatchVirtAddr + 2) = 0x00000000;
262 *(flushBatchVirtAddr + 3) = 0x00000000;
263 *(flushBatchVirtAddr + 4) = FLUSH_2D;
264 *(flushBatchVirtAddr + 5) = FLUSH_2D;
265 *(flushBatchVirtAddr + 6) = FLUSH_2D;
266 *(flushBatchVirtAddr + 7) = FLUSH_2D;
268 // ASSERT(info->cmdring._lastBatchStartAddr != NULL);
/* Locate the previous batch so its link slot can point at the flush.
 * NOTE(review): the lvalue receiving this result is on an elided line. */
270 xgi_find_pcie_virt(info, info->cmdring._lastBatchStartAddr);
/* As in xgi_submit_cmdlist: write link words 1..3 first, then word 0
 * (which carries the valid/command bits) last. */
272 lastBatchVirtAddr[1] = BEGIN_LINK_ENABLE_MASK + 0x08;
273 lastBatchVirtAddr[2] = flushBatchHWAddr >> 4;
274 lastBatchVirtAddr[3] = 0;
276 lastBatchVirtAddr[0] = (get_batch_command(BTYPE_CTRL) << 24)
277 | (BEGIN_VALID_MASK);
279 triggerHWCommandList(info);
/* Advance the ring past the 8-dword flush batch and make it the new
 * link target for the next submission. */
281 info->cmdring._cmdRingOffset += 0x20;
282 info->cmdring._lastBatchStartAddr = flushBatchHWAddr;