2 *-----------------------------------------------------------------------------
5 *-----------------------------------------------------------------------------
6 * Copyright (c) 2002-2010, Intel Corporation.
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal
10 * in the Software without restriction, including without limitation the rights
11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
26 *-----------------------------------------------------------------------------
29 *-----------------------------------------------------------------------------
31 #include <igd_debug.h>
36 #include <asm/cacheflush.h>
37 #include <linux/version.h>
/* VBIOS scratch registers and GTT PTE flag definitions. */
41 #define SCR1 0x71410 /* scratch register set by vbios indicating status*/
42 #define SCR2 0x71418 /* scratch register set by vbios indicating amount of stolen memory */
43 #define FW_ID 0xE1DF0000 /* firmware identifier */
44 #define ST_BIT 0x00000004 /* bit2- stolen memory bit */
45 #define PSB_PTE_VALID 0x0001
/* Forward declaration: releases pages allocated by emgd_alloc_pages(). */
47 void emgd_free_pages(gmm_mem_buffer_t *mem);
/* client_sem guards client_list; gtt_sem serializes all GTT PTE updates. */
49 static DEFINE_MUTEX(client_sem);
50 static DEFINE_MUTEX(gtt_sem);
/* One entry per user VMA that maps the virtual aperture.
 * NOTE(review): per the FIXME below, nothing currently adds to this list. */
52 struct client_list_struct {
53 struct list_head list;
54 struct vm_area_struct *vma;
58 static LIST_HEAD(client_list);
/*
 * IPI callback executed on each CPU by emgd_cache_flush().
 * The per-CPU AGP cache flush is intentionally disabled.
 */
static void ipi_handler(void *null)
{
	/* flush_agp_cache(); */ /* from agp.h -- intentionally disabled */
}
65 static void emgd_cache_flush(void) {
66 if (on_each_cpu(ipi_handler, NULL, 1) != 0)
67 panic(PFX "timed out waiting for the other CPUs!\n");
/*
 * Placeholder hook for SGX/MSVDX MMU TLB invalidation.
 * Intentionally empty; kept so the GTT insert/remove paths have a
 * single call site if a TLB flush ever becomes necessary.
 */
static void tlb_flush(void)
{
	/* no-op: nothing currently requires an SGX/MSVDX TLB flush */
}
74 /* FIXME - TURNS OUT THAT THIS FUNCTION IS CURRENTLY A NOOP, BECAUSE
75 * client_list NEVER HAS ANYTHING ADDED TO IT.
/*
 * invalidate_vma() - zap user-space PTEs for one page of the virtual
 * aperture, for every client VMA that currently maps it.
 *
 * pg_offset: page offset into the aperture that was just remapped.
 * bus_addr:  bus address of the aperture (used to locate the mapping;
 *            NOTE(review): usage not visible in this excerpt -- confirm).
 */
77 static void invalidate_vma(unsigned long pg_offset, unsigned long bus_addr) {
79 struct list_head *tmp;
80 struct client_list_struct *entry;
81 unsigned long addr_start=0;
82 unsigned long addr_end=0;
83 unsigned long addr_offset=0;
/* client_sem protects client_list for the whole walk. */
90 mutex_lock(&client_sem);
91 list_for_each(tmp, &client_list) {
92 entry = list_entry(tmp, struct client_list_struct, list);
95 * We need to handle invalidating VMA's that are only mapping
96 * a portion of the virtual aperture. Calculate what if
97 * any invalidated pages need to be zapped
99 addr_start = (entry->vma->vm_pgoff << PAGE_SHIFT)
101 addr_end = addr_start + (entry->vma->vm_end - entry->vma->vm_start);
102 addr_offset = pg_offset << PAGE_SHIFT;
/* User virtual address of the target page within this VMA. */
104 vaddr = entry->vma->vm_start + (addr_offset - addr_start);
113 * Look up page table entries for all VMAs that currently
114 * have the virtual aperture mapped -- to see if the page
/* Manual 4-level page-table walk: pgd -> pud -> pmd -> pte. */
117 pgd = pgd_offset(entry->vma->vm_mm, vaddr);
118 if (!pgd_none(*pgd)) {
119 pud = pud_offset(pgd, vaddr);
120 if (!pud_none(*pud)) {
121 pmd = pmd_offset(pud, vaddr);
122 if (!pmd_none(*pmd)) {
123 pte = pte_offset_map(pmd, vaddr);
124 if (!pte_none(*pte)) {
131 * Only zap a page if it falls within the mapped region
132 * and it has previously faulted
134 if (zap && (addr_offset >= addr_start) &&
135 (addr_offset < addr_end)) {
/* A present PTE whose page has no mapcount means our bookkeeping
 * is already inconsistent -- log loudly but continue the zap. */
138 if (!page_mapcount(pte_page(*pte))) {
139 printk(KERN_ERR "[EMGD] ERROR No mapcount\n");
140 printk(KERN_ALERT "[EMGD] ZR %p %08lX %d %d %p\n",
142 pte_page(*pte)->flags, page_count(pte_page(*pte)),
143 page_mapcount(pte_page(*pte)), pte_page(*pte)->mapping);
/* Manually drop the mapcount, page refcount, and the mm's RSS
 * counter, mirroring what zap_pte_range() would do.
 * NOTE(review): poking _mapcount directly is fragile across
 * kernel versions -- confirm against the target kernel. */
145 atomic_add_negative(-1, &pte_page(*pte)->_mapcount);
146 put_page(pte_page(*pte));
/* dec_mm_counter()'s counter argument changed name in 2.6.34. */
147 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34))
148 dec_mm_counter(entry->vma->vm_mm, file_rss);
150 dec_mm_counter(entry->vma->vm_mm, MM_FILEPAGES);
/* Clear the PTE so the next access refaults against the new backing. */
154 pte_clear(entry->vma->vm_mm, vaddr, pte);
161 mutex_unlock(&client_sem);
167 * Allocate pages from the kernel and store in a page list.
/*
 * emgd_alloc_pages() - allocate num_pages of graphics memory.
 *
 * type 1 or 0: pages are allocated one at a time (need not be
 * physically contiguous).  Other types: a single physically-contiguous
 * alloc_pages() block of 1, 4, or 8 pages (mem->physical is set).
 * All pages are set uncached via set_memory_uc().
 * Returns the new gmm_mem_buffer_t, or NULL on failure (error paths
 * truncated in this excerpt).
 */
169 gmm_mem_buffer_t *emgd_alloc_pages(unsigned long num_pages, int type) {
170 gmm_mem_buffer_t *mem;
176 mem = (gmm_mem_buffer_t *)kzalloc(sizeof(gmm_mem_buffer_t), GFP_KERNEL);
181 /* First allocate page array */
182 list_size = num_pages * sizeof(struct page *);
183 mem->vmalloc_flag = false;
/* Small page arrays come from kmalloc; fall back to vmalloc for
 * large ones (or if kmalloc fails under __GFP_NORETRY). */
185 if (list_size <= (2 * PAGE_SIZE)) {
186 mem->pages = kmalloc(list_size, GFP_KERNEL | __GFP_NORETRY);
189 if (mem->pages == NULL) {
190 mem->pages = vmalloc(list_size);
191 mem->vmalloc_flag = true;
194 if (mem->pages == NULL) {
195 printk(KERN_ERR "[EMGD] Failed to allocate memory info struct.\n");
201 * If we need phyical contiguous memory, then do things differently.
202 * Call alloc_pages(GFP_KERNEL, pages) to allocate all the pages.
203 * The page structure returned is just the first page of the group.
204 * ? is it a virtual address ?
206 * mem->pages[0] = virt_to_phys(page)
207 * mem->pages[1] = mem->pages[0] + PAGE_SIZE
/* Types 0 and 1: page-at-a-time allocation, no contiguity required. */
210 if ((type == 1) || (type == 0)) {
211 /* Next allocate the pages */
212 for (i = 0; i < num_pages; i++) {
213 page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
216 printk(KERN_ERR "[EMGD] Memory allocation failure!\n");
217 if (mem->vmalloc_flag) {
226 /* Make sure this page isn't cached */
227 if (set_memory_uc((unsigned long) page_address(page), 1) < 0) {
228 printk(KERN_ERR "[EMGD] Unable to set page attributes for newly"
229 " allocated graphics memory.\n");
230 /* Rely on the fact that we've kept up the data structures: */
231 emgd_free_pages(mem);
232 /* XXX - THIS IS WHAT SOME OLD IEGD CODE DID--A GOOD IDEA??? */
233 set_memory_wb((unsigned long) page_address(page), 1);
239 mem->pages[i] = page;
/* Contiguous path: only 1-, 4-, or 8-page requests are supported,
 * mapped to an alloc_pages() order (0/2/3 -- assignments truncated
 * from this excerpt). */
243 if (num_pages == 1) {
245 } else if (num_pages == 4) {
247 } else if (num_pages == 8) {
250 printk(KERN_ERR "[EMGD] Page count is not valid for physical "
252 if (mem->vmalloc_flag) {
261 page = alloc_pages(GFP_KERNEL, order);
264 printk(KERN_ERR "[EMGD] Memory allocation failure!\n");
265 if (mem->vmalloc_flag) {
273 /* Make sure these pages aren't cached */
274 if (set_memory_uc((unsigned long) page_address(page),
276 printk(KERN_ERR "[EMGD] Unable to set page attributes for newly"
277 " allocated physical graphics memory.\n");
278 /* XXX - THIS IS WHAT SOME OLD IEGD CODE DID--A GOOD IDEA??? */
279 set_memory_wb((unsigned long) page_address(page), num_pages);
/* NOTE(review): __free_pages() expects an allocation ORDER, but
 * num_pages (a page COUNT) is passed here while the allocation above
 * used `order` -- looks like a bug; confirm and pass `order`. */
280 __free_pages(page, num_pages);
281 if (mem->vmalloc_flag) {
/* Record the first page, then synthesize the rest of the array from
 * consecutive struct page pointers (pages are contiguous). */
291 mem->pages[0] = page;
293 for (i = 1; i < num_pages; i++) {
294 mem->pages[i] = mem->pages[i-1] + 1;
297 mem->physical = page_to_phys(mem->pages[0]);
298 mem->page_count = num_pages;
/*
 * emgd_free_pages() - release pages allocated by emgd_alloc_pages().
 *
 * Restores each page to write-back caching before freeing it, then
 * frees the page-pointer array (vfree or kfree depending on how it was
 * allocated; tail of the function truncated in this excerpt).
 */
308 void emgd_free_pages(gmm_mem_buffer_t *mem) {
312 for (i = 0; i < mem->page_count; i++) {
313 page = mem->pages[i];
314 /* XXX - THIS IS WHAT SOME OLD IEGD CODE DID--A GOOD IDEA??? */
/* Undo the set_memory_uc() done at allocation time. */
315 set_memory_wb((unsigned long) page_address(page), 1);
318 mem->pages[i] = NULL;
/* vmalloc_flag tells us which allocator produced mem->pages. */
321 if (mem->vmalloc_flag) {
332 * Need a function to populate the GTT with the pages.
334 * The caller provides the offset into the GTT where the memory needs
335 * to go. This simply needs to allocate the pages and insert them
338 void emgd_gtt_insert(igd_context_t *context,
339 gmm_mem_buffer_t *mem,
340 unsigned long offset)
344 unsigned long pg_off;
347 pg_off = offset >> PAGE_SHIFT;
349 /* Check that the offset is within the gtt's range */
350 if ((pg_off + mem->page_count) > context->device_context.gatt_pages) {
351 printk(KERN_ERR "[EMGD] Attempt to insert a offset beyond of GTT range.\n");
355 /* Flush before inserting pages into the GTT */
360 /* Insert the pages into the GTT */
361 mutex_lock(>t_sem);
362 for (i = 0, j = pg_off; i < mem->page_count; i++, j++) {
363 page = mem->pages[i];
365 /* Mark the page as valid */
366 pte = page_to_phys(page) | PSB_PTE_VALID;
367 writel(pte, (context->device_context.virt_gttadr + j));
368 readl(context->device_context.virt_gttadr + j);
372 mutex_unlock(>t_sem);
374 (void)readl(context->device_context.virt_gttadr + j - 1);
376 /* Invalidate VMA's */
378 (context->device_context.gmch_ctl | PCI_BASE_ADDRESS_MEM_MASK));
388 * Need a function to remove pages from the GTT (and replace with the
389 * scratch page?) and free the pages.
392 void emgd_gtt_remove(igd_context_t *context,
393 gmm_mem_buffer_t *mem,
394 unsigned long offset)
399 unsigned long pg_start;
401 pg_start = offset >> PAGE_SHIFT;
403 /* Flush before inserting pages into the GTT */
407 mutex_lock(>t_sem);
409 page = context->device_context.scratch_page;
410 pte = page_to_phys(page) | PSB_PTE_VALID;
412 /* Insert the scratch page into the GTT */
413 for (i = pg_start; i < (mem->page_count + pg_start); i++) {
414 /* FIXME: Apparently we don't really need to copy stolen memory pages.
415 * If so, what should we do about the following code? Is it correct to
418 if (i < context->device_context.stolen_pages) {
419 /* This is stolen memory.... */
421 writel(pte, context->device_context.virt_gttadr + i);
422 (void)readl(context->device_context.virt_gttadr + i);
427 mutex_unlock(>t_sem);
429 /* Invaidate VMA's */
431 (context->device_context.gmch_ctl | PCI_BASE_ADDRESS_MEM_MASK));