Add a compat kmap_atomic_prot_pfn to do quick kernel map / unmaps of
PCI- or high memory.

author     Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
           Thu, 28 Feb 2008 12:47:15 +0000 (13:47 +0100)
committer  Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
           Thu, 28 Feb 2008 13:06:46 +0000 (14:06 +0100)

This is substantially more efficient than drm_bo_kmap(), since the
mapping only lives on a single processor. Unmapping is done with
kunmap_atomic(), and only a single TLB entry needs to be flushed.

Also add a support utility, drm_bo_pfn_prot(), that returns the
pfn and desired page protection for a given bo offset.

This is all intended for relocations in bound TTMs or VRAM.
The map-access-unmap sequence must be atomic, protected either by the
preempt_xx() macros or by a spinlock; a sketch of the intended usage
follows below.
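
A minimal sketch of the intended calling pattern (drm_bo_write_reloc(),
its arguments and the ENOTSUPP fallback are made-up for illustration;
only drm_bo_pfn_prot(), kmap_atomic_prot_pfn() and the
DRM_KMAP_ATOMIC_PROT_PFN guard come from this commit):

#include "drmP.h"

static int drm_bo_write_reloc(struct drm_buffer_object *bo,
                              unsigned long offset, uint32_t val)
{
        unsigned long pfn;
        pgprot_t prot;
        char *virt;
        int ret;

        ret = drm_bo_pfn_prot(bo, offset, &pfn, &prot);
        if (ret)
                return ret;

#ifdef DRM_KMAP_ATOMIC_PROT_PFN
        /*
         * Map, write, unmap: kmap_atomic_prot_pfn() disables
         * pagefaults, and the mapping lives on this CPU only, so
         * nothing in between may sleep or migrate.
         */
        virt = kmap_atomic_prot_pfn(pfn, KM_USER0, prot);
        *(uint32_t *)(virt + (offset & ~PAGE_MASK)) = val;
        kunmap_atomic(virt, KM_USER0);
#else
        ret = -ENOTSUPP;        /* fall back to drm_bo_kmap() */
#endif
        return ret;
}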

linux-core/drm_bo_move.c
linux-core/drm_compat.c
linux-core/drm_compat.h
linux-core/drm_objects.h

diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c
index b06a09f..30e0f43 100644
@@ -595,3 +595,37 @@ void drm_bo_kunmap(struct drm_bo_kmap_obj *map)
        map->page = NULL;
 }
 EXPORT_SYMBOL(drm_bo_kunmap);
+
+int drm_bo_pfn_prot(struct drm_buffer_object *bo,
+                   unsigned long dst_offset,
+                   unsigned long *pfn,
+                   pgprot_t *prot)
+{
+       struct drm_bo_mem_reg *mem = &bo->mem;
+       struct drm_device *dev = bo->dev;
+       unsigned long bus_offset;
+       unsigned long bus_size;
+       unsigned long bus_base;
+       struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type];
+       int ret;
+
+       ret = drm_bo_pci_offset(dev, mem, &bus_base, &bus_offset,
+                               &bus_size);
+       if (ret)
+               return -EINVAL;
+
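+       /* Nonzero bus_size: fixed (PCI / VRAM) memory; else use the TTM pages. */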
+       if (bus_size != 0)
+               *pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
+       else if (!bo->ttm)
+               return -EINVAL;
+       else
+               *pfn = page_to_pfn(drm_ttm_get_page(bo->ttm, dst_offset >> PAGE_SHIFT));
+
+       *prot = (mem->flags & DRM_BO_FLAG_CACHED) ?
+               PAGE_KERNEL : drm_kernel_io_prot(man->drm_bus_maptype);
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_bo_pfn_prot);
+
diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c
index a745a7d..32e43a0 100644
@@ -729,3 +729,38 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
 }
 EXPORT_SYMBOL(idr_replace);
 #endif
+
+#if defined(CONFIG_X86)
+
+#define drm_kmap_get_fixmap_pte(vaddr)                                 \
+       pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr))
+
+void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type,
+                          pgprot_t protection)
+{
+       enum fixed_addresses idx;
+       unsigned long vaddr;
+       static pte_t *km_pte;
+       static int initialized = 0;
+
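+       /* Look up the kernel pte behind the first kmap fixmap slot, once. */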
+       if (unlikely(!initialized)) {
+               km_pte = drm_kmap_get_fixmap_pte(__fix_to_virt(FIX_KMAP_BEGIN));
+               initialized = 1;
+       }
+
+       pagefault_disable();
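+       /* Each CPU gets its own window of KM_TYPE_NR fixmap slots. */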
+       idx = type + KM_TYPE_NR*smp_processor_id();
+       vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
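+       /* Fixmap virtual addresses run downwards, hence km_pte - idx. */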
+       set_pte(km_pte-idx, pfn_pte(pfn, protection));
+
+       return (void*) vaddr;
+}
+
+EXPORT_SYMBOL(kmap_atomic_prot_pfn);
+
+#endif
+
+
diff --git a/linux-core/drm_compat.h b/linux-core/drm_compat.h
index f8933e0..39027cf 100644
@@ -328,4 +328,9 @@ void *idr_replace(struct idr *idp, void *ptr, int id);
 typedef _Bool                   bool;
 #endif
 
+#if defined(CONFIG_X86)
+#define DRM_KMAP_ATOMIC_PROT_PFN
+extern void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type,
+                                 pgprot_t protection);
+#endif
 #endif
diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h
index e43e8df..8055afe 100644
@@ -738,6 +738,10 @@ static inline void *drm_bmo_virtual(struct drm_bo_kmap_obj *map, int *is_iomem)
 extern void drm_bo_kunmap(struct drm_bo_kmap_obj *map);
 extern int drm_bo_kmap(struct drm_buffer_object *bo, unsigned long start_page,
                       unsigned long num_pages, struct drm_bo_kmap_obj *map);
+extern int drm_bo_pfn_prot(struct drm_buffer_object *bo,
+                          unsigned long dst_offset,
+                          unsigned long *pfn,
+                          pgprot_t *prot);
 
 
 /*