Add an emergency pinnable memory quota for root-only processes.
author     Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
           Thu, 17 Jan 2008 12:10:36 +0000 (13:10 +0100)
committer  Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
           Wed, 12 Mar 2008 09:10:03 +0000 (10:10 +0100)
linux-core/drmP.h
linux-core/drm_memory.c
linux-core/drm_proc.c

diff --git a/linux-core/drmP.h b/linux-core/drmP.h
index 1fea807..2f76f3d 100644
--- a/linux-core/drmP.h
+++ b/linux-core/drmP.h
@@ -1001,8 +1001,10 @@ extern int drm_unbind_agp(DRM_AGP_MEM * handle);
 extern void drm_free_memctl(size_t size);
 extern int drm_alloc_memctl(size_t size);
 extern void drm_query_memctl(uint64_t *cur_used,
+                            uint64_t *emer_used,
                             uint64_t *low_threshold,
-                            uint64_t *high_threshold);
+                            uint64_t *high_threshold,
+                            uint64_t *emer_threshold);
 extern void drm_init_memctl(size_t low_threshold,
                            size_t high_threshold,
                            size_t unit_size);
diff --git a/linux-core/drm_memory.c b/linux-core/drm_memory.c
index 402a680..1a6c48d 100644
--- a/linux-core/drm_memory.c
+++ b/linux-core/drm_memory.c
 static struct {
        spinlock_t lock;
        uint64_t cur_used;
+       uint64_t emer_used;
        uint64_t low_threshold;
        uint64_t high_threshold;
+       uint64_t emer_threshold;
 } drm_memctl = {
        .lock = SPIN_LOCK_UNLOCKED
 };
@@ -59,14 +61,30 @@ static inline size_t drm_size_align(size_t size)
 
 int drm_alloc_memctl(size_t size)
 {
-       int ret;
+       int ret = 0;
        unsigned long a_size = drm_size_align(size);
+       unsigned long new_used = drm_memctl.cur_used + a_size;
 
        spin_lock(&drm_memctl.lock);
-       ret = ((drm_memctl.cur_used + a_size) > drm_memctl.high_threshold) ?
-               -ENOMEM : 0;
-       if (!ret)
-               drm_memctl.cur_used += a_size;
+       if (unlikely(new_used > drm_memctl.high_threshold)) {
+               if (!DRM_SUSER(DRM_CURPROC) ||
+                   (new_used + drm_memctl.emer_used > drm_memctl.emer_threshold) ||
+                   (a_size > 2*PAGE_SIZE)) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+
+               /*
+                * Allow small root-only allocations, even if the
+                * high threshold is exceeded.
+                */
+
+               new_used -= drm_memctl.high_threshold;
+               drm_memctl.emer_used += new_used;
+               a_size -= new_used;
+       }
+       drm_memctl.cur_used += a_size;
+out:
        spin_unlock(&drm_memctl.lock);
        return ret;
 }
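
To make the new branch above concrete, here is a worked example with invented numbers (expressed in pages for readability; the code itself accounts in bytes):

/*
 * Hypothetical walk-through of drm_alloc_memctl() for a root caller:
 *
 *   high_threshold = 1024 pages, emer_threshold = 1088 pages
 *   cur_used       = 1023 pages, emer_used      = 0
 *
 *   a_size = 2 pages  ->  new_used = 1025 pages > high_threshold
 *   The caller is root, new_used + emer_used <= emer_threshold and
 *   a_size <= 2 * PAGE_SIZE, so the allocation is allowed:
 *
 *   excess     = new_used - high_threshold = 1 page
 *   emer_used += excess                   -> emer_used = 1 page
 *   a_size    -= excess                   -> a_size    = 1 page
 *   cur_used  += a_size                   -> cur_used  = 1024 pages
 *
 * cur_used is thus capped at high_threshold; the overshoot is tracked
 * separately in emer_used.
 */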
@@ -77,19 +95,30 @@ void drm_free_memctl(size_t size)
        unsigned long a_size = drm_size_align(size);
 
        spin_lock(&drm_memctl.lock);
+       if (likely(a_size >= drm_memctl.emer_used)) {
+               a_size -= drm_memctl.emer_used;
+               drm_memctl.emer_used = 0;
+       } else {
+               drm_memctl.emer_used -= a_size;
+               a_size = 0;
+       }
        drm_memctl.cur_used -= a_size;
        spin_unlock(&drm_memctl.lock);
 }
 EXPORT_SYMBOL(drm_free_memctl);
 
 void drm_query_memctl(uint64_t *cur_used,
+                     uint64_t *emer_used,
                      uint64_t *low_threshold,
-                     uint64_t *high_threshold)
+                     uint64_t *high_threshold,
+                     uint64_t *emer_threshold)
 {
        spin_lock(&drm_memctl.lock);
        *cur_used = drm_memctl.cur_used;
+       *emer_used = drm_memctl.emer_used;
        *low_threshold = drm_memctl.low_threshold;
        *high_threshold = drm_memctl.high_threshold;
+       *emer_threshold = drm_memctl.emer_threshold;
        spin_unlock(&drm_memctl.lock);
 }
 EXPORT_SYMBOL(drm_query_memctl);
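
A note on the free path above: allocations are not individually tagged as normal or emergency, so drm_free_memctl() simply credits any free against the emergency counter first. Continuing the invented numbers from the previous example:

/*
 * emer_used = 1 page, cur_used = 1024 pages.  Freeing a 2-page object
 * first clears emer_used (absorbing 1 page of the free); the remaining
 * 1 page is subtracted from cur_used, leaving cur_used = 1023 pages
 * and emer_used = 0.
 */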
@@ -99,9 +128,12 @@ void drm_init_memctl(size_t p_low_threshold,
                     size_t unit_size)
 {
        spin_lock(&drm_memctl.lock);
+       drm_memctl.emer_used = 0;
        drm_memctl.cur_used = 0;
        drm_memctl.low_threshold = p_low_threshold * unit_size;
        drm_memctl.high_threshold = p_high_threshold * unit_size;
+       drm_memctl.emer_threshold = (drm_memctl.high_threshold >> 4) +
+               drm_memctl.high_threshold;
        spin_unlock(&drm_memctl.lock);
 }
 
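
The drm_memory.c changes are self-contained enough to model outside the kernel. Below is a minimal user-space sketch of the accounting, for illustration only: the spinlock, the DRM_SUSER() capability check and the kernel allocation itself are replaced by plain C, and the names model_init/model_alloc/model_free, MODEL_PAGE_SIZE and is_root are invented here so the threshold arithmetic can be compiled and stepped through.

/*
 * Minimal user-space model of the accounting above -- illustration only,
 * not part of the patch.
 */
#include <stdint.h>
#include <stdio.h>

#define MODEL_PAGE_SIZE 4096ull

static struct {
	uint64_t cur_used;
	uint64_t emer_used;
	uint64_t high_threshold;
	uint64_t emer_threshold;
} memctl;

static void model_init(uint64_t high_pages)
{
	memctl.cur_used = 0;
	memctl.emer_used = 0;
	memctl.high_threshold = high_pages * MODEL_PAGE_SIZE;
	/* Matches drm_init_memctl(): emer = high + high / 16. */
	memctl.emer_threshold = memctl.high_threshold +
		(memctl.high_threshold >> 4);
}

/* Mirrors drm_alloc_memctl(); "is_root" stands in for DRM_SUSER(). */
static int model_alloc(uint64_t a_size, int is_root)
{
	uint64_t new_used = memctl.cur_used + a_size;

	if (new_used > memctl.high_threshold) {
		if (!is_root ||
		    new_used + memctl.emer_used > memctl.emer_threshold ||
		    a_size > 2 * MODEL_PAGE_SIZE)
			return -1;	/* -ENOMEM in the kernel */

		/* Charge only the excess over the hard limit to emer_used. */
		new_used -= memctl.high_threshold;
		memctl.emer_used += new_used;
		a_size -= new_used;
	}
	memctl.cur_used += a_size;
	return 0;
}

/* Mirrors drm_free_memctl(): frees drain the emergency counter first. */
static void model_free(uint64_t a_size)
{
	if (a_size >= memctl.emer_used) {
		a_size -= memctl.emer_used;
		memctl.emer_used = 0;
	} else {
		memctl.emer_used -= a_size;
		a_size = 0;
	}
	memctl.cur_used -= a_size;
}

int main(void)
{
	model_init(1024);				/* hard limit: 1024 pages */
	memctl.cur_used = 1023 * MODEL_PAGE_SIZE;	/* nearly full            */

	printf("non-root 2-page alloc: %s\n",
	       model_alloc(2 * MODEL_PAGE_SIZE, 0) ? "refused" : "allowed");
	printf("root 2-page alloc:     %s\n",
	       model_alloc(2 * MODEL_PAGE_SIZE, 1) ? "refused" : "allowed");
	printf("cur_used = %llu pages, emer_used = %llu pages\n",
	       (unsigned long long)(memctl.cur_used / MODEL_PAGE_SIZE),
	       (unsigned long long)(memctl.emer_used / MODEL_PAGE_SIZE));

	model_free(2 * MODEL_PAGE_SIZE);
	printf("after free: cur_used = %llu pages, emer_used = %llu pages\n",
	       (unsigned long long)(memctl.cur_used / MODEL_PAGE_SIZE),
	       (unsigned long long)(memctl.emer_used / MODEL_PAGE_SIZE));
	return 0;
}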
diff --git a/linux-core/drm_proc.c b/linux-core/drm_proc.c
index 67afee8..42da5c6 100644
--- a/linux-core/drm_proc.c
+++ b/linux-core/drm_proc.c
@@ -445,9 +445,10 @@ static int drm__objects_info(char *buf, char **start, off_t offset, int request,
        struct drm_buffer_manager *bm = &dev->bm;
        struct drm_fence_manager *fm = &dev->fm;
        uint64_t used_mem;
+       uint64_t used_emer;
        uint64_t low_mem;
        uint64_t high_mem;
-
+       uint64_t emer_mem;
 
        if (offset > DRM_PROC_LIMIT) {
                *eof = 1;
@@ -476,7 +477,7 @@ static int drm__objects_info(char *buf, char **start, off_t offset, int request,
                DRM_PROC_PRINT("Buffer objects are not supported by this driver.\n");
        }
 
-       drm_query_memctl(&used_mem, &low_mem, &high_mem);
+       drm_query_memctl(&used_mem, &used_emer, &low_mem, &high_mem, &emer_mem);
 
        if (used_mem > 16*PAGE_SIZE) {
                DRM_PROC_PRINT("Used object memory is %lu pages.\n",
@@ -485,10 +486,19 @@ static int drm__objects_info(char *buf, char **start, off_t offset, int request,
                DRM_PROC_PRINT("Used object memory is %lu bytes.\n",
                               (unsigned long) used_mem);
        }
+       if (used_emer > 16*PAGE_SIZE) {
+               DRM_PROC_PRINT("Used emergency memory is %lu pages.\n",
+                              (unsigned long) (used_emer >> PAGE_SHIFT));
+       } else {
+               DRM_PROC_PRINT("Used emergency memory is %lu bytes.\n\n",
+                              (unsigned long) used_emer);
+       }
        DRM_PROC_PRINT("Soft object memory usage threshold is %lu pages.\n",
                       (unsigned long) (low_mem >> PAGE_SHIFT));
        DRM_PROC_PRINT("Hard object memory usage threshold is %lu pages.\n",
                       (unsigned long) (high_mem >> PAGE_SHIFT));
+       DRM_PROC_PRINT("Emergency root only memory usage threshold is %lu pages.\n",
+                      (unsigned long) (emer_mem >> PAGE_SHIFT));
 
        DRM_PROC_PRINT("\n");
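
With the additions above, the memory section of the objects proc file gains a "Used emergency memory" line and an "Emergency root only" threshold line. A hypothetical excerpt, with invented numbers (whether pages or bytes are printed depends on the 16-page cutoff in the code):

Used object memory is 1024 pages.
Used emergency memory is 4096 bytes.

Soft object memory usage threshold is 768 pages.
Hard object memory usage threshold is 1024 pages.
Emergency root only memory usage threshold is 1088 pages.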