1 /**************************************************************************
3 * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

/*
 * This file implements a simple replacement for the buffer manager use
 * of the heavyweight hardware lock.
 * The lock is a read-write lock. Taking it in read mode is fast, and
 * intended for in-kernel use only.
 * Taking it in write mode is slow.
 *
 * The write mode is used only when there is a need to block all
 * user-space processes from allocating a new memory area.
 *
 * Typical use in write mode is X server VT switching, and it's allowed
 * to leave kernel space with the write lock held. If a user-space process
 * dies while having the write-lock, it will be released during the file
 * descriptor release.
 *
 * The read lock is typically placed at the start of an IOCTL- or
 * user-space callable function that may end up allocating a memory area.
 * This includes setstatus, super-ioctls and no_pfn; the latter may move
 * unmappable regions to mappable. It's a bug to leave kernel space with the
 * read lock held.
 *
 * Both read- and write lock taking may be interruptible for low signal-delivery
 * latency. The locking functions will return -EAGAIN if interrupted by a
 * signal.
 *
 * Locking order: The lock should be taken BEFORE any kernel mutexes
 * or spinlocks.
 */
62 void drm_bo_init_lock(struct drm_bo_lock *lock)
64 DRM_INIT_WAITQUEUE(&lock->queue);
65 atomic_set(&lock->write_lock_pending, 0);
66 atomic_set(&lock->readers, 0);
69 void drm_bo_read_unlock(struct drm_bo_lock *lock)
71 if (atomic_dec_and_test(&lock->readers))
72 wake_up_all(&lock->queue);
74 EXPORT_SYMBOL(drm_bo_read_unlock);
76 int drm_bo_read_lock(struct drm_bo_lock *lock, int interruptible)
78 while (unlikely(atomic_read(&lock->write_lock_pending) != 0)) {
82 wait_event(lock->queue,
83 atomic_read(&lock->write_lock_pending) == 0);
86 ret = wait_event_interruptible
87 (lock->queue, atomic_read(&lock->write_lock_pending) == 0);
92 while (unlikely(!atomic_add_unless(&lock->readers, 1, -1))) {
95 wait_event(lock->queue,
96 atomic_read(&lock->readers) != -1);
99 ret = wait_event_interruptible
100 (lock->queue, atomic_read(&lock->readers) != -1);
106 EXPORT_SYMBOL(drm_bo_read_lock);
108 static int __drm_bo_write_unlock(struct drm_bo_lock *lock)
110 if (unlikely(atomic_cmpxchg(&lock->readers, -1, 0) != -1))
112 wake_up_all(&lock->queue);
116 static void drm_bo_write_lock_remove(struct drm_file *file_priv,
117 struct drm_user_object *item)
119 struct drm_bo_lock *lock = container_of(item, struct drm_bo_lock, base);
122 ret = __drm_bo_write_unlock(lock);
126 int drm_bo_write_lock(struct drm_bo_lock *lock, int interruptible,
127 struct drm_file *file_priv)
130 struct drm_device *dev;
132 atomic_inc(&lock->write_lock_pending);
134 while (unlikely(atomic_cmpxchg(&lock->readers, 0, -1) != 0)) {
135 if (!interruptible) {
136 wait_event(lock->queue,
137 atomic_read(&lock->readers) == 0);
140 ret = wait_event_interruptible
141 (lock->queue, atomic_read(&lock->readers) == 0);
144 atomic_dec(&lock->write_lock_pending);
145 wake_up_all(&lock->queue);
151 * Add a dummy user-object, the destructor of which will
152 * make sure the lock is released if the client dies
156 if (atomic_dec_and_test(&lock->write_lock_pending))
157 wake_up_all(&lock->queue);
158 dev = file_priv->minor->dev;
159 mutex_lock(&dev->struct_mutex);
160 ret = drm_add_user_object(file_priv, &lock->base, 0);
161 lock->base.remove = &drm_bo_write_lock_remove;
162 lock->base.type = drm_lock_type;
164 (void)__drm_bo_write_unlock(lock);
166 mutex_unlock(&dev->struct_mutex);
171 int drm_bo_write_unlock(struct drm_bo_lock *lock, struct drm_file *file_priv)
173 struct drm_device *dev = file_priv->minor->dev;
174 struct drm_ref_object *ro;
176 mutex_lock(&dev->struct_mutex);
178 if (lock->base.owner != file_priv) {
179 mutex_unlock(&dev->struct_mutex);
182 ro = drm_lookup_ref_object(file_priv, &lock->base, _DRM_REF_USE);
184 drm_remove_ref_object(file_priv, ro);
185 lock->base.owner = NULL;
187 mutex_unlock(&dev->struct_mutex);