/*
 * Copyright 2007 Dave Airlie.
 * Copyright 2007 Jérôme Glisse
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include "radeon_ms.h"
#include "amd_legacy_fence.h"

#define R3XX_FENCE_SEQUENCE_RW_FLUSH 0x80000000u
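
/*
 * Fence sequences are written through the CP as a register write to
 * dev_priv->fence_reg. Only the low 31 bits carry the sequence number;
 * the MSB (R3XX_FENCE_SEQUENCE_RW_FLUSH) marks a sequence that must be
 * preceded by a read/write cache flush.
 */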
static inline int r3xx_fence_emit_sequence(struct drm_device *dev,
                                           struct drm_radeon_private *dev_priv,
                                           uint32_t sequence)
{
        struct legacy_fence *r3xx_fence = dev_priv->fence;
        uint32_t cmd[2];
        int i, r;

        if (sequence & R3XX_FENCE_SEQUENCE_RW_FLUSH) {
                r3xx_fence->sequence_last_flush =
                        sequence & ~R3XX_FENCE_SEQUENCE_RW_FLUSH;
                /* Ask flush for the VERTEX & FRAGPROG pipelines. */
                /* FIXME: proper flush */
                dev_priv->flush_cache(dev);
        }
        cmd[0] = CP_PACKET0(dev_priv->fence_reg, 0);
        cmd[1] = sequence;
        for (i = 0; i < dev_priv->usec_timeout; i++) {
                r = radeon_ms_ring_emit(dev, cmd, 2);
                if (!r) {
                        dev_priv->irq_emit(dev);
                        return 0;
                }
        }
        return -EBUSY;
}
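
/*
 * Return the next sequence number, wrapping back to 1 past 0x7fffffff
 * so the MSB stays free for the RW flush flag.
 */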
static inline uint32_t r3xx_fence_sequence(struct legacy_fence *r3xx_fence)
{
        r3xx_fence->sequence += 1;
        if (unlikely(r3xx_fence->sequence > 0x7fffffffu)) {
                r3xx_fence->sequence = 1;
        }
        return r3xx_fence->sequence;
}
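
/*
 * Read back the last sequence the hardware wrote to the fence register
 * and signal the corresponding fence types to the DRM fence core.
 */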
static inline void r3xx_fence_report(struct drm_device *dev,
                                     struct drm_radeon_private *dev_priv,
                                     struct legacy_fence *r3xx_fence)
{
        uint32_t fence_types = DRM_FENCE_TYPE_EXE;
        uint32_t sequence;

        if (dev_priv == NULL) {
                return;
        }
        sequence = mmio_read(dev_priv, dev_priv->fence_reg);
        DRM_INFO("%s pass fence 0x%08x\n", __func__, sequence);
        if (sequence & R3XX_FENCE_SEQUENCE_RW_FLUSH) {
                sequence &= ~R3XX_FENCE_SEQUENCE_RW_FLUSH;
                fence_types |= DRM_AMD_FENCE_TYPE_R;
                fence_types |= DRM_AMD_FENCE_TYPE_W;
                if (sequence == r3xx_fence->sequence_last_flush) {
                        r3xx_fence->sequence_last_flush = 0;
                }
        }
        drm_fence_handler(dev, 0, sequence, fence_types, 0);
        r3xx_fence->sequence_last_reported = sequence;
}
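
/*
 * Emit a new sequence tagged with the RW flush bit so caches are
 * flushed before the fence value is written.
 */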
static void r3xx_fence_flush(struct drm_device *dev, uint32_t class)
{
        struct drm_radeon_private *dev_priv = dev->dev_private;
        struct legacy_fence *r3xx_fence = dev_priv->fence;
        uint32_t sequence;

        sequence = r3xx_fence_sequence(r3xx_fence);
        sequence |= R3XX_FENCE_SEQUENCE_RW_FLUSH;
        r3xx_fence_emit_sequence(dev, dev_priv, sequence);
}
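
/*
 * Poll entry point for the fence core, called with fm->lock held for
 * writing: turn any pending R/W flush into a flush-tagged sequence,
 * then report what the hardware has completed.
 */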
static void r3xx_fence_poll(struct drm_device *dev, uint32_t fence_class,
                            uint32_t waiting_types)
{
        struct drm_radeon_private *dev_priv = dev->dev_private;
        struct drm_fence_manager *fm = &dev->fm;
        struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
        struct legacy_fence *r3xx_fence;

        if (unlikely(dev_priv == NULL)) {
                return;
        }
        r3xx_fence = dev_priv->fence;

        /* If there is a RW flush pending, then submit a new sequence
         * preceded by flush cmds. */
        if (fc->pending_flush & (DRM_AMD_FENCE_TYPE_R | DRM_AMD_FENCE_TYPE_W)) {
                r3xx_fence_flush(dev, 0);
                fc->pending_flush &= ~DRM_AMD_FENCE_TYPE_R;
                fc->pending_flush &= ~DRM_AMD_FENCE_TYPE_W;
        }
        r3xx_fence_report(dev, dev_priv, r3xx_fence);
}
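
/*
 * Allocate the next sequence, translate the DRM flush flag into the
 * native R/W fence types, and emit the sequence to the ring.
 */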
static int r3xx_fence_emit(struct drm_device *dev, uint32_t class,
                           uint32_t flags, uint32_t *sequence,
                           uint32_t *native_type)
{
        struct drm_radeon_private *dev_priv = dev->dev_private;
        struct legacy_fence *r3xx_fence;
        uint32_t tmp;

        if (!dev_priv || dev_priv->cp_ready != 1) {
                return -EINVAL;
        }
        r3xx_fence = dev_priv->fence;
        *sequence = tmp = r3xx_fence_sequence(r3xx_fence);
        *native_type = DRM_FENCE_TYPE_EXE;
        if (flags & DRM_AMD_FENCE_FLAG_FLUSH) {
                *native_type |= DRM_AMD_FENCE_TYPE_R;
                *native_type |= DRM_AMD_FENCE_TYPE_W;
                tmp |= R3XX_FENCE_SEQUENCE_RW_FLUSH;
        }
        DRM_INFO("%s emit fence 0x%08x\n", __func__, tmp);
        return r3xx_fence_emit_sequence(dev, dev_priv, tmp);
}
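
/* An irq is emitted for EXE as well as for R/W fences on class 0. */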
static int r3xx_fence_has_irq(struct drm_device *dev,
                              uint32_t class, uint32_t type)
{
        const uint32_t type_irq_mask = DRM_FENCE_TYPE_EXE |
                                       DRM_AMD_FENCE_TYPE_R |
                                       DRM_AMD_FENCE_TYPE_W;

        /*
         * We have an irq for EXE & RW fences.
         */
        if (class == 0 && (type & type_irq_mask)) {
                return 1;
        }
        return 0;
}
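
/*
 * Report whether a fence still needs a R/W flush: no flush is needed if
 * only EXE is waited on, or if a previously emitted flush already
 * covers this sequence (wrap-safe comparison).
 */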
static uint32_t r3xx_fence_needed_flush(struct drm_fence_object *fence)
{
        struct drm_device *dev = fence->dev;
        struct drm_radeon_private *dev_priv = dev->dev_private;
        struct legacy_fence *r3xx_fence;
        struct drm_fence_driver *driver = dev->driver->fence_driver;
        uint32_t flush_types, diff;

        flush_types = fence->waiting_types &
                      ~(DRM_FENCE_TYPE_EXE | fence->signaled_types);
        if (flush_types == 0 || ((flush_types & ~fence->native_types) == 0)) {
                return 0;
        }
        if (unlikely(dev_priv == NULL)) {
                return 0;
        }
        r3xx_fence = dev_priv->fence;
        if (r3xx_fence->sequence_last_flush) {
                diff = (r3xx_fence->sequence_last_flush - fence->sequence) &
                       driver->sequence_mask;
                if (diff < driver->wrap_diff) {
                        return 0;
                }
        }
        return flush_types;
}
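
/*
 * Wait on the EXE irq first; if R/W types are also requested, fall back
 * to polling for sync flush completion.
 */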
static int r3xx_fence_wait(struct drm_fence_object *fence,
                           int lazy, int interruptible, uint32_t mask)
{
        struct drm_device *dev = fence->dev;
        struct drm_fence_manager *fm = &dev->fm;
        struct drm_fence_class_manager *fc = &fm->fence_class[0];
        int r;

        drm_fence_object_flush(fence, mask);
        if (likely(interruptible)) {
                r = wait_event_interruptible_timeout(fc->fence_queue,
                        drm_fence_object_signaled(fence, DRM_FENCE_TYPE_EXE),
                        3 * DRM_HZ);
        } else {
                r = wait_event_timeout(fc->fence_queue,
                        drm_fence_object_signaled(fence, DRM_FENCE_TYPE_EXE),
                        3 * DRM_HZ);
        }
        if (unlikely(r == -ERESTARTSYS)) {
                return -EAGAIN;
        }
        if (unlikely(r == 0)) {
                return -EBUSY;
        }
        if (likely(mask == DRM_FENCE_TYPE_EXE ||
                   drm_fence_object_signaled(fence, mask))) {
                return 0;
        }
        /* Poll for sync flush completion; the deadline argument is an
         * assumption, mirroring the 3 * DRM_HZ irq wait above. */
        return drm_fence_wait_polling(fence, lazy, interruptible, mask,
                                      jiffies + 3 * DRM_HZ);
}
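
/*
 * Sequences are 31 bits wide (the MSB is reserved for the RW flush
 * flag), hence the 0x7fffffff mask; wrap_diff and flush_diff stay well
 * below half of that sequence space.
 */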
struct drm_fence_driver r3xx_fence_driver = {
        .num_classes = 1,
        .wrap_diff = (1 << 29),
        .flush_diff = (1 << 28),
        .sequence_mask = 0x7fffffffU,
        .has_irq = r3xx_fence_has_irq,
        .emit = r3xx_fence_emit,
        .flush = r3xx_fence_flush,
        .poll = r3xx_fence_poll,
        .needed_flush = r3xx_fence_needed_flush,
        .wait = r3xx_fence_wait,
};
/* This is used by the buffer object code. */
int r3xx_fence_types(struct drm_buffer_object *bo,
                     uint32_t *class, uint32_t *type)
{
        *class = 0;
        if (bo->mem.flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE)) {
                *type = DRM_FENCE_TYPE_EXE |
                        DRM_AMD_FENCE_TYPE_R |
                        DRM_AMD_FENCE_TYPE_W;
        } else {
                *type = DRM_FENCE_TYPE_EXE;
        }
        return 0;
}
/* This is used by the irq code. */
void r3xx_fence_handler(struct drm_device *dev)
{
        struct drm_radeon_private *dev_priv = dev->dev_private;
        struct drm_fence_manager *fm = &dev->fm;
        struct drm_fence_class_manager *fc = &fm->fence_class[0];

        if (unlikely(dev_priv == NULL)) {
                return;
        }

        write_lock(&fm->lock);
        r3xx_fence_poll(dev, 0, fc->waiting_types);
        write_unlock(&fm->lock);
}