drm/virtio: plane: use drm managed resources
[platform/kernel/linux-starfive.git] / drivers / gpu / drm / virtio / virtgpu_plane.c
1 /*
2  * Copyright (C) 2015 Red Hat, Inc.
3  * All Rights Reserved.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining
6  * a copy of this software and associated documentation files (the
7  * "Software"), to deal in the Software without restriction, including
8  * without limitation the rights to use, copy, modify, merge, publish,
9  * distribute, sublicense, and/or sell copies of the Software, and to
10  * permit persons to whom the Software is furnished to do so, subject to
11  * the following conditions:
12  *
13  * The above copyright notice and this permission notice (including the
14  * next paragraph) shall be included in all copies or substantial
15  * portions of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20  * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21  * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22  * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24  */
25
26 #include <drm/drm_atomic_helper.h>
27 #include <drm/drm_damage_helper.h>
28 #include <drm/drm_fourcc.h>
29 #include <drm/drm_plane_helper.h>
30
31 #include "virtgpu_drv.h"
32
/* Pixel formats exposed on primary planes (host-byte-order XRGB8888 only). */
static const uint32_t virtio_gpu_formats[] = {
	DRM_FORMAT_HOST_XRGB8888,
};

/* Cursor planes need alpha, hence ARGB8888 (again in host byte order). */
static const uint32_t virtio_gpu_cursor_formats[] = {
	DRM_FORMAT_HOST_ARGB8888,
};
40
41 uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc)
42 {
43         uint32_t format;
44
45         switch (drm_fourcc) {
46         case DRM_FORMAT_XRGB8888:
47                 format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
48                 break;
49         case DRM_FORMAT_ARGB8888:
50                 format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
51                 break;
52         case DRM_FORMAT_BGRX8888:
53                 format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM;
54                 break;
55         case DRM_FORMAT_BGRA8888:
56                 format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
57                 break;
58         default:
59                 /*
60                  * This should not happen, we handle everything listed
61                  * in virtio_gpu_formats[].
62                  */
63                 format = 0;
64                 break;
65         }
66         WARN_ON(format == 0);
67         return format;
68 }
69
/* Plane vfuncs: plain atomic helpers, no driver-specific state to manage. */
static const struct drm_plane_funcs virtio_gpu_plane_funcs = {
	.update_plane		= drm_atomic_helper_update_plane,
	.disable_plane		= drm_atomic_helper_disable_plane,
	.destroy		= drm_plane_cleanup,
	.reset			= drm_atomic_helper_plane_reset,
	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
};
78
79 static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
80                                          struct drm_atomic_state *state)
81 {
82         struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
83                                                                                  plane);
84         bool is_cursor = plane->type == DRM_PLANE_TYPE_CURSOR;
85         struct drm_crtc_state *crtc_state;
86         int ret;
87
88         if (!new_plane_state->fb || WARN_ON(!new_plane_state->crtc))
89                 return 0;
90
91         crtc_state = drm_atomic_get_crtc_state(state,
92                                                new_plane_state->crtc);
93         if (IS_ERR(crtc_state))
94                 return PTR_ERR(crtc_state);
95
96         ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
97                                                   DRM_PLANE_HELPER_NO_SCALING,
98                                                   DRM_PLANE_HELPER_NO_SCALING,
99                                                   is_cursor, true);
100         return ret;
101 }
102
103 static void virtio_gpu_update_dumb_bo(struct virtio_gpu_device *vgdev,
104                                       struct drm_plane_state *state,
105                                       struct drm_rect *rect)
106 {
107         struct virtio_gpu_object *bo =
108                 gem_to_virtio_gpu_obj(state->fb->obj[0]);
109         struct virtio_gpu_object_array *objs;
110         uint32_t w = rect->x2 - rect->x1;
111         uint32_t h = rect->y2 - rect->y1;
112         uint32_t x = rect->x1;
113         uint32_t y = rect->y1;
114         uint32_t off = x * state->fb->format->cpp[0] +
115                 y * state->fb->pitches[0];
116
117         objs = virtio_gpu_array_alloc(1);
118         if (!objs)
119                 return;
120         virtio_gpu_array_add_obj(objs, &bo->base.base);
121
122         virtio_gpu_cmd_transfer_to_host_2d(vgdev, off, w, h, x, y,
123                                            objs, NULL);
124 }
125
/*
 * Issue a RESOURCE_FLUSH for the given rectangle of the plane's current
 * framebuffer.  When prepare_fb attached a fence (dumb BOs whose fb
 * changed), the flush is fenced: we lock the BO's reservation, wait
 * best-effort (50 ms cap) for the host to signal, then drop our fence
 * reference so cleanup_fb doesn't double-put it.
 */
static void virtio_gpu_resource_flush(struct drm_plane *plane,
				      uint32_t x, uint32_t y,
				      uint32_t width, uint32_t height)
{
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_object *bo;

	vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
	bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
	if (vgfb->fence) {
		struct virtio_gpu_object_array *objs;

		objs = virtio_gpu_array_alloc(1);
		if (!objs)
			return;
		virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
		virtio_gpu_array_lock_resv(objs);
		virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
					      width, height, objs, vgfb->fence);
		virtio_gpu_notify(vgdev);

		/* Best effort: don't stall the atomic commit indefinitely. */
		dma_fence_wait_timeout(&vgfb->fence->f, true,
				       msecs_to_jiffies(50));
		dma_fence_put(&vgfb->fence->f);
		vgfb->fence = NULL;
	} else {
		/* Unfenced path: fire-and-forget flush. */
		virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
					      width, height, NULL, NULL);
		virtio_gpu_notify(vgdev);
	}
}
159
/*
 * Atomic update for the primary plane: push damaged dumb-BO contents to
 * the host, (re)program the scanout when the fb or source rectangle
 * changed, and flush the damaged region to the display.
 */
static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
					    struct drm_atomic_state *state)
{
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
									   plane);
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_output *output = NULL;
	struct virtio_gpu_object *bo;
	struct drm_rect rect;

	/*
	 * Prefer the old state's CRTC: on a disable the new state has no
	 * crtc, but the scanout still needs turning off on the old output.
	 */
	if (plane->state->crtc)
		output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
	if (old_state->crtc)
		output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
	if (WARN_ON(!output))
		return;

	/* Plane disabled or CRTC inactive: detach resource 0 from scanout. */
	if (!plane->state->fb || !output->crtc.state->active) {
		DRM_DEBUG("nofb\n");
		virtio_gpu_cmd_set_scanout(vgdev, output->index, 0,
					   plane->state->src_w >> 16,
					   plane->state->src_h >> 16,
					   0, 0);
		virtio_gpu_notify(vgdev);
		return;
	}

	/* No damage reported: nothing to transfer or flush. */
	if (!drm_atomic_helper_damage_merged(old_state, plane->state, &rect))
		return;

	bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);
	if (bo->dumb)
		/* Guest-backed BO: copy the damaged rect to the host first. */
		virtio_gpu_update_dumb_bo(vgdev, plane->state, &rect);

	/* Reprogram the scanout only when fb/src geometry actually changed. */
	if (plane->state->fb != old_state->fb ||
	    plane->state->src_w != old_state->src_w ||
	    plane->state->src_h != old_state->src_h ||
	    plane->state->src_x != old_state->src_x ||
	    plane->state->src_y != old_state->src_y ||
	    output->needs_modeset) {
		output->needs_modeset = false;
		DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d, src %dx%d+%d+%d\n",
			  bo->hw_res_handle,
			  plane->state->crtc_w, plane->state->crtc_h,
			  plane->state->crtc_x, plane->state->crtc_y,
			  plane->state->src_w >> 16,
			  plane->state->src_h >> 16,
			  plane->state->src_x >> 16,
			  plane->state->src_y >> 16);

		/* src_* are 16.16 fixed point, hence the >> 16 shifts. */
		if (bo->host3d_blob || bo->guest_blob) {
			virtio_gpu_cmd_set_scanout_blob
						(vgdev, output->index, bo,
						 plane->state->fb,
						 plane->state->src_w >> 16,
						 plane->state->src_h >> 16,
						 plane->state->src_x >> 16,
						 plane->state->src_y >> 16);
		} else {
			virtio_gpu_cmd_set_scanout(vgdev, output->index,
						   bo->hw_res_handle,
						   plane->state->src_w >> 16,
						   plane->state->src_h >> 16,
						   plane->state->src_x >> 16,
						   plane->state->src_y >> 16);
		}
	}

	virtio_gpu_resource_flush(plane,
				  rect.x1,
				  rect.y1,
				  rect.x2 - rect.x1,
				  rect.y2 - rect.y1);
}
235
236 static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
237                                        struct drm_plane_state *new_state)
238 {
239         struct drm_device *dev = plane->dev;
240         struct virtio_gpu_device *vgdev = dev->dev_private;
241         struct virtio_gpu_framebuffer *vgfb;
242         struct virtio_gpu_object *bo;
243
244         if (!new_state->fb)
245                 return 0;
246
247         vgfb = to_virtio_gpu_framebuffer(new_state->fb);
248         bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
249         if (!bo || (plane->type == DRM_PLANE_TYPE_PRIMARY && !bo->guest_blob))
250                 return 0;
251
252         if (bo->dumb && (plane->state->fb != new_state->fb)) {
253                 vgfb->fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context,
254                                                      0);
255                 if (!vgfb->fence)
256                         return -ENOMEM;
257         }
258
259         return 0;
260 }
261
262 static void virtio_gpu_plane_cleanup_fb(struct drm_plane *plane,
263                                         struct drm_plane_state *state)
264 {
265         struct virtio_gpu_framebuffer *vgfb;
266
267         if (!state->fb)
268                 return;
269
270         vgfb = to_virtio_gpu_framebuffer(state->fb);
271         if (vgfb->fence) {
272                 dma_fence_put(&vgfb->fence->f);
273                 vgfb->fence = NULL;
274         }
275 }
276
/*
 * Atomic update for the cursor plane: upload new cursor images to the
 * host (waiting for the transfer to complete), then send either an
 * UPDATE_CURSOR (new image) or MOVE_CURSOR (position only) command.
 */
static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
					   struct drm_atomic_state *state)
{
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
									   plane);
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_output *output = NULL;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_object *bo = NULL;
	uint32_t handle;

	/* Old state's CRTC wins so a disable still reaches the old output. */
	if (plane->state->crtc)
		output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
	if (old_state->crtc)
		output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
	if (WARN_ON(!output))
		return;

	/* handle == 0 tells the host to hide the cursor. */
	if (plane->state->fb) {
		vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
		bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
		handle = bo->hw_res_handle;
	} else {
		handle = 0;
	}

	if (bo && bo->dumb && (plane->state->fb != old_state->fb)) {
		/* new cursor -- update & wait */
		struct virtio_gpu_object_array *objs;

		objs = virtio_gpu_array_alloc(1);
		if (!objs)
			return;
		virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
		virtio_gpu_array_lock_resv(objs);
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, 0,
			 plane->state->crtc_w,
			 plane->state->crtc_h,
			 0, 0, objs, vgfb->fence);
		virtio_gpu_notify(vgdev);
		/*
		 * NOTE(review): vgfb->fence is dereferenced without a NULL
		 * check; this relies on prepare_fb having allocated it under
		 * the same dumb/fb-changed condition — confirm the two
		 * conditions can never diverge.
		 */
		dma_fence_wait(&vgfb->fence->f, true);
		dma_fence_put(&vgfb->fence->f);
		vgfb->fence = NULL;
	}

	if (plane->state->fb != old_state->fb) {
		/* Image (or visibility) changed: full UPDATE_CURSOR. */
		DRM_DEBUG("update, handle %d, pos +%d+%d, hot %d,%d\n", handle,
			  plane->state->crtc_x,
			  plane->state->crtc_y,
			  plane->state->fb ? plane->state->fb->hot_x : 0,
			  plane->state->fb ? plane->state->fb->hot_y : 0);
		output->cursor.hdr.type =
			cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
		output->cursor.resource_id = cpu_to_le32(handle);
		if (plane->state->fb) {
			output->cursor.hot_x =
				cpu_to_le32(plane->state->fb->hot_x);
			output->cursor.hot_y =
				cpu_to_le32(plane->state->fb->hot_y);
		} else {
			output->cursor.hot_x = cpu_to_le32(0);
			output->cursor.hot_y = cpu_to_le32(0);
		}
	} else {
		/* Same image: cheaper MOVE_CURSOR suffices. */
		DRM_DEBUG("move +%d+%d\n",
			  plane->state->crtc_x,
			  plane->state->crtc_y);
		output->cursor.hdr.type =
			cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR);
	}
	output->cursor.pos.x = cpu_to_le32(plane->state->crtc_x);
	output->cursor.pos.y = cpu_to_le32(plane->state->crtc_y);
	virtio_gpu_cursor_ping(vgdev, output);
}
353
/* Helper vfuncs for primary planes; only atomic_update differs from cursor. */
static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = {
	.prepare_fb		= virtio_gpu_plane_prepare_fb,
	.cleanup_fb		= virtio_gpu_plane_cleanup_fb,
	.atomic_check		= virtio_gpu_plane_atomic_check,
	.atomic_update		= virtio_gpu_primary_plane_update,
};
360
/* Helper vfuncs for cursor planes; shares prepare/cleanup/check with primary. */
static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = {
	.prepare_fb		= virtio_gpu_plane_prepare_fb,
	.cleanup_fb		= virtio_gpu_plane_cleanup_fb,
	.atomic_check		= virtio_gpu_plane_atomic_check,
	.atomic_update		= virtio_gpu_cursor_plane_update,
};
367
368 struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
369                                         enum drm_plane_type type,
370                                         int index)
371 {
372         struct drm_device *dev = vgdev->ddev;
373         const struct drm_plane_helper_funcs *funcs;
374         struct drm_plane *plane;
375         const uint32_t *formats;
376         int nformats;
377
378         if (type == DRM_PLANE_TYPE_CURSOR) {
379                 formats = virtio_gpu_cursor_formats;
380                 nformats = ARRAY_SIZE(virtio_gpu_cursor_formats);
381                 funcs = &virtio_gpu_cursor_helper_funcs;
382         } else {
383                 formats = virtio_gpu_formats;
384                 nformats = ARRAY_SIZE(virtio_gpu_formats);
385                 funcs = &virtio_gpu_primary_helper_funcs;
386         }
387
388         plane = drmm_universal_plane_alloc(dev, struct drm_plane, dev,
389                                            1 << index, &virtio_gpu_plane_funcs,
390                                            formats, nformats, NULL, type, NULL);
391         if (IS_ERR(plane))
392                 return plane;
393
394         drm_plane_helper_add(plane, funcs);
395         return plane;
396 }