drivers/gpu/drm/virtio/virtgpu_plane.c
/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>

#include "virtgpu_drv.h"

static const uint32_t virtio_gpu_formats[] = {
        DRM_FORMAT_HOST_XRGB8888,
};

static const uint32_t virtio_gpu_cursor_formats[] = {
        DRM_FORMAT_HOST_ARGB8888,
};

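/*
 * Map a DRM fourcc to the matching virtio-gpu format.  DRM fourccs
 * name their components in little-endian packed order, while the
 * virtio-gpu enum names bytes in memory order, so the component order
 * in the names below is intentionally reversed.
 */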
uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc)
{
        uint32_t format;

        switch (drm_fourcc) {
        case DRM_FORMAT_XRGB8888:
                format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
                break;
        case DRM_FORMAT_ARGB8888:
                format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
                break;
        case DRM_FORMAT_BGRX8888:
                format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM;
                break;
        case DRM_FORMAT_BGRA8888:
                format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
                break;
        default:
                /*
                 * This should not happen: we handle every format
                 * listed in virtio_gpu_formats[] and
                 * virtio_gpu_cursor_formats[].
                 */
                format = 0;
                break;
        }
        WARN_ON(format == 0);
        return format;
}

static void virtio_gpu_plane_destroy(struct drm_plane *plane)
{
        drm_plane_cleanup(plane);
        kfree(plane);
}

static const struct drm_plane_funcs virtio_gpu_plane_funcs = {
        .update_plane           = drm_atomic_helper_update_plane,
        .disable_plane          = drm_atomic_helper_disable_plane,
        .destroy                = virtio_gpu_plane_destroy,
        .reset                  = drm_atomic_helper_plane_reset,
        .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
        .atomic_destroy_state   = drm_atomic_helper_plane_destroy_state,
};

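/*
 * Validate the new plane state: no scaling is supported, only the
 * cursor plane may be freely positioned (the primary plane must cover
 * the whole CRTC), and updates while the CRTC is disabled are allowed.
 */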
static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
                                         struct drm_atomic_state *state)
{
        struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
                                                                                 plane);
        bool is_cursor = plane->type == DRM_PLANE_TYPE_CURSOR;
        struct drm_crtc_state *crtc_state;
        int ret;

        if (!new_plane_state->fb || WARN_ON(!new_plane_state->crtc))
                return 0;

        crtc_state = drm_atomic_get_crtc_state(state,
                                               new_plane_state->crtc);
        if (IS_ERR(crtc_state))
                return PTR_ERR(crtc_state);

        ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
                                                  DRM_PLANE_HELPER_NO_SCALING,
                                                  DRM_PLANE_HELPER_NO_SCALING,
                                                  is_cursor, true);
        return ret;
}

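/*
 * Dumb BOs are backed by guest pages, so the damaged rectangle must be
 * copied into the host-side resource (TRANSFER_TO_HOST_2D) before the
 * plane can be flushed.
 */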
static void virtio_gpu_update_dumb_bo(struct virtio_gpu_device *vgdev,
                                      struct drm_plane_state *state,
                                      struct drm_rect *rect)
{
        struct virtio_gpu_object *bo =
                gem_to_virtio_gpu_obj(state->fb->obj[0]);
        struct virtio_gpu_object_array *objs;
        uint32_t w = rect->x2 - rect->x1;
        uint32_t h = rect->y2 - rect->y1;
        uint32_t x = rect->x1;
        uint32_t y = rect->y1;
        uint32_t off = x * state->fb->format->cpp[0] +
                y * state->fb->pitches[0];

        objs = virtio_gpu_array_alloc(1);
        if (!objs)
                return;
        virtio_gpu_array_add_obj(objs, &bo->base.base);

        virtio_gpu_cmd_transfer_to_host_2d(vgdev, off, w, h, x, y,
                                           objs, NULL);
}

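/*
 * Ask the host to redraw the given rectangle of the plane's resource.
 * If prepare_fb allocated a fence for this framebuffer, the flush is
 * fenced and we wait (with a 50 ms timeout) for the host to signal
 * completion before dropping the fence.
 */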
static void virtio_gpu_resource_flush(struct drm_plane *plane,
                                      uint32_t x, uint32_t y,
                                      uint32_t width, uint32_t height)
{
        struct drm_device *dev = plane->dev;
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_framebuffer *vgfb;
        struct virtio_gpu_object *bo;

        vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
        bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
        if (vgfb->fence) {
                struct virtio_gpu_object_array *objs;

                objs = virtio_gpu_array_alloc(1);
                if (!objs)
                        return;
                virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
                virtio_gpu_array_lock_resv(objs);
                virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
                                              width, height, objs, vgfb->fence);
                virtio_gpu_notify(vgdev);

                dma_fence_wait_timeout(&vgfb->fence->f, true,
                                       msecs_to_jiffies(50));
                dma_fence_put(&vgfb->fence->f);
                vgfb->fence = NULL;
        } else {
                virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
                                              width, height, NULL, NULL);
                virtio_gpu_notify(vgdev);
        }
}

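/*
 * Primary plane update: reprogram the scanout whenever the framebuffer
 * or source rectangle changed (or a modeset is pending), upload the
 * damaged region of dumb BOs, and flush the merged damage rectangle.
 */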
static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
                                            struct drm_atomic_state *state)
{
        struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
                                                                           plane);
        struct drm_device *dev = plane->dev;
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_output *output = NULL;
        struct virtio_gpu_object *bo;
        struct drm_rect rect;

        if (plane->state->crtc)
                output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
        if (old_state->crtc)
                output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
        if (WARN_ON(!output))
                return;

        if (!plane->state->fb || !output->crtc.state->active) {
                DRM_DEBUG("nofb\n");
                virtio_gpu_cmd_set_scanout(vgdev, output->index, 0,
                                           plane->state->src_w >> 16,
                                           plane->state->src_h >> 16,
                                           0, 0);
                virtio_gpu_notify(vgdev);
                return;
        }

        if (!drm_atomic_helper_damage_merged(old_state, plane->state, &rect))
                return;

        bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);
        if (bo->dumb)
                virtio_gpu_update_dumb_bo(vgdev, plane->state, &rect);

        if (plane->state->fb != old_state->fb ||
            plane->state->src_w != old_state->src_w ||
            plane->state->src_h != old_state->src_h ||
            plane->state->src_x != old_state->src_x ||
            plane->state->src_y != old_state->src_y ||
            output->needs_modeset) {
                output->needs_modeset = false;
                DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d, src %dx%d+%d+%d\n",
                          bo->hw_res_handle,
                          plane->state->crtc_w, plane->state->crtc_h,
                          plane->state->crtc_x, plane->state->crtc_y,
                          plane->state->src_w >> 16,
                          plane->state->src_h >> 16,
                          plane->state->src_x >> 16,
                          plane->state->src_y >> 16);

                if (bo->host3d_blob || bo->guest_blob) {
                        virtio_gpu_cmd_set_scanout_blob
                                                (vgdev, output->index, bo,
                                                 plane->state->fb,
                                                 plane->state->src_w >> 16,
                                                 plane->state->src_h >> 16,
                                                 plane->state->src_x >> 16,
                                                 plane->state->src_y >> 16);
                } else {
                        virtio_gpu_cmd_set_scanout(vgdev, output->index,
                                                   bo->hw_res_handle,
                                                   plane->state->src_w >> 16,
                                                   plane->state->src_h >> 16,
                                                   plane->state->src_x >> 16,
                                                   plane->state->src_y >> 16);
                }
        }

        virtio_gpu_resource_flush(plane,
                                  rect.x1,
                                  rect.y1,
                                  rect.x2 - rect.x1,
                                  rect.y2 - rect.y1);
}

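/*
 * Pre-allocate a fence for dumb-BO framebuffer flips so the transfer
 * and flush (or cursor upload) issued during the atomic update can be
 * fenced; primary-plane BOs without a guest blob resource are skipped.
 */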
static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
                                       struct drm_plane_state *new_state)
{
        struct drm_device *dev = plane->dev;
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_framebuffer *vgfb;
        struct virtio_gpu_object *bo;

        if (!new_state->fb)
                return 0;

        vgfb = to_virtio_gpu_framebuffer(new_state->fb);
        bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
        if (!bo || (plane->type == DRM_PLANE_TYPE_PRIMARY && !bo->guest_blob))
                return 0;

        if (bo->dumb && (plane->state->fb != new_state->fb)) {
                vgfb->fence = virtio_gpu_fence_alloc(vgdev);
                if (!vgfb->fence)
                        return -ENOMEM;
        }

        return 0;
}

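/* Release any fence left over from prepare_fb. */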
static void virtio_gpu_plane_cleanup_fb(struct drm_plane *plane,
                                        struct drm_plane_state *state)
{
        struct virtio_gpu_framebuffer *vgfb;

        if (!state->fb)
                return;

        vgfb = to_virtio_gpu_framebuffer(state->fb);
        if (vgfb->fence) {
                dma_fence_put(&vgfb->fence->f);
                vgfb->fence = NULL;
        }
}

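/*
 * Cursor plane update: upload a new cursor image to the host, waiting
 * on the fence so the image is complete before it is shown, then send
 * either UPDATE_CURSOR (new framebuffer) or MOVE_CURSOR (position
 * change only).
 */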
static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
                                           struct drm_atomic_state *state)
{
        struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
                                                                           plane);
        struct drm_device *dev = plane->dev;
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_output *output = NULL;
        struct virtio_gpu_framebuffer *vgfb;
        struct virtio_gpu_object *bo = NULL;
        uint32_t handle;

        if (plane->state->crtc)
                output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
        if (old_state->crtc)
                output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
        if (WARN_ON(!output))
                return;

        if (plane->state->fb) {
                vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
                bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
                handle = bo->hw_res_handle;
        } else {
                handle = 0;
        }

        if (bo && bo->dumb && (plane->state->fb != old_state->fb)) {
                /* new cursor -- update & wait */
                struct virtio_gpu_object_array *objs;

                objs = virtio_gpu_array_alloc(1);
                if (!objs)
                        return;
                virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
                virtio_gpu_array_lock_resv(objs);
                virtio_gpu_cmd_transfer_to_host_2d
                        (vgdev, 0,
                         plane->state->crtc_w,
                         plane->state->crtc_h,
                         0, 0, objs, vgfb->fence);
                virtio_gpu_notify(vgdev);
                dma_fence_wait(&vgfb->fence->f, true);
                dma_fence_put(&vgfb->fence->f);
                vgfb->fence = NULL;
        }

        if (plane->state->fb != old_state->fb) {
                DRM_DEBUG("update, handle %d, pos +%d+%d, hot %d,%d\n", handle,
                          plane->state->crtc_x,
                          plane->state->crtc_y,
                          plane->state->fb ? plane->state->fb->hot_x : 0,
                          plane->state->fb ? plane->state->fb->hot_y : 0);
                output->cursor.hdr.type =
                        cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
                output->cursor.resource_id = cpu_to_le32(handle);
                if (plane->state->fb) {
                        output->cursor.hot_x =
                                cpu_to_le32(plane->state->fb->hot_x);
                        output->cursor.hot_y =
                                cpu_to_le32(plane->state->fb->hot_y);
                } else {
                        output->cursor.hot_x = cpu_to_le32(0);
                        output->cursor.hot_y = cpu_to_le32(0);
                }
        } else {
                DRM_DEBUG("move +%d+%d\n",
                          plane->state->crtc_x,
                          plane->state->crtc_y);
                output->cursor.hdr.type =
                        cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR);
        }
        output->cursor.pos.x = cpu_to_le32(plane->state->crtc_x);
        output->cursor.pos.y = cpu_to_le32(plane->state->crtc_y);
        virtio_gpu_cursor_ping(vgdev, output);
}

static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = {
        .prepare_fb             = virtio_gpu_plane_prepare_fb,
        .cleanup_fb             = virtio_gpu_plane_cleanup_fb,
        .atomic_check           = virtio_gpu_plane_atomic_check,
        .atomic_update          = virtio_gpu_primary_plane_update,
};

static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = {
        .prepare_fb             = virtio_gpu_plane_prepare_fb,
        .cleanup_fb             = virtio_gpu_plane_cleanup_fb,
        .atomic_check           = virtio_gpu_plane_atomic_check,
        .atomic_update          = virtio_gpu_cursor_plane_update,
};

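/*
 * Create a primary or cursor plane for the given scanout index, wiring
 * up the matching format list and helper vtable.
 */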
struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
                                        enum drm_plane_type type,
                                        int index)
{
        struct drm_device *dev = vgdev->ddev;
        const struct drm_plane_helper_funcs *funcs;
        struct drm_plane *plane;
        const uint32_t *formats;
        int ret, nformats;

        plane = kzalloc(sizeof(*plane), GFP_KERNEL);
        if (!plane)
                return ERR_PTR(-ENOMEM);

        if (type == DRM_PLANE_TYPE_CURSOR) {
                formats = virtio_gpu_cursor_formats;
                nformats = ARRAY_SIZE(virtio_gpu_cursor_formats);
                funcs = &virtio_gpu_cursor_helper_funcs;
        } else {
                formats = virtio_gpu_formats;
                nformats = ARRAY_SIZE(virtio_gpu_formats);
                funcs = &virtio_gpu_primary_helper_funcs;
        }
        ret = drm_universal_plane_init(dev, plane, 1 << index,
                                       &virtio_gpu_plane_funcs,
                                       formats, nformats,
                                       NULL, type, NULL);
        if (ret)
                goto err_plane_init;

        drm_plane_helper_add(plane, funcs);
        return plane;

err_plane_init:
        kfree(plane);
        return ERR_PTR(ret);
}