Merge tag 'drm-intel-next-2019-12-23' of git://anongit.freedesktop.org/drm/drm-intel...
[platform/kernel/linux-rpi.git] / drivers / gpu / drm / virtio / virtgpu_plane.c
1 /*
2  * Copyright (C) 2015 Red Hat, Inc.
3  * All Rights Reserved.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining
6  * a copy of this software and associated documentation files (the
7  * "Software"), to deal in the Software without restriction, including
8  * without limitation the rights to use, copy, modify, merge, publish,
9  * distribute, sublicense, and/or sell copies of the Software, and to
10  * permit persons to whom the Software is furnished to do so, subject to
11  * the following conditions:
12  *
13  * The above copyright notice and this permission notice (including the
14  * next paragraph) shall be included in all copies or substantial
15  * portions of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20  * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21  * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22  * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24  */
25
26 #include <drm/drm_atomic_helper.h>
27 #include <drm/drm_fourcc.h>
28 #include <drm/drm_plane_helper.h>
29
30 #include "virtgpu_drv.h"
31
/* Formats accepted on the primary plane (host byte order). */
static const uint32_t virtio_gpu_formats[] = {
        DRM_FORMAT_HOST_XRGB8888,
};
35
/* Formats accepted on the cursor plane; cursors need alpha. */
static const uint32_t virtio_gpu_cursor_formats[] = {
        DRM_FORMAT_HOST_ARGB8888,
};
39
40 uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc)
41 {
42         uint32_t format;
43
44         switch (drm_fourcc) {
45         case DRM_FORMAT_XRGB8888:
46                 format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
47                 break;
48         case DRM_FORMAT_ARGB8888:
49                 format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
50                 break;
51         case DRM_FORMAT_BGRX8888:
52                 format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM;
53                 break;
54         case DRM_FORMAT_BGRA8888:
55                 format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
56                 break;
57         default:
58                 /*
59                  * This should not happen, we handle everything listed
60                  * in virtio_gpu_formats[].
61                  */
62                 format = 0;
63                 break;
64         }
65         WARN_ON(format == 0);
66         return format;
67 }
68
/* Tear down a plane allocated by virtio_gpu_plane_init(). */
static void virtio_gpu_plane_destroy(struct drm_plane *plane)
{
        drm_plane_cleanup(plane);
        kfree(plane);
}
74
/* Plane vtable: stock atomic helpers plus our kfree-ing destroy. */
static const struct drm_plane_funcs virtio_gpu_plane_funcs = {
        .update_plane           = drm_atomic_helper_update_plane,
        .disable_plane          = drm_atomic_helper_disable_plane,
        .destroy                = virtio_gpu_plane_destroy,
        .reset                  = drm_atomic_helper_plane_reset,
        .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
        .atomic_destroy_state   = drm_atomic_helper_plane_destroy_state,
};
83
84 static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
85                                          struct drm_plane_state *state)
86 {
87         bool is_cursor = plane->type == DRM_PLANE_TYPE_CURSOR;
88         struct drm_crtc_state *crtc_state;
89         int ret;
90
91         if (!state->fb || !state->crtc)
92                 return 0;
93
94         crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
95         if (IS_ERR(crtc_state))
96                 return PTR_ERR(crtc_state);
97
98         ret = drm_atomic_helper_check_plane_state(state, crtc_state,
99                                                   DRM_PLANE_HELPER_NO_SCALING,
100                                                   DRM_PLANE_HELPER_NO_SCALING,
101                                                   is_cursor, true);
102         return ret;
103 }
104
105 static void virtio_gpu_update_dumb_bo(struct virtio_gpu_device *vgdev,
106                                       struct virtio_gpu_object *bo,
107                                       struct drm_plane_state *state)
108 {
109         struct virtio_gpu_object_array *objs;
110
111         objs = virtio_gpu_array_alloc(1);
112         if (!objs)
113                 return;
114         virtio_gpu_array_add_obj(objs, &bo->base.base);
115         virtio_gpu_cmd_transfer_to_host_2d
116                 (vgdev, 0,
117                  state->src_w >> 16,
118                  state->src_h >> 16,
119                  state->src_x >> 16,
120                  state->src_y >> 16,
121                  objs, NULL);
122 }
123
/*
 * Atomic update for the primary plane: upload dumb-BO contents,
 * point the host scanout at the resource, then flush the damaged
 * region.  The command order (transfer -> set_scanout -> flush)
 * matters: the flush is what makes the new pixels visible.
 */
static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
                                            struct drm_plane_state *old_state)
{
        struct drm_device *dev = plane->dev;
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_output *output = NULL;
        struct virtio_gpu_framebuffer *vgfb;
        struct virtio_gpu_object *bo;

        /*
         * Resolve the output from either the new or the old crtc; the
         * old one wins so a disable still reaches the scanout being
         * turned off.
         */
        if (plane->state->crtc)
                output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
        if (old_state->crtc)
                output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
        if (WARN_ON(!output))
                return;

        if (!plane->state->fb || !output->enabled) {
                /* No fb (or output off): detach by scanning out resource 0. */
                DRM_DEBUG("nofb\n");
                virtio_gpu_cmd_set_scanout(vgdev, output->index, 0,
                                           plane->state->src_w >> 16,
                                           plane->state->src_h >> 16,
                                           0, 0);
                return;
        }

        vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
        bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
        /* Dumb BOs live in guest memory; copy their pixels to the host. */
        if (bo->dumb)
                virtio_gpu_update_dumb_bo(vgdev, bo, plane->state);

        /* src_* are 16.16 fixed point, hence the >> 16 below. */
        DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d, src %dx%d+%d+%d\n",
                  bo->hw_res_handle,
                  plane->state->crtc_w, plane->state->crtc_h,
                  plane->state->crtc_x, plane->state->crtc_y,
                  plane->state->src_w >> 16,
                  plane->state->src_h >> 16,
                  plane->state->src_x >> 16,
                  plane->state->src_y >> 16);
        virtio_gpu_cmd_set_scanout(vgdev, output->index,
                                   bo->hw_res_handle,
                                   plane->state->src_w >> 16,
                                   plane->state->src_h >> 16,
                                   plane->state->src_x >> 16,
                                   plane->state->src_y >> 16);
        virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle,
                                      plane->state->src_x >> 16,
                                      plane->state->src_y >> 16,
                                      plane->state->src_w >> 16,
                                      plane->state->src_h >> 16);
}
174
175 static int virtio_gpu_cursor_prepare_fb(struct drm_plane *plane,
176                                         struct drm_plane_state *new_state)
177 {
178         struct drm_device *dev = plane->dev;
179         struct virtio_gpu_device *vgdev = dev->dev_private;
180         struct virtio_gpu_framebuffer *vgfb;
181         struct virtio_gpu_object *bo;
182
183         if (!new_state->fb)
184                 return 0;
185
186         vgfb = to_virtio_gpu_framebuffer(new_state->fb);
187         bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
188         if (bo && bo->dumb && (plane->state->fb != new_state->fb)) {
189                 vgfb->fence = virtio_gpu_fence_alloc(vgdev);
190                 if (!vgfb->fence)
191                         return -ENOMEM;
192         }
193
194         return 0;
195 }
196
197 static void virtio_gpu_cursor_cleanup_fb(struct drm_plane *plane,
198                                          struct drm_plane_state *old_state)
199 {
200         struct virtio_gpu_framebuffer *vgfb;
201
202         if (!plane->state->fb)
203                 return;
204
205         vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
206         if (vgfb->fence) {
207                 dma_fence_put(&vgfb->fence->f);
208                 vgfb->fence = NULL;
209         }
210 }
211
/*
 * Atomic update for the cursor plane.  A changed framebuffer means a
 * new cursor image: upload it, wait for the transfer to finish, then
 * send UPDATE_CURSOR.  An unchanged framebuffer is just a position
 * change and only needs the cheaper MOVE_CURSOR.
 */
static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
                                           struct drm_plane_state *old_state)
{
        struct drm_device *dev = plane->dev;
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_output *output = NULL;
        struct virtio_gpu_framebuffer *vgfb;
        struct virtio_gpu_object *bo = NULL;
        uint32_t handle;

        /* Old crtc wins so a disable still reaches the right output. */
        if (plane->state->crtc)
                output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
        if (old_state->crtc)
                output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
        if (WARN_ON(!output))
                return;

        if (plane->state->fb) {
                vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
                bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
                handle = bo->hw_res_handle;
        } else {
                /* resource 0 hides the cursor */
                handle = 0;
        }

        if (bo && bo->dumb && (plane->state->fb != old_state->fb)) {
                /* new cursor -- update & wait */
                struct virtio_gpu_object_array *objs;

                objs = virtio_gpu_array_alloc(1);
                if (!objs)
                        return;
                virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
                /*
                 * Upload the cursor pixels, then block until the host
                 * has them (fence was allocated in prepare_fb for
                 * exactly this fb-changed + dumb-bo case).
                 */
                virtio_gpu_cmd_transfer_to_host_2d
                        (vgdev, 0,
                         plane->state->crtc_w,
                         plane->state->crtc_h,
                         0, 0, objs, vgfb->fence);
                dma_fence_wait(&vgfb->fence->f, true);
                dma_fence_put(&vgfb->fence->f);
                vgfb->fence = NULL;
        }

        if (plane->state->fb != old_state->fb) {
                /* Image changed: full UPDATE_CURSOR with new hotspot. */
                DRM_DEBUG("update, handle %d, pos +%d+%d, hot %d,%d\n", handle,
                          plane->state->crtc_x,
                          plane->state->crtc_y,
                          plane->state->fb ? plane->state->fb->hot_x : 0,
                          plane->state->fb ? plane->state->fb->hot_y : 0);
                output->cursor.hdr.type =
                        cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
                output->cursor.resource_id = cpu_to_le32(handle);
                if (plane->state->fb) {
                        output->cursor.hot_x =
                                cpu_to_le32(plane->state->fb->hot_x);
                        output->cursor.hot_y =
                                cpu_to_le32(plane->state->fb->hot_y);
                } else {
                        output->cursor.hot_x = cpu_to_le32(0);
                        output->cursor.hot_y = cpu_to_le32(0);
                }
        } else {
                /* Same image: position-only MOVE_CURSOR. */
                DRM_DEBUG("move +%d+%d\n",
                          plane->state->crtc_x,
                          plane->state->crtc_y);
                output->cursor.hdr.type =
                        cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR);
        }
        output->cursor.pos.x = cpu_to_le32(plane->state->crtc_x);
        output->cursor.pos.y = cpu_to_le32(plane->state->crtc_y);
        virtio_gpu_cursor_ping(vgdev, output);
}
284
/* Primary plane: no prepare/cleanup needed, no fences involved. */
static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = {
        .atomic_check           = virtio_gpu_plane_atomic_check,
        .atomic_update          = virtio_gpu_primary_plane_update,
};
289
/* Cursor plane: prepare/cleanup manage the upload fence. */
static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = {
        .prepare_fb             = virtio_gpu_cursor_prepare_fb,
        .cleanup_fb             = virtio_gpu_cursor_cleanup_fb,
        .atomic_check           = virtio_gpu_plane_atomic_check,
        .atomic_update          = virtio_gpu_cursor_plane_update,
};
296
297 struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
298                                         enum drm_plane_type type,
299                                         int index)
300 {
301         struct drm_device *dev = vgdev->ddev;
302         const struct drm_plane_helper_funcs *funcs;
303         struct drm_plane *plane;
304         const uint32_t *formats;
305         int ret, nformats;
306
307         plane = kzalloc(sizeof(*plane), GFP_KERNEL);
308         if (!plane)
309                 return ERR_PTR(-ENOMEM);
310
311         if (type == DRM_PLANE_TYPE_CURSOR) {
312                 formats = virtio_gpu_cursor_formats;
313                 nformats = ARRAY_SIZE(virtio_gpu_cursor_formats);
314                 funcs = &virtio_gpu_cursor_helper_funcs;
315         } else {
316                 formats = virtio_gpu_formats;
317                 nformats = ARRAY_SIZE(virtio_gpu_formats);
318                 funcs = &virtio_gpu_primary_helper_funcs;
319         }
320         ret = drm_universal_plane_init(dev, plane, 1 << index,
321                                        &virtio_gpu_plane_funcs,
322                                        formats, nformats,
323                                        NULL, type, NULL);
324         if (ret)
325                 goto err_plane_init;
326
327         drm_plane_helper_add(plane, funcs);
328         return plane;
329
330 err_plane_init:
331         kfree(plane);
332         return ERR_PTR(ret);
333 }