1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /**************************************************************************
4 * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
27 #include "vmwgfx_kms.h"
29 #include "vmwgfx_bo.h"
30 #include "vmw_surface_cache.h"
32 #include <drm/drm_atomic.h>
33 #include <drm/drm_atomic_helper.h>
34 #include <drm/drm_damage_helper.h>
35 #include <drm/drm_fourcc.h>
36 #include <drm/drm_rect.h>
37 #include <drm/drm_sysfs.h>
39 void vmw_du_cleanup(struct vmw_display_unit *du)
41 struct vmw_private *dev_priv = vmw_priv(du->primary.dev);
42 drm_plane_cleanup(&du->primary);
43 if (vmw_cmd_supported(dev_priv))
44 drm_plane_cleanup(&du->cursor.base);
46 drm_connector_unregister(&du->connector);
47 drm_crtc_cleanup(&du->crtc);
48 drm_encoder_cleanup(&du->encoder);
49 drm_connector_cleanup(&du->connector);
53 * Display Unit Cursor functions
56 static int vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps);
57 static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
58 struct vmw_plane_state *vps,
59 u32 *image, u32 width, u32 height,
60 u32 hotspotX, u32 hotspotY);
62 struct vmw_svga_fifo_cmd_define_cursor {
64 SVGAFifoCmdDefineAlphaCursor cursor;
68 * vmw_send_define_cursor_cmd - queue a define cursor command
69 * @dev_priv: the private driver struct
70 * @image: buffer which holds the cursor image
71 * @width: width of the mouse cursor image
72 * @height: height of the mouse cursor image
73 * @hotspotX: the horizontal position of mouse hotspot
74 * @hotspotY: the vertical position of mouse hotspot
76 static void vmw_send_define_cursor_cmd(struct vmw_private *dev_priv,
77 u32 *image, u32 width, u32 height,
78 u32 hotspotX, u32 hotspotY)
80 struct vmw_svga_fifo_cmd_define_cursor *cmd;
81 const u32 image_size = width * height * sizeof(*image);
82 const u32 cmd_size = sizeof(*cmd) + image_size;
84 /* Try to reserve fifocmd space and swallow any failures;
85 such reservations cannot be left unconsumed for long
86 without risking clogging other fifocmd users, so
87 we treat reservations separately from the way we treat
88 other fallible KMS-atomic resources at prepare_fb */
89 cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);
94 memset(cmd, 0, sizeof(*cmd));
96 memcpy(&cmd[1], image, image_size);
98 cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
100 cmd->cursor.width = width;
101 cmd->cursor.height = height;
102 cmd->cursor.hotspotX = hotspotX;
103 cmd->cursor.hotspotY = hotspotY;
105 vmw_cmd_commit_flush(dev_priv, cmd_size);
109 * vmw_cursor_update_image - update the cursor image on the provided plane
110 * @dev_priv: the private driver struct
111 * @vps: the plane state of the cursor plane
112 * @image: buffer which holds the cursor image
113 * @width: width of the mouse cursor image
114 * @height: height of the mouse cursor image
115 * @hotspotX: the horizontal position of mouse hotspot
116 * @hotspotY: the vertical position of mouse hotspot
118 static void vmw_cursor_update_image(struct vmw_private *dev_priv,
119 struct vmw_plane_state *vps,
120 u32 *image, u32 width, u32 height,
121 u32 hotspotX, u32 hotspotY)
124 vmw_cursor_update_mob(dev_priv, vps, image,
125 vps->base.crtc_w, vps->base.crtc_h,
129 vmw_send_define_cursor_cmd(dev_priv, image, width, height,
135 * vmw_cursor_update_mob - Update the cursor via the CursorMob mechanism
137 * Called from inside vmw_du_cursor_plane_atomic_update to actually
138 * make the cursor-image live.
140 * @dev_priv: device to work with
141 * @vps: the plane state of the cursor plane
142 * @image: cursor source data to fill the MOB with
143 * @width: source data width
144 * @height: source data height
145 * @hotspotX: cursor hotspot x
146 * @hotspotY: cursor hotspot Y
148 static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
149 struct vmw_plane_state *vps,
150 u32 *image, u32 width, u32 height,
151 u32 hotspotX, u32 hotspotY)
153 SVGAGBCursorHeader *header;
154 SVGAGBAlphaCursorHeader *alpha_header;
155 const u32 image_size = width * height * sizeof(*image);
157 header = vmw_bo_map_and_cache(vps->cursor.bo);
158 alpha_header = &header->header.alphaHeader;
160 memset(header, 0, sizeof(*header));
162 header->type = SVGA_ALPHA_CURSOR;
163 header->sizeInBytes = image_size;
165 alpha_header->hotspotX = hotspotX;
166 alpha_header->hotspotY = hotspotY;
167 alpha_header->width = width;
168 alpha_header->height = height;
170 memcpy(header + 1, image, image_size);
171 vmw_write(dev_priv, SVGA_REG_CURSOR_MOBID,
172 vps->cursor.bo->tbo.resource->start);
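/*
 * A cursor MOB holds an SVGAGBCursorHeader immediately followed by the
 * 32-bit per-pixel cursor image, which is what the size below accounts for.
 */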
176 static u32 vmw_du_cursor_mob_size(u32 w, u32 h)
178 return w * h * sizeof(u32) + sizeof(SVGAGBCursorHeader);
182 * vmw_du_cursor_plane_acquire_image - Acquire the image data
183 * @vps: cursor plane state
185 static u32 *vmw_du_cursor_plane_acquire_image(struct vmw_plane_state *vps)
189 if (vps->surf_mapped)
190 return vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo);
191 return vps->surf->snooper.image;
193 return ttm_kmap_obj_virtual(&vps->bo->map, &is_iomem);
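/*
 * Report whether the cursor differs between the old and new plane state:
 * dimensions, hotspot, and (when both images can be mapped) the image bits.
 */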
197 static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps,
198 struct vmw_plane_state *new_vps)
205 if (old_vps->base.crtc_w != new_vps->base.crtc_w ||
206 old_vps->base.crtc_h != new_vps->base.crtc_h)
209 if (old_vps->cursor.hotspot_x != new_vps->cursor.hotspot_x ||
210 old_vps->cursor.hotspot_y != new_vps->cursor.hotspot_y)
213 size = new_vps->base.crtc_w * new_vps->base.crtc_h * sizeof(u32);
215 old_image = vmw_du_cursor_plane_acquire_image(old_vps);
216 new_image = vmw_du_cursor_plane_acquire_image(new_vps);
219 if (old_image && new_image)
220 changed = memcmp(old_image, new_image, size) != 0;
225 static void vmw_du_destroy_cursor_mob(struct vmw_bo **vbo)
230 ttm_bo_unpin(&(*vbo)->tbo);
231 vmw_bo_unreference(vbo);
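/*
 * Unmap the cursor MOB attached to @vps and try to park it in the plane's
 * small cache, evicting a smaller cached MOB if the cache is full; if the
 * MOB is not worth caching it is destroyed.
 */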
234 static void vmw_du_put_cursor_mob(struct vmw_cursor_plane *vcp,
235 struct vmw_plane_state *vps)
242 vmw_du_cursor_plane_unmap_cm(vps);
244 /* Look for a free slot to return this mob to the cache. */
245 for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
246 if (!vcp->cursor_mobs[i]) {
247 vcp->cursor_mobs[i] = vps->cursor.bo;
248 vps->cursor.bo = NULL;
253 /* Cache is full: See if this mob is bigger than an existing mob. */
254 for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
255 if (vcp->cursor_mobs[i]->tbo.base.size <
256 vps->cursor.bo->tbo.base.size) {
257 vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
258 vcp->cursor_mobs[i] = vps->cursor.bo;
259 vps->cursor.bo = NULL;
264 /* Destroy it if it's not worth caching. */
265 vmw_du_destroy_cursor_mob(&vps->cursor.bo);
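/*
 * Provide a cursor MOB for @vps: bail out if the device lacks MOB cursor
 * support or the cursor exceeds the device limits, reuse the current or a
 * cached MOB when it is large enough, and otherwise allocate, populate and
 * fence a new one.
 */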
268 static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp,
269 struct vmw_plane_state *vps)
271 struct vmw_private *dev_priv = vcp->base.dev->dev_private;
272 u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
274 u32 cursor_max_dim, mob_max_size;
277 if (!dev_priv->has_mob ||
278 (dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0)
281 mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
282 cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION);
284 if (size > mob_max_size || vps->base.crtc_w > cursor_max_dim ||
285 vps->base.crtc_h > cursor_max_dim)
288 if (vps->cursor.bo) {
289 if (vps->cursor.bo->tbo.base.size >= size)
291 vmw_du_put_cursor_mob(vcp, vps);
294 /* Look for an unused mob in the cache. */
295 for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
296 if (vcp->cursor_mobs[i] &&
297 vcp->cursor_mobs[i]->tbo.base.size >= size) {
298 vps->cursor.bo = vcp->cursor_mobs[i];
299 vcp->cursor_mobs[i] = NULL;
303 /* Create a new mob if we can't find an existing one. */
304 ret = vmw_bo_create_and_populate(dev_priv, size,
311 /* Fence the mob creation so we are guaranteed to have the mob */
312 ret = ttm_bo_reserve(&vps->cursor.bo->tbo, false, false, NULL);
316 vmw_bo_fence_single(&vps->cursor.bo->tbo, NULL);
317 ttm_bo_unreserve(&vps->cursor.bo->tbo);
321 vmw_du_destroy_cursor_mob(&vps->cursor.bo);
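/*
 * Program cursor visibility and position using whichever interface the
 * device exposes: the CURSOR4 extra registers, FIFO cursor bypass 3, or the
 * legacy SVGA_REG_CURSOR_* registers.
 */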
326 static void vmw_cursor_update_position(struct vmw_private *dev_priv,
327 bool show, int x, int y)
329 const uint32_t svga_cursor_on = show ? SVGA_CURSOR_ON_SHOW
330 : SVGA_CURSOR_ON_HIDE;
333 spin_lock(&dev_priv->cursor_lock);
334 if (dev_priv->capabilities2 & SVGA_CAP2_EXTRA_REGS) {
335 vmw_write(dev_priv, SVGA_REG_CURSOR4_X, x);
336 vmw_write(dev_priv, SVGA_REG_CURSOR4_Y, y);
337 vmw_write(dev_priv, SVGA_REG_CURSOR4_SCREEN_ID, SVGA3D_INVALID_ID);
338 vmw_write(dev_priv, SVGA_REG_CURSOR4_ON, svga_cursor_on);
339 vmw_write(dev_priv, SVGA_REG_CURSOR4_SUBMIT, 1);
340 } else if (vmw_is_cursor_bypass3_enabled(dev_priv)) {
341 vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, svga_cursor_on);
342 vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
343 vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
344 count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
345 vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
347 vmw_write(dev_priv, SVGA_REG_CURSOR_X, x);
348 vmw_write(dev_priv, SVGA_REG_CURSOR_Y, y);
349 vmw_write(dev_priv, SVGA_REG_CURSOR_ON, svga_cursor_on);
351 spin_unlock(&dev_priv->cursor_lock);
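/*
 * Snoop surface DMA uploads targeting a cursor surface and keep a copy of
 * the image in srf->snooper.image so the cursor can be re-defined later
 * (see vmw_kms_cursor_post_execbuf()).
 */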
354 void vmw_kms_cursor_snoop(struct vmw_surface *srf,
355 struct ttm_object_file *tfile,
356 struct ttm_buffer_object *bo,
357 SVGA3dCmdHeader *header)
359 struct ttm_bo_kmap_obj map;
360 unsigned long kmap_offset;
361 unsigned long kmap_num;
367 SVGA3dCmdHeader header;
368 SVGA3dCmdSurfaceDMA dma;
371 const struct SVGA3dSurfaceDesc *desc =
372 vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
373 const u32 image_pitch = VMW_CURSOR_SNOOP_WIDTH * desc->pitchBytesPerBlock;
375 cmd = container_of(header, struct vmw_dma_cmd, header);
377 /* No snooper installed, nothing to copy */
378 if (!srf->snooper.image)
381 if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
382 DRM_ERROR("face and mipmap for cursors should never != 0\n");
386 if (cmd->header.size < 64) {
387 DRM_ERROR("at least one full copy box must be given\n");
391 box = (SVGA3dCopyBox *)&cmd[1];
392 box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
393 sizeof(SVGA3dCopyBox);
395 if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
396 box->x != 0 || box->y != 0 || box->z != 0 ||
397 box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
398 box->d != 1 || box_count != 1 ||
399 box->w > VMW_CURSOR_SNOOP_WIDTH || box->h > VMW_CURSOR_SNOOP_HEIGHT) {
400 /* TODO handle non-page-aligned offsets */
401 /* TODO handle dst & src coordinates != 0 */
402 /* TODO handle more than one copy box */
403 DRM_ERROR("Can't snoop dma request for cursor!\n");
404 DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
405 box->srcx, box->srcy, box->srcz,
406 box->x, box->y, box->z,
407 box->w, box->h, box->d, box_count,
408 cmd->dma.guest.ptr.offset);
412 kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
413 kmap_num = (VMW_CURSOR_SNOOP_HEIGHT*image_pitch) >> PAGE_SHIFT;
415 ret = ttm_bo_reserve(bo, true, false, NULL);
416 if (unlikely(ret != 0)) {
417 DRM_ERROR("reserve failed\n");
421 ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
422 if (unlikely(ret != 0))
425 virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
427 if (box->w == VMW_CURSOR_SNOOP_WIDTH && cmd->dma.guest.pitch == image_pitch) {
428 memcpy(srf->snooper.image, virtual,
429 VMW_CURSOR_SNOOP_HEIGHT*image_pitch);
431 /* Different pitch or a partial-width box: copy the image line by line. */
432 for (i = 0; i < box->h; i++)
433 memcpy(srf->snooper.image + i * image_pitch,
434 virtual + i * cmd->dma.guest.pitch,
435 box->w * desc->pitchBytesPerBlock);
442 ttm_bo_unreserve(bo);
446 * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
448 * @dev_priv: Pointer to the device private struct.
450 * Clears all legacy hotspots.
452 void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
454 struct drm_device *dev = &dev_priv->drm;
455 struct vmw_display_unit *du;
456 struct drm_crtc *crtc;
458 drm_modeset_lock_all(dev);
459 drm_for_each_crtc(crtc, dev) {
460 du = vmw_crtc_to_du(crtc);
465 drm_modeset_unlock_all(dev);
468 void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
470 struct drm_device *dev = &dev_priv->drm;
471 struct vmw_display_unit *du;
472 struct drm_crtc *crtc;
474 mutex_lock(&dev->mode_config.mutex);
476 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
477 du = vmw_crtc_to_du(crtc);
478 if (!du->cursor_surface ||
479 du->cursor_age == du->cursor_surface->snooper.age ||
480 !du->cursor_surface->snooper.image)
483 du->cursor_age = du->cursor_surface->snooper.age;
484 vmw_send_define_cursor_cmd(dev_priv,
485 du->cursor_surface->snooper.image,
486 VMW_CURSOR_SNOOP_WIDTH,
487 VMW_CURSOR_SNOOP_HEIGHT,
488 du->hotspot_x + du->core_hotspot_x,
489 du->hotspot_y + du->core_hotspot_y);
492 mutex_unlock(&dev->mode_config.mutex);
496 void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
498 struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
501 vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);
503 for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++)
504 vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
506 drm_plane_cleanup(plane);
510 void vmw_du_primary_plane_destroy(struct drm_plane *plane)
512 drm_plane_cleanup(plane);
514 /* Planes are static in our case so we don't free them */
519 * vmw_du_plane_unpin_surf - unpins resource associated with a framebuffer surface
521 * @vps: plane state associated with the display surface
522 * @unreference: true if we also want to unreference the display surface.
524 void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps,
529 vmw_resource_unpin(&vps->surf->res);
535 DRM_ERROR("Surface still pinned\n");
536 vmw_surface_unreference(&vps->surf);
543 * vmw_du_plane_cleanup_fb - Unpins the plane surface
545 * @plane: display plane
546 * @old_state: Contains the FB to clean up
548 * Unpins the framebuffer surface
550 * Returns 0 on success
553 vmw_du_plane_cleanup_fb(struct drm_plane *plane,
554 struct drm_plane_state *old_state)
556 struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
558 vmw_du_plane_unpin_surf(vps, false);
563 * vmw_du_cursor_plane_map_cm - Maps the cursor mobs.
567 * Returns 0 on success
571 vmw_du_cursor_plane_map_cm(struct vmw_plane_state *vps)
574 u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
575 struct ttm_buffer_object *bo;
580 bo = &vps->cursor.bo->tbo;
582 if (bo->base.size < size)
585 if (vps->cursor.bo->map.virtual)
588 ret = ttm_bo_reserve(bo, false, false, NULL);
589 if (unlikely(ret != 0))
592 vmw_bo_map_and_cache(vps->cursor.bo);
594 ttm_bo_unreserve(bo);
596 if (unlikely(ret != 0))
604 * vmw_du_cursor_plane_unmap_cm - Unmaps the cursor mobs.
606 * @vps: state of the cursor plane
608 * Returns 0 on success
612 vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps)
615 struct vmw_bo *vbo = vps->cursor.bo;
617 if (!vbo || !vbo->map.virtual)
620 ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL);
621 if (likely(ret == 0)) {
623 ttm_bo_unreserve(&vbo->tbo);
631 * vmw_du_cursor_plane_cleanup_fb - Unpins the plane surface
633 * @plane: cursor plane
634 * @old_state: contains the state to clean up
636 * Unmaps all cursor bo mappings and unpins the cursor surface
638 * Returns 0 on success
641 vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
642 struct drm_plane_state *old_state)
644 struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
645 struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
648 if (vps->surf_mapped) {
649 vmw_bo_unmap(vps->surf->res.guest_memory_bo);
650 vps->surf_mapped = false;
653 if (vps->bo && ttm_kmap_obj_virtual(&vps->bo->map, &is_iomem)) {
654 const int ret = ttm_bo_reserve(&vps->bo->tbo, true, false, NULL);
656 if (likely(ret == 0)) {
657 ttm_bo_kunmap(&vps->bo->map);
658 ttm_bo_unreserve(&vps->bo->tbo);
662 vmw_du_cursor_plane_unmap_cm(vps);
663 vmw_du_put_cursor_mob(vcp, vps);
665 vmw_du_plane_unpin_surf(vps, false);
668 vmw_surface_unreference(&vps->surf);
673 vmw_bo_unreference(&vps->bo);
680 * vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it
682 * @plane: display plane
683 * @new_state: info on the new plane state, including the FB
685 * Returns 0 on success
688 vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
689 struct drm_plane_state *new_state)
691 struct drm_framebuffer *fb = new_state->fb;
692 struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
693 struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
697 vmw_surface_unreference(&vps->surf);
702 vmw_bo_unreference(&vps->bo);
707 if (vmw_framebuffer_to_vfb(fb)->bo) {
708 vps->bo = vmw_framebuffer_to_vfbd(fb)->buffer;
709 vmw_bo_reference(vps->bo);
711 vps->surf = vmw_framebuffer_to_vfbs(fb)->surface;
712 vmw_surface_reference(vps->surf);
716 if (!vps->surf && vps->bo) {
717 const u32 size = new_state->crtc_w * new_state->crtc_h * sizeof(u32);
720 * Not using vmw_bo_map_and_cache() helper here as we need to
721 * reserve the ttm_buffer_object first which
722 * vmw_bo_map_and_cache() omits.
724 ret = ttm_bo_reserve(&vps->bo->tbo, true, false, NULL);
726 if (unlikely(ret != 0))
729 ret = ttm_bo_kmap(&vps->bo->tbo, 0, PFN_UP(size), &vps->bo->map);
731 ttm_bo_unreserve(&vps->bo->tbo);
733 if (unlikely(ret != 0))
735 } else if (vps->surf && !vps->bo && vps->surf->res.guest_memory_bo) {
737 WARN_ON(vps->surf->snooper.image);
738 ret = ttm_bo_reserve(&vps->surf->res.guest_memory_bo->tbo, true, false,
740 if (unlikely(ret != 0))
742 vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo);
743 ttm_bo_unreserve(&vps->surf->res.guest_memory_bo->tbo);
744 vps->surf_mapped = true;
747 if (vps->surf || vps->bo) {
748 vmw_du_get_cursor_mob(vcp, vps);
749 vmw_du_cursor_plane_map_cm(vps);
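/*
 * Commit the cursor plane: push the new image to the device (reusing the
 * previous one when nothing changed) and reprogram the cursor position,
 * taking both the fb and the legacy/core hotspots into account.
 */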
757 vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
758 struct drm_atomic_state *state)
760 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
762 struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
764 struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc;
765 struct vmw_private *dev_priv = vmw_priv(crtc->dev);
766 struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
767 struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
768 struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state);
769 s32 hotspot_x, hotspot_y;
771 hotspot_x = du->hotspot_x;
772 hotspot_y = du->hotspot_y;
775 hotspot_x += new_state->fb->hot_x;
776 hotspot_y += new_state->fb->hot_y;
779 du->cursor_surface = vps->surf;
780 du->cursor_bo = vps->bo;
782 if (!vps->surf && !vps->bo) {
783 vmw_cursor_update_position(dev_priv, false, 0, 0);
787 vps->cursor.hotspot_x = hotspot_x;
788 vps->cursor.hotspot_y = hotspot_y;
791 du->cursor_age = du->cursor_surface->snooper.age;
794 if (!vmw_du_cursor_plane_has_changed(old_vps, vps)) {
796 * If it hasn't changed, avoid making the device do extra
797 * work by keeping the old cursor active.
799 struct vmw_cursor_plane_state tmp = old_vps->cursor;
800 old_vps->cursor = vps->cursor;
803 void *image = vmw_du_cursor_plane_acquire_image(vps);
805 vmw_cursor_update_image(dev_priv, vps, image,
808 hotspot_x, hotspot_y);
811 du->cursor_x = new_state->crtc_x + du->set_gui_x;
812 du->cursor_y = new_state->crtc_y + du->set_gui_y;
814 vmw_cursor_update_position(dev_priv, true,
815 du->cursor_x + hotspot_x,
816 du->cursor_y + hotspot_y);
818 du->core_hotspot_x = hotspot_x - du->hotspot_x;
819 du->core_hotspot_y = hotspot_y - du->hotspot_y;
824 * vmw_du_primary_plane_atomic_check - check if the new state is okay
826 * @plane: display plane
827 * @state: info on the new plane state, including the FB
829 * Check if the new state is settable given the current state. Other
830 * than what the atomic helper checks, we care about crtc fitting
831 * the FB and maintaining one active framebuffer.
833 * Returns 0 on success
835 int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
836 struct drm_atomic_state *state)
838 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
840 struct drm_crtc_state *crtc_state = NULL;
841 struct drm_framebuffer *new_fb = new_state->fb;
845 crtc_state = drm_atomic_get_new_crtc_state(state,
848 ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
849 DRM_PLANE_NO_SCALING,
850 DRM_PLANE_NO_SCALING,
853 if (!ret && new_fb) {
854 struct drm_crtc *crtc = new_state->crtc;
855 struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
857 vmw_connector_state_to_vcs(du->connector.state);
866 * vmw_du_cursor_plane_atomic_check - check if the new state is okay
868 * @plane: cursor plane
869 * @state: info on the new plane state
871 * This is a chance to fail if the new cursor state does not fit
874 * Returns 0 on success
876 int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
877 struct drm_atomic_state *state)
879 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
882 struct drm_crtc_state *crtc_state = NULL;
883 struct vmw_surface *surface = NULL;
884 struct drm_framebuffer *fb = new_state->fb;
887 crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
890 ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
891 DRM_PLANE_NO_SCALING,
892 DRM_PLANE_NO_SCALING,
901 /* A lot of the code assumes this */
902 if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
903 DRM_ERROR("Invalid cursor dimensions (%d, %d)\n",
904 new_state->crtc_w, new_state->crtc_h);
908 if (!vmw_framebuffer_to_vfb(fb)->bo) {
909 surface = vmw_framebuffer_to_vfbs(fb)->surface;
914 (!surface->snooper.image && !surface->res.guest_memory_bo)) {
915 DRM_ERROR("surface not suitable for cursor\n");
924 int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
925 struct drm_atomic_state *state)
927 struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
929 struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
930 int connector_mask = drm_connector_mask(&du->connector);
931 bool has_primary = new_state->plane_mask &
932 drm_plane_mask(crtc->primary);
934 /* We always want to have an active plane with an active CRTC */
935 if (has_primary != new_state->enable)
939 if (new_state->connector_mask != connector_mask &&
940 new_state->connector_mask != 0) {
941 DRM_ERROR("Invalid connectors configuration\n");
946 * Our virtual device does not have a dot clock, so use the logical
947 * clock value as the dot clock.
949 if (new_state->mode.crtc_clock == 0)
950 new_state->adjusted_mode.crtc_clock = new_state->mode.clock;
956 void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
957 struct drm_atomic_state *state)
962 void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc,
963 struct drm_atomic_state *state)
969 * vmw_du_crtc_duplicate_state - duplicate crtc state
972 * Allocates and returns a copy of the crtc state (both common and
973 * vmw-specific) for the specified crtc.
975 * Returns: The newly allocated crtc state, or NULL on failure.
977 struct drm_crtc_state *
978 vmw_du_crtc_duplicate_state(struct drm_crtc *crtc)
980 struct drm_crtc_state *state;
981 struct vmw_crtc_state *vcs;
983 if (WARN_ON(!crtc->state))
986 vcs = kmemdup(crtc->state, sizeof(*vcs), GFP_KERNEL);
993 __drm_atomic_helper_crtc_duplicate_state(crtc, state);
1000 * vmw_du_crtc_reset - creates a blank vmw crtc state
1003 * Resets the atomic state for @crtc by freeing the state pointer (which
1004 * might be NULL, e.g. at driver load time) and allocating a new empty state
1007 void vmw_du_crtc_reset(struct drm_crtc *crtc)
1009 struct vmw_crtc_state *vcs;
1013 __drm_atomic_helper_crtc_destroy_state(crtc->state);
1015 kfree(vmw_crtc_state_to_vcs(crtc->state));
1018 vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
1021 DRM_ERROR("Cannot allocate vmw_crtc_state\n");
1025 __drm_atomic_helper_crtc_reset(crtc, &vcs->base);
1030 * vmw_du_crtc_destroy_state - destroy crtc state
1032 * @state: state object to destroy
1034 * Destroys the crtc state (both common and vmw-specific) for the
1038 vmw_du_crtc_destroy_state(struct drm_crtc *crtc,
1039 struct drm_crtc_state *state)
1041 drm_atomic_helper_crtc_destroy_state(crtc, state);
1046 * vmw_du_plane_duplicate_state - duplicate plane state
1049 * Allocates and returns a copy of the plane state (both common and
1050 * vmw-specific) for the specified plane.
1052 * Returns: The newly allocated plane state, or NULL on failure.
1054 struct drm_plane_state *
1055 vmw_du_plane_duplicate_state(struct drm_plane *plane)
1057 struct drm_plane_state *state;
1058 struct vmw_plane_state *vps;
1060 vps = kmemdup(plane->state, sizeof(*vps), GFP_KERNEL);
1068 memset(&vps->cursor, 0, sizeof(vps->cursor));
1070 /* Each ref counted resource needs to be acquired again */
1072 (void) vmw_surface_reference(vps->surf);
1075 (void) vmw_bo_reference(vps->bo);
1079 __drm_atomic_helper_plane_duplicate_state(plane, state);
1086 * vmw_du_plane_reset - creates a blank vmw plane state
1089 * Resets the atomic state for @plane by freeing the state pointer (which might
1090 * be NULL, e.g. at driver load time) and allocating a new empty state object.
1092 void vmw_du_plane_reset(struct drm_plane *plane)
1094 struct vmw_plane_state *vps;
1097 vmw_du_plane_destroy_state(plane, plane->state);
1099 vps = kzalloc(sizeof(*vps), GFP_KERNEL);
1102 DRM_ERROR("Cannot allocate vmw_plane_state\n");
1106 __drm_atomic_helper_plane_reset(plane, &vps->base);
1111 * vmw_du_plane_destroy_state - destroy plane state
1113 * @state: state object to destroy
1115 * Destroys the plane state (both common and vmw-specific) for the
1119 vmw_du_plane_destroy_state(struct drm_plane *plane,
1120 struct drm_plane_state *state)
1122 struct vmw_plane_state *vps = vmw_plane_state_to_vps(state);
1124 /* Should have been freed by cleanup_fb */
1126 vmw_surface_unreference(&vps->surf);
1129 vmw_bo_unreference(&vps->bo);
1131 drm_atomic_helper_plane_destroy_state(plane, state);
1136 * vmw_du_connector_duplicate_state - duplicate connector state
1137 * @connector: DRM connector
1139 * Allocates and returns a copy of the connector state (both common and
1140 * vmw-specific) for the specified connector.
1142 * Returns: The newly allocated connector state, or NULL on failure.
1144 struct drm_connector_state *
1145 vmw_du_connector_duplicate_state(struct drm_connector *connector)
1147 struct drm_connector_state *state;
1148 struct vmw_connector_state *vcs;
1150 if (WARN_ON(!connector->state))
1153 vcs = kmemdup(connector->state, sizeof(*vcs), GFP_KERNEL);
1160 __drm_atomic_helper_connector_duplicate_state(connector, state);
1167 * vmw_du_connector_reset - creates a blank vmw connector state
1168 * @connector: DRM connector
1170 * Resets the atomic state for @connector by freeing the state pointer (which
1171 * might be NULL, e.g. at driver load time) and allocating a new empty state
1174 void vmw_du_connector_reset(struct drm_connector *connector)
1176 struct vmw_connector_state *vcs;
1179 if (connector->state) {
1180 __drm_atomic_helper_connector_destroy_state(connector->state);
1182 kfree(vmw_connector_state_to_vcs(connector->state));
1185 vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
1188 DRM_ERROR("Cannot allocate vmw_connector_state\n");
1192 __drm_atomic_helper_connector_reset(connector, &vcs->base);
1197 * vmw_du_connector_destroy_state - destroy connector state
1198 * @connector: DRM connector
1199 * @state: state object to destroy
1201 * Destroys the connector state (both common and vmw-specific) for the
1205 vmw_du_connector_destroy_state(struct drm_connector *connector,
1206 struct drm_connector_state *state)
1208 drm_atomic_helper_connector_destroy_state(connector, state);
1211 * Generic framebuffer code
1215 * Surface framebuffer code
1218 static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
1220 struct vmw_framebuffer_surface *vfbs =
1221 vmw_framebuffer_to_vfbs(framebuffer);
1223 drm_framebuffer_cleanup(framebuffer);
1224 vmw_surface_unreference(&vfbs->surface);
1230 * vmw_kms_readback - Perform a readback from the screen system to
1231 * a buffer-object backed framebuffer.
1233 * @dev_priv: Pointer to the device private structure.
1234 * @file_priv: Pointer to a struct drm_file identifying the caller.
1235 * Must be set to NULL if @user_fence_rep is NULL.
1236 * @vfb: Pointer to the buffer-object backed framebuffer.
1237 * @user_fence_rep: User-space provided structure for fence information.
1238 * Must be set to non-NULL if @file_priv is non-NULL.
1239 * @vclips: Array of clip rects.
1240 * @num_clips: Number of clip rects in @vclips.
1242 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if interrupted.
1245 int vmw_kms_readback(struct vmw_private *dev_priv,
1246 struct drm_file *file_priv,
1247 struct vmw_framebuffer *vfb,
1248 struct drm_vmw_fence_rep __user *user_fence_rep,
1249 struct drm_vmw_rect *vclips,
1252 switch (dev_priv->active_display_unit) {
1253 case vmw_du_screen_object:
1254 return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
1255 user_fence_rep, vclips, num_clips,
1257 case vmw_du_screen_target:
1258 return vmw_kms_stdu_readback(dev_priv, file_priv, vfb,
1259 user_fence_rep, NULL, vclips, num_clips,
1263 "Readback called with invalid display system.\n");
1270 static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
1271 .destroy = vmw_framebuffer_surface_destroy,
1272 .dirty = drm_atomic_helper_dirtyfb,
1275 static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
1276 struct vmw_surface *surface,
1277 struct vmw_framebuffer **out,
1278 const struct drm_mode_fb_cmd2
1283 struct drm_device *dev = &dev_priv->drm;
1284 struct vmw_framebuffer_surface *vfbs;
1285 enum SVGA3dSurfaceFormat format;
1288 /* 3D is only supported on HWv8 and newer hosts */
1289 if (dev_priv->active_display_unit == vmw_du_legacy)
1296 if (!drm_any_plane_has_format(&dev_priv->drm,
1297 mode_cmd->pixel_format,
1298 mode_cmd->modifier[0])) {
1299 drm_dbg(&dev_priv->drm,
1300 "unsupported pixel format %p4cc / modifier 0x%llx\n",
1301 &mode_cmd->pixel_format, mode_cmd->modifier[0]);
1305 /* Surface must be marked as a scanout. */
1306 if (unlikely(!surface->metadata.scanout))
1309 if (unlikely(surface->metadata.mip_levels[0] != 1 ||
1310 surface->metadata.num_sizes != 1 ||
1311 surface->metadata.base_size.width < mode_cmd->width ||
1312 surface->metadata.base_size.height < mode_cmd->height ||
1313 surface->metadata.base_size.depth != 1)) {
1314 DRM_ERROR("Incompatible surface dimensions "
1315 "for requested mode.\n");
1319 switch (mode_cmd->pixel_format) {
1320 case DRM_FORMAT_ARGB8888:
1321 format = SVGA3D_A8R8G8B8;
1323 case DRM_FORMAT_XRGB8888:
1324 format = SVGA3D_X8R8G8B8;
1326 case DRM_FORMAT_RGB565:
1327 format = SVGA3D_R5G6B5;
1329 case DRM_FORMAT_XRGB1555:
1330 format = SVGA3D_A1R5G5B5;
1333 DRM_ERROR("Invalid pixel format: %p4cc\n",
1334 &mode_cmd->pixel_format);
1339 * For DX, surface format validation is done when surface->scanout
1342 if (!has_sm4_context(dev_priv) && format != surface->metadata.format) {
1343 DRM_ERROR("Invalid surface format for requested mode.\n");
1347 vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
1353 drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
1354 vfbs->surface = vmw_surface_reference(surface);
1355 vfbs->base.user_handle = mode_cmd->handles[0];
1356 vfbs->is_bo_proxy = is_bo_proxy;
1360 ret = drm_framebuffer_init(dev, &vfbs->base.base,
1361 &vmw_framebuffer_surface_funcs);
1368 vmw_surface_unreference(&surface);
1375 * Buffer-object framebuffer code
1378 static int vmw_framebuffer_bo_create_handle(struct drm_framebuffer *fb,
1379 struct drm_file *file_priv,
1380 unsigned int *handle)
1382 struct vmw_framebuffer_bo *vfbd =
1383 vmw_framebuffer_to_vfbd(fb);
1385 return drm_gem_handle_create(file_priv, &vfbd->buffer->tbo.base, handle);
1388 static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
1390 struct vmw_framebuffer_bo *vfbd =
1391 vmw_framebuffer_to_vfbd(framebuffer);
1393 drm_framebuffer_cleanup(framebuffer);
1394 vmw_bo_unreference(&vfbd->buffer);
1399 static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
1400 .create_handle = vmw_framebuffer_bo_create_handle,
1401 .destroy = vmw_framebuffer_bo_destroy,
1402 .dirty = drm_atomic_helper_dirtyfb,
1406 * vmw_create_bo_proxy - create a proxy surface for the buffer object
1409 * @mode_cmd: parameters for the new surface
1410 * @bo_mob: MOB backing the buffer object
1411 * @srf_out: newly created surface
1413 * When the content FB is a buffer object, we create a surface as a proxy to the
1414 * same buffer. This way we can do a surface copy rather than a surface DMA.
1415 * This is a more efficient approach
1418 * 0 on success, error code otherwise
1420 static int vmw_create_bo_proxy(struct drm_device *dev,
1421 const struct drm_mode_fb_cmd2 *mode_cmd,
1422 struct vmw_bo *bo_mob,
1423 struct vmw_surface **srf_out)
1425 struct vmw_surface_metadata metadata = {0};
1427 struct vmw_resource *res;
1428 unsigned int bytes_pp;
1431 switch (mode_cmd->pixel_format) {
1432 case DRM_FORMAT_ARGB8888:
1433 case DRM_FORMAT_XRGB8888:
1434 format = SVGA3D_X8R8G8B8;
1438 case DRM_FORMAT_RGB565:
1439 case DRM_FORMAT_XRGB1555:
1440 format = SVGA3D_R5G6B5;
1450 DRM_ERROR("Invalid framebuffer format %p4cc\n",
1451 &mode_cmd->pixel_format);
1455 metadata.format = format;
1456 metadata.mip_levels[0] = 1;
1457 metadata.num_sizes = 1;
1458 metadata.base_size.width = mode_cmd->pitches[0] / bytes_pp;
1459 metadata.base_size.height = mode_cmd->height;
1460 metadata.base_size.depth = 1;
1461 metadata.scanout = true;
1463 ret = vmw_gb_surface_define(vmw_priv(dev), &metadata, srf_out);
1465 DRM_ERROR("Failed to allocate proxy content buffer\n");
1469 res = &(*srf_out)->res;
1471 /* Reserve and switch the backing mob. */
1472 mutex_lock(&res->dev_priv->cmdbuf_mutex);
1473 (void) vmw_resource_reserve(res, false, true);
1474 vmw_bo_unreference(&res->guest_memory_bo);
1475 res->guest_memory_bo = vmw_bo_reference(bo_mob);
1476 res->guest_memory_offset = 0;
1477 vmw_resource_unreserve(res, false, false, false, NULL, 0);
1478 mutex_unlock(&res->dev_priv->cmdbuf_mutex);
1485 static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
1487 struct vmw_framebuffer **out,
1488 const struct drm_mode_fb_cmd2
1492 struct drm_device *dev = &dev_priv->drm;
1493 struct vmw_framebuffer_bo *vfbd;
1494 unsigned int requested_size;
1497 requested_size = mode_cmd->height * mode_cmd->pitches[0];
1498 if (unlikely(requested_size > bo->tbo.base.size)) {
1499 DRM_ERROR("Screen buffer object size is too small "
1500 "for requested mode.\n");
1504 if (!drm_any_plane_has_format(&dev_priv->drm,
1505 mode_cmd->pixel_format,
1506 mode_cmd->modifier[0])) {
1507 drm_dbg(&dev_priv->drm,
1508 "unsupported pixel format %p4cc / modifier 0x%llx\n",
1509 &mode_cmd->pixel_format, mode_cmd->modifier[0]);
1513 vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
1519 vfbd->base.base.obj[0] = &bo->tbo.base;
1520 drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
1521 vfbd->base.bo = true;
1522 vfbd->buffer = vmw_bo_reference(bo);
1523 vfbd->base.user_handle = mode_cmd->handles[0];
1526 ret = drm_framebuffer_init(dev, &vfbd->base.base,
1527 &vmw_framebuffer_bo_funcs);
1534 vmw_bo_unreference(&bo);
1542 * vmw_kms_srf_ok - check if a surface can be created
1544 * @dev_priv: Pointer to device private struct.
1545 * @width: requested width
1546 * @height: requested height
1548 * Surfaces must not exceed the maximum texture size
1551 vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
1553 if (width > dev_priv->texture_max_width ||
1554 height > dev_priv->texture_max_height)
1561 * vmw_kms_new_framebuffer - Create a new framebuffer.
1563 * @dev_priv: Pointer to device private struct.
1564 * @bo: Pointer to buffer object to wrap the kms framebuffer around.
1565 * Either @bo or @surface must be NULL.
1566 * @surface: Pointer to a surface to wrap the kms framebuffer around.
1567 * Either @bo or @surface must be NULL.
1568 * @only_2d: No presents will occur to this buffer object based framebuffer.
1569 * This helps the code to do some important optimizations.
1570 * @mode_cmd: Frame-buffer metadata.
1572 struct vmw_framebuffer *
1573 vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
1575 struct vmw_surface *surface,
1577 const struct drm_mode_fb_cmd2 *mode_cmd)
1579 struct vmw_framebuffer *vfb = NULL;
1580 bool is_bo_proxy = false;
1584 * We cannot use the SurfaceDMA command in a non-accelerated VM,
1585 * therefore, wrap the buffer object in a surface so we can use the
1586 * SurfaceCopy command.
1588 if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height) &&
1590 mode_cmd->width > 64 && /* Don't create a proxy for cursor */
1591 dev_priv->active_display_unit == vmw_du_screen_target) {
1592 ret = vmw_create_bo_proxy(&dev_priv->drm, mode_cmd,
1595 return ERR_PTR(ret);
1600 /* Create the new framebuffer depending on what we have */
1602 ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
1606 * vmw_create_bo_proxy() adds a reference that is no longer needed
1610 vmw_surface_unreference(&surface);
1612 ret = vmw_kms_new_framebuffer_bo(dev_priv, bo, &vfb,
1619 return ERR_PTR(ret);
1625 * Generic Kernel modesetting functions
1628 static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
1629 struct drm_file *file_priv,
1630 const struct drm_mode_fb_cmd2 *mode_cmd)
1632 struct vmw_private *dev_priv = vmw_priv(dev);
1633 struct vmw_framebuffer *vfb = NULL;
1634 struct vmw_surface *surface = NULL;
1635 struct vmw_bo *bo = NULL;
1638 /* returns either a bo or surface */
1639 ret = vmw_user_lookup_handle(dev_priv, file_priv,
1640 mode_cmd->handles[0],
1643 DRM_ERROR("Invalid buffer object handle %u (0x%x).\n",
1644 mode_cmd->handles[0], mode_cmd->handles[0]);
1650 !vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
1651 DRM_ERROR("Surface size cannot exceed %dx%d\n",
1652 dev_priv->texture_max_width,
1653 dev_priv->texture_max_height);
1658 vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
1659 !(dev_priv->capabilities & SVGA_CAP_3D),
1667 /* vmw_user_lookup_handle takes one ref so does new_fb */
1669 vmw_user_bo_unref(bo);
1671 vmw_surface_unreference(&surface);
1674 DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
1675 return ERR_PTR(ret);
1682 * vmw_kms_check_display_memory - Validates display memory required for a
1685 * @num_rects: number of drm_rect in rects
1686 * @rects: array of drm_rect representing the topology to validate indexed by
1690 * 0 on success otherwise negative error code
1692 static int vmw_kms_check_display_memory(struct drm_device *dev,
1694 struct drm_rect *rects)
1696 struct vmw_private *dev_priv = vmw_priv(dev);
1697 struct drm_rect bounding_box = {0};
1698 u64 total_pixels = 0, pixel_mem, bb_mem;
1701 for (i = 0; i < num_rects; i++) {
1703 * For STDU, only the individual screens (screen targets) are limited by
1704 * the SCREENTARGET_MAX_WIDTH/HEIGHT registers.
1706 if (dev_priv->active_display_unit == vmw_du_screen_target &&
1707 (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width ||
1708 drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) {
1709 VMW_DEBUG_KMS("Screen size not supported.\n");
1713 /* Bounding box upper left is at (0,0). */
1714 if (rects[i].x2 > bounding_box.x2)
1715 bounding_box.x2 = rects[i].x2;
1717 if (rects[i].y2 > bounding_box.y2)
1718 bounding_box.y2 = rects[i].y2;
1720 total_pixels += (u64) drm_rect_width(&rects[i]) *
1721 (u64) drm_rect_height(&rects[i]);
1724 /* Virtual svga device primary limits are always in 32-bpp. */
1725 pixel_mem = total_pixels * 4;
1728 * For HV10 and below prim_bb_mem is the vram size. When
1729 * SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM is not present, the vram size is
1730 * the limit on the primary bounding box.
1732 if (pixel_mem > dev_priv->max_primary_mem) {
1733 VMW_DEBUG_KMS("Combined output size too large.\n");
1737 /* SVGA_CAP_NO_BB_RESTRICTION is available for STDU only. */
1738 if (dev_priv->active_display_unit != vmw_du_screen_target ||
1739 !(dev_priv->capabilities & SVGA_CAP_NO_BB_RESTRICTION)) {
1740 bb_mem = (u64) bounding_box.x2 * bounding_box.y2 * 4;
1742 if (bb_mem > dev_priv->max_primary_mem) {
1743 VMW_DEBUG_KMS("Topology is beyond supported limits.\n");
1752 * vmw_crtc_state_and_lock - Return new or current crtc state with the crtc mutex locked
1754 * @state: The atomic state pointer containing the new atomic state
1757 * This function returns the new crtc state if it's part of the state update.
1758 * Otherwise returns the current crtc state. It also makes sure that the
1759 * crtc mutex is locked.
1761 * Returns: A valid crtc state pointer or NULL. It may also return a
1762 * pointer error, in particular -EDEADLK if locking needs to be rerun.
1764 static struct drm_crtc_state *
1765 vmw_crtc_state_and_lock(struct drm_atomic_state *state, struct drm_crtc *crtc)
1767 struct drm_crtc_state *crtc_state;
1769 crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
1771 lockdep_assert_held(&crtc->mutex.mutex.base);
1773 int ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
1775 if (ret != 0 && ret != -EALREADY)
1776 return ERR_PTR(ret);
1778 crtc_state = crtc->state;
1785 * vmw_kms_check_implicit - Verify that all implicit display units scan out
1786 * from the same fb after the new state is committed.
1787 * @dev: The drm_device.
1788 * @state: The new state to be checked.
1792 * -EINVAL on invalid state,
1793 * -EDEADLK if modeset locking needs to be rerun.
1795 static int vmw_kms_check_implicit(struct drm_device *dev,
1796 struct drm_atomic_state *state)
1798 struct drm_framebuffer *implicit_fb = NULL;
1799 struct drm_crtc *crtc;
1800 struct drm_crtc_state *crtc_state;
1801 struct drm_plane_state *plane_state;
1803 drm_for_each_crtc(crtc, dev) {
1804 struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1806 if (!du->is_implicit)
1809 crtc_state = vmw_crtc_state_and_lock(state, crtc);
1810 if (IS_ERR(crtc_state))
1811 return PTR_ERR(crtc_state);
1813 if (!crtc_state || !crtc_state->enable)
1817 * Can't move primary planes across crtcs, so this is OK.
1818 * It also means we don't need to take the plane mutex.
1820 plane_state = du->primary.state;
1821 if (plane_state->crtc != crtc)
1825 implicit_fb = plane_state->fb;
1826 else if (implicit_fb != plane_state->fb)
1834 * vmw_kms_check_topology - Validates topology in drm_atomic_state
1836 * @state: the driver state object
1839 * 0 on success otherwise negative error code
1841 static int vmw_kms_check_topology(struct drm_device *dev,
1842 struct drm_atomic_state *state)
1844 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1845 struct drm_rect *rects;
1846 struct drm_crtc *crtc;
1850 rects = kcalloc(dev->mode_config.num_crtc, sizeof(struct drm_rect),
1855 drm_for_each_crtc(crtc, dev) {
1856 struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1857 struct drm_crtc_state *crtc_state;
1859 i = drm_crtc_index(crtc);
1861 crtc_state = vmw_crtc_state_and_lock(state, crtc);
1862 if (IS_ERR(crtc_state)) {
1863 ret = PTR_ERR(crtc_state);
1870 if (crtc_state->enable) {
1871 rects[i].x1 = du->gui_x;
1872 rects[i].y1 = du->gui_y;
1873 rects[i].x2 = du->gui_x + crtc_state->mode.hdisplay;
1874 rects[i].y2 = du->gui_y + crtc_state->mode.vdisplay;
1883 /* Determine change to topology due to new atomic state */
1884 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
1885 new_crtc_state, i) {
1886 struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1887 struct drm_connector *connector;
1888 struct drm_connector_state *conn_state;
1889 struct vmw_connector_state *vmw_conn_state;
1891 if (!du->pref_active && new_crtc_state->enable) {
1892 VMW_DEBUG_KMS("Enabling a disabled display unit\n");
1898 * For vmwgfx each crtc has only one connector attached and it
1899 * is not changed, so we don't really need to check the
1900 * crtc->connector_mask and iterate over it.
1902 connector = &du->connector;
1903 conn_state = drm_atomic_get_connector_state(state, connector);
1904 if (IS_ERR(conn_state)) {
1905 ret = PTR_ERR(conn_state);
1909 vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
1910 vmw_conn_state->gui_x = du->gui_x;
1911 vmw_conn_state->gui_y = du->gui_y;
1914 ret = vmw_kms_check_display_memory(dev, dev->mode_config.num_crtc,
1923 * vmw_kms_atomic_check_modeset - validate state object for modeset changes
1926 * @state: the driver state object
1928 * This is a simple wrapper around drm_atomic_helper_check_modeset() for
1929 * us to assign a value to mode->crtc_clock so that
1930 * drm_calc_timestamping_constants() won't throw an error message
1933 * Zero for success or -errno
1936 vmw_kms_atomic_check_modeset(struct drm_device *dev,
1937 struct drm_atomic_state *state)
1939 struct drm_crtc *crtc;
1940 struct drm_crtc_state *crtc_state;
1941 bool need_modeset = false;
1944 ret = drm_atomic_helper_check(dev, state);
1948 ret = vmw_kms_check_implicit(dev, state);
1950 VMW_DEBUG_KMS("Invalid implicit state\n");
1954 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
1955 if (drm_atomic_crtc_needs_modeset(crtc_state))
1956 need_modeset = true;
1960 return vmw_kms_check_topology(dev, state);
1965 static const struct drm_mode_config_funcs vmw_kms_funcs = {
1966 .fb_create = vmw_kms_fb_create,
1967 .atomic_check = vmw_kms_atomic_check_modeset,
1968 .atomic_commit = drm_atomic_helper_commit,
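/* Screen Object present path; a thin wrapper around vmw_kms_sou_do_surface_dirty(). */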
1971 static int vmw_kms_generic_present(struct vmw_private *dev_priv,
1972 struct drm_file *file_priv,
1973 struct vmw_framebuffer *vfb,
1974 struct vmw_surface *surface,
1976 int32_t destX, int32_t destY,
1977 struct drm_vmw_rect *clips,
1980 return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
1981 &surface->res, destX, destY,
1982 num_clips, 1, NULL, NULL);
1986 int vmw_kms_present(struct vmw_private *dev_priv,
1987 struct drm_file *file_priv,
1988 struct vmw_framebuffer *vfb,
1989 struct vmw_surface *surface,
1991 int32_t destX, int32_t destY,
1992 struct drm_vmw_rect *clips,
1997 switch (dev_priv->active_display_unit) {
1998 case vmw_du_screen_target:
1999 ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
2000 &surface->res, destX, destY,
2001 num_clips, 1, NULL, NULL);
2003 case vmw_du_screen_object:
2004 ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
2005 sid, destX, destY, clips,
2010 "Present called with invalid display system.\n");
2017 vmw_cmd_flush(dev_priv, false);
2023 vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
2025 if (dev_priv->hotplug_mode_update_property)
2028 dev_priv->hotplug_mode_update_property =
2029 drm_property_create_range(&dev_priv->drm,
2030 DRM_MODE_PROP_IMMUTABLE,
2031 "hotplug_mode_update", 0, 1);
2034 int vmw_kms_init(struct vmw_private *dev_priv)
2036 struct drm_device *dev = &dev_priv->drm;
2038 static const char *display_unit_names[] = {
2046 drm_mode_config_init(dev);
2047 dev->mode_config.funcs = &vmw_kms_funcs;
2048 dev->mode_config.min_width = 1;
2049 dev->mode_config.min_height = 1;
2050 dev->mode_config.max_width = dev_priv->texture_max_width;
2051 dev->mode_config.max_height = dev_priv->texture_max_height;
2052 dev->mode_config.preferred_depth = dev_priv->assume_16bpp ? 16 : 32;
2054 drm_mode_create_suggested_offset_properties(dev);
2055 vmw_kms_create_hotplug_mode_update_property(dev_priv);
2057 ret = vmw_kms_stdu_init_display(dev_priv);
2059 ret = vmw_kms_sou_init_display(dev_priv);
2060 if (ret) /* Fallback */
2061 ret = vmw_kms_ldu_init_display(dev_priv);
2063 BUILD_BUG_ON(ARRAY_SIZE(display_unit_names) != (vmw_du_max + 1));
2064 drm_info(&dev_priv->drm, "%s display unit initialized\n",
2065 display_unit_names[dev_priv->active_display_unit]);
2070 int vmw_kms_close(struct vmw_private *dev_priv)
2075 * Docs say we should take the lock before calling this function
2076 * but since it destroys encoders and our destructor calls
2077 * drm_encoder_cleanup, which takes the lock, we would deadlock.
2079 drm_mode_config_cleanup(&dev_priv->drm);
2080 if (dev_priv->active_display_unit == vmw_du_legacy)
2081 ret = vmw_kms_ldu_close_display(dev_priv);
2086 int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
2087 struct drm_file *file_priv)
2089 struct drm_vmw_cursor_bypass_arg *arg = data;
2090 struct vmw_display_unit *du;
2091 struct drm_crtc *crtc;
2094 mutex_lock(&dev->mode_config.mutex);
2095 if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
2097 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2098 du = vmw_crtc_to_du(crtc);
2099 du->hotspot_x = arg->xhot;
2100 du->hotspot_y = arg->yhot;
2103 mutex_unlock(&dev->mode_config.mutex);
2107 crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
2113 du = vmw_crtc_to_du(crtc);
2115 du->hotspot_x = arg->xhot;
2116 du->hotspot_y = arg->yhot;
2119 mutex_unlock(&dev->mode_config.mutex);
2124 int vmw_kms_write_svga(struct vmw_private *vmw_priv,
2125 unsigned width, unsigned height, unsigned pitch,
2126 unsigned bpp, unsigned depth)
2128 if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
2129 vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
2130 else if (vmw_fifo_have_pitchlock(vmw_priv))
2131 vmw_fifo_mem_write(vmw_priv, SVGA_FIFO_PITCHLOCK, pitch);
2132 vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
2133 vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
2134 if ((vmw_priv->capabilities & SVGA_CAP_8BIT_EMULATION) != 0)
2135 vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
2137 if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
2138 DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
2139 depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
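/*
 * Check that a mode's scanout buffer (pitch * height) fits in the memory
 * available to the active display unit: max_primary_mem for screen targets,
 * VRAM otherwise.
 */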
2146 bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
2150 return ((u64) pitch * (u64) height) < (u64)
2151 ((dev_priv->active_display_unit == vmw_du_screen_target) ?
2152 dev_priv->max_primary_mem : dev_priv->vram_size);
2156 * vmw_du_update_layout - Update the display unit with topology from resolution
2157 * plugin and generate DRM uevent
2158 * @dev_priv: device private
2159 * @num_rects: number of drm_rect in rects
2160 * @rects: topology to update
2162 static int vmw_du_update_layout(struct vmw_private *dev_priv,
2163 unsigned int num_rects, struct drm_rect *rects)
2165 struct drm_device *dev = &dev_priv->drm;
2166 struct vmw_display_unit *du;
2167 struct drm_connector *con;
2168 struct drm_connector_list_iter conn_iter;
2169 struct drm_modeset_acquire_ctx ctx;
2170 struct drm_crtc *crtc;
2173 /* Currently gui_x/y is protected with the crtc mutex */
2174 mutex_lock(&dev->mode_config.mutex);
2175 drm_modeset_acquire_init(&ctx, 0);
2177 drm_for_each_crtc(crtc, dev) {
2178 ret = drm_modeset_lock(&crtc->mutex, &ctx);
2180 if (ret == -EDEADLK) {
2181 drm_modeset_backoff(&ctx);
2188 drm_connector_list_iter_begin(dev, &conn_iter);
2189 drm_for_each_connector_iter(con, &conn_iter) {
2190 du = vmw_connector_to_du(con);
2191 if (num_rects > du->unit) {
2192 du->pref_width = drm_rect_width(&rects[du->unit]);
2193 du->pref_height = drm_rect_height(&rects[du->unit]);
2194 du->pref_active = true;
2195 du->gui_x = rects[du->unit].x1;
2196 du->gui_y = rects[du->unit].y1;
2198 du->pref_width = VMWGFX_MIN_INITIAL_WIDTH;
2199 du->pref_height = VMWGFX_MIN_INITIAL_HEIGHT;
2200 du->pref_active = false;
2205 drm_connector_list_iter_end(&conn_iter);
2207 list_for_each_entry(con, &dev->mode_config.connector_list, head) {
2208 du = vmw_connector_to_du(con);
2209 if (num_rects > du->unit) {
2210 drm_object_property_set_value
2211 (&con->base, dev->mode_config.suggested_x_property,
2213 drm_object_property_set_value
2214 (&con->base, dev->mode_config.suggested_y_property,
2217 drm_object_property_set_value
2218 (&con->base, dev->mode_config.suggested_x_property,
2220 drm_object_property_set_value
2221 (&con->base, dev->mode_config.suggested_y_property,
2224 con->status = vmw_du_connector_detect(con, true);
2227 drm_modeset_drop_locks(&ctx);
2228 drm_modeset_acquire_fini(&ctx);
2229 mutex_unlock(&dev->mode_config.mutex);
2231 drm_sysfs_hotplug_event(dev);
2236 int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
2237 u16 *r, u16 *g, u16 *b,
2239 struct drm_modeset_acquire_ctx *ctx)
2241 struct vmw_private *dev_priv = vmw_priv(crtc->dev);
2244 for (i = 0; i < size; i++) {
2245 DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
2247 vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
2248 vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
2249 vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
2255 int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
2260 enum drm_connector_status
2261 vmw_du_connector_detect(struct drm_connector *connector, bool force)
2263 uint32_t num_displays;
2264 struct drm_device *dev = connector->dev;
2265 struct vmw_private *dev_priv = vmw_priv(dev);
2266 struct vmw_display_unit *du = vmw_connector_to_du(connector);
2268 num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
2270 return ((vmw_connector_to_du(connector)->unit < num_displays &&
2272 connector_status_connected : connector_status_disconnected);
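/* Built-in display modes offered when filling connector mode lists; terminated by an all-zero entry. */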
2275 static struct drm_display_mode vmw_kms_connector_builtin[] = {
2277 { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
2278 752, 800, 0, 480, 489, 492, 525, 0,
2279 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
2281 { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
2282 968, 1056, 0, 600, 601, 605, 628, 0,
2283 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2285 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
2286 1184, 1344, 0, 768, 771, 777, 806, 0,
2287 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
2289 { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
2290 1344, 1600, 0, 864, 865, 868, 900, 0,
2291 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2293 { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74500, 1280, 1344,
2294 1472, 1664, 0, 720, 723, 728, 748, 0,
2295 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2297 { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
2298 1472, 1664, 0, 768, 771, 778, 798, 0,
2299 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2301 { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
2302 1480, 1680, 0, 800, 803, 809, 831, 0,
2303 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2305 { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
2306 1488, 1800, 0, 960, 961, 964, 1000, 0,
2307 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2308 /* 1280x1024@60Hz */
2309 { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
2310 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
2311 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2313 { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
2314 1536, 1792, 0, 768, 771, 777, 795, 0,
2315 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2316 /* 1400x1050@60Hz */
2317 { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
2318 1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
2319 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2321 { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
2322 1672, 1904, 0, 900, 903, 909, 934, 0,
2323 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2324 /* 1600x1200@60Hz */
2325 { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
2326 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
2327 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2328 /* 1680x1050@60Hz */
2329 { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
2330 1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
2331 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2332 /* 1792x1344@60Hz */
2333 { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
2334 2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
2335 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2336 /* 1856x1392@60Hz */
2337 { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
2338 2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
2339 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2340 /* 1920x1080@60Hz */
2341 { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 173000, 1920, 2048,
2342 2248, 2576, 0, 1080, 1083, 1088, 1120, 0,
2343 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2344 /* 1920x1200@60Hz */
2345 { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
2346 2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
2347 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2348 /* 1920x1440@60Hz */
2349 { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
2350 2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
2351 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2352 /* 2560x1440@60Hz */
2353 { DRM_MODE("2560x1440", DRM_MODE_TYPE_DRIVER, 241500, 2560, 2608,
2354 2640, 2720, 0, 1440, 1443, 1448, 1481, 0,
2355 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2356 /* 2560x1600@60Hz */
2357 { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
2358 3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
2359 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2360 /* 2880x1800@60Hz */
2361 { DRM_MODE("2880x1800", DRM_MODE_TYPE_DRIVER, 337500, 2880, 2928,
2362 2960, 3040, 0, 1800, 1803, 1809, 1852, 0,
2363 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2364 /* 3840x2160@60Hz */
2365 { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 533000, 3840, 3888,
2366 3920, 4000, 0, 2160, 2163, 2168, 2222, 0,
2367 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2368 /* 3840x2400@60Hz */
2369 { DRM_MODE("3840x2400", DRM_MODE_TYPE_DRIVER, 592250, 3840, 3888,
2370 3920, 4000, 0, 2400, 2403, 2409, 2469, 0,
2371 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2373 { DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },

/**
 * vmw_guess_mode_timing - Provide fake timings for a
 * 60Hz vrefresh mode.
 *
 * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
 * members filled in.
 */
void vmw_guess_mode_timing(struct drm_display_mode *mode)
{
	mode->hsync_start = mode->hdisplay + 50;
	mode->hsync_end = mode->hsync_start + 50;
	mode->htotal = mode->hsync_end + 50;

	mode->vsync_start = mode->vdisplay + 50;
	mode->vsync_end = mode->vsync_start + 50;
	mode->vtotal = mode->vsync_end + 50;

	mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
}
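
/*
 * As a quick sanity check of the formula above: a 1920x1080 preferred mode
 * ends up with htotal = 2070 and vtotal = 1230, so
 * clock = 2070 * 1230 / 100 * 6 = 152766 kHz, and the resulting refresh rate
 * is 152766000 / (2070 * 1230) ~= 60 Hz, i.e. the expression is simply
 * htotal * vtotal * 60 scaled to kHz.
 */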

int vmw_du_connector_fill_modes(struct drm_connector *connector,
				uint32_t max_width, uint32_t max_height)
{
	struct vmw_display_unit *du = vmw_connector_to_du(connector);
	struct drm_device *dev = connector->dev;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *bmode;
	struct drm_display_mode prefmode = { DRM_MODE("preferred",
		DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
	};
	int i;
	u32 assumed_bpp = 4;

	if (dev_priv->assume_16bpp)
		assumed_bpp = 2;

	max_width  = min(max_width,  dev_priv->texture_max_width);
	max_height = min(max_height, dev_priv->texture_max_height);

	/*
	 * For STDU there is an extra per-mode limit from the
	 * SVGA_REG_SCREENTARGET_MAX_WIDTH/HEIGHT registers.
	 */
	if (dev_priv->active_display_unit == vmw_du_screen_target) {
		max_width  = min(max_width,  dev_priv->stdu_max_width);
		max_height = min(max_height, dev_priv->stdu_max_height);
	}

	/* Add preferred mode */
	mode = drm_mode_duplicate(dev, &prefmode);
	if (!mode)
		return 0;
	mode->hdisplay = du->pref_width;
	mode->vdisplay = du->pref_height;
	vmw_guess_mode_timing(mode);
	drm_mode_set_name(mode);

	if (vmw_kms_validate_mode_vram(dev_priv,
				       mode->hdisplay * assumed_bpp,
				       mode->vdisplay)) {
		drm_mode_probed_add(connector, mode);
	} else {
		drm_mode_destroy(dev, mode);
		mode = NULL;
	}

	if (du->pref_mode) {
		list_del_init(&du->pref_mode->head);
		drm_mode_destroy(dev, du->pref_mode);
	}

	/* mode might be null here, this is intended */
	du->pref_mode = mode;

	for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
		bmode = &vmw_kms_connector_builtin[i];
		if (bmode->hdisplay > max_width ||
		    bmode->vdisplay > max_height)
			continue;

		if (!vmw_kms_validate_mode_vram(dev_priv,
						bmode->hdisplay * assumed_bpp,
						bmode->vdisplay))
			continue;

		mode = drm_mode_duplicate(dev, bmode);
		if (!mode)
			return 0;

		drm_mode_probed_add(connector, mode);
	}

	drm_connector_list_update(connector);
	/* Move the preferred mode first, to help apps pick the right mode. */
	drm_mode_sort(&connector->modes);

	return 1;
}
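
/*
 * The vmw_kms_validate_mode_vram() checks above take hdisplay * assumed_bpp
 * as an (approximate) pitch in bytes and vdisplay as the number of lines, so
 * as a rough illustration a 1920x1080 mode with the default assumed_bpp of 4
 * has to fit 1920 * 4 * 1080 = 8294400 bytes (~8 MiB), and half of that when
 * assume_16bpp is set.
 */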

/**
 * vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
 * @dev: drm device for the ioctl
 * @data: data pointer for the ioctl
 * @file_priv: drm file for the ioctl call
 *
 * Update the preferred topology of the display units as per the ioctl
 * request. The topology is expressed as an array of drm_vmw_rect, e.g.
 *
 * [0 0 640 480] [640 0 800 600] [0 480 640 480]
 *
 * NOTE:
 * The x and y offsets (upper left) in drm_vmw_rect cannot be less than 0.
 * Besides the device limit on topology, x + w and y + h (lower right) cannot
 * be greater than INT_MAX. A topology beyond these limits will return an
 * error.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_vmw_update_layout_arg *arg =
		(struct drm_vmw_update_layout_arg *)data;
	void __user *user_rects;
	struct drm_vmw_rect *rects;
	struct drm_rect *drm_rects;
	unsigned rects_size;
	int ret, i;

	if (!arg->num_outputs) {
		struct drm_rect def_rect = {0, 0,
					    VMWGFX_MIN_INITIAL_WIDTH,
					    VMWGFX_MIN_INITIAL_HEIGHT};
		vmw_du_update_layout(dev_priv, 1, &def_rect);
		return 0;
	}

	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
	rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
			GFP_KERNEL);
	if (unlikely(!rects))
		return -ENOMEM;

	user_rects = (void __user *)(unsigned long)arg->rects;
	ret = copy_from_user(rects, user_rects, rects_size);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to get rects.\n");
		ret = -EFAULT;
		goto out_free;
	}

	drm_rects = (struct drm_rect *)rects;

	VMW_DEBUG_KMS("Layout count = %u\n", arg->num_outputs);
	for (i = 0; i < arg->num_outputs; i++) {
		struct drm_vmw_rect curr_rect;

		/* Verify user-space input for overflow as the kernel uses drm_rect */
		if ((rects[i].x + rects[i].w > INT_MAX) ||
		    (rects[i].y + rects[i].h > INT_MAX)) {
			ret = -ERANGE;
			goto out_free;
		}

		curr_rect = rects[i];
		drm_rects[i].x1 = curr_rect.x;
		drm_rects[i].y1 = curr_rect.y;
		drm_rects[i].x2 = curr_rect.x + curr_rect.w;
		drm_rects[i].y2 = curr_rect.y + curr_rect.h;

		VMW_DEBUG_KMS("  x1 = %d y1 = %d x2 = %d y2 = %d\n",
			      drm_rects[i].x1, drm_rects[i].y1,
			      drm_rects[i].x2, drm_rects[i].y2);

		/*
		 * Currently this check is limiting the topology within
		 * mode_config->max (which actually is the max texture size
		 * supported by the virtual device). This limit is here to
		 * address window managers that create a big framebuffer for
		 * the whole topology.
		 */
		if (drm_rects[i].x1 < 0 || drm_rects[i].y1 < 0 ||
		    drm_rects[i].x2 > mode_config->max_width ||
		    drm_rects[i].y2 > mode_config->max_height) {
			VMW_DEBUG_KMS("Invalid layout %d %d %d %d\n",
				      drm_rects[i].x1, drm_rects[i].y1,
				      drm_rects[i].x2, drm_rects[i].y2);
			ret = -EINVAL;
			goto out_free;
		}
	}

	ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);
	if (ret == 0)
		vmw_du_update_layout(dev_priv, arg->num_outputs, drm_rects);

out_free:
	kfree(rects);
	return ret;
}
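
/*
 * A minimal user-space sketch of driving this ioctl (illustrative only,
 * assuming <xf86drm.h>, <drm/vmwgfx_drm.h> and libdrm's drmCommandWrite()
 * wrapper, with the two left-most rects of the example layout above):
 *
 *	struct drm_vmw_rect rects[2] = {
 *		{ .x = 0,   .y = 0, .w = 640, .h = 480 },
 *		{ .x = 640, .y = 0, .w = 800, .h = 600 },
 *	};
 *	struct drm_vmw_update_layout_arg arg = {
 *		.num_outputs = 2,
 *		.rects = (unsigned long)rects,
 *	};
 *	int err = drmCommandWrite(fd, DRM_VMW_UPDATE_LAYOUT,
 *				  &arg, sizeof(arg));
 */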

/**
 * vmw_kms_helper_dirty - Helper to build commands and perform actions based
 * on a set of cliprects and a set of display units.
 *
 * @dev_priv: Pointer to a device private structure.
 * @framebuffer: Pointer to the framebuffer on which to perform the actions.
 * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
 * Cliprects are given in framebuffer coordinates.
 * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
 * be NULL. Cliprects are given in source coordinates.
 * @dest_x: X coordinate offset for the crtc / destination clip rects.
 * @dest_y: Y coordinate offset for the crtc / destination clip rects.
 * @num_clips: Number of cliprects in the @clips or @vclips array.
 * @increment: Integer with which to increment the clip counter when looping.
 * Used to skip a predetermined number of clip rects.
 * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
 */
int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
			 struct vmw_framebuffer *framebuffer,
			 const struct drm_clip_rect *clips,
			 const struct drm_vmw_rect *vclips,
			 s32 dest_x, s32 dest_y,
			 int num_clips,
			 int increment,
			 struct vmw_kms_dirty *dirty)
{
	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
	struct drm_crtc *crtc;
	u32 num_units = 0;
	u32 i, k;

	dirty->dev_priv = dev_priv;

	/* If crtc is passed, no need to iterate over other display units */
	if (dirty->crtc) {
		units[num_units++] = vmw_crtc_to_du(dirty->crtc);
	} else {
		list_for_each_entry(crtc, &dev_priv->drm.mode_config.crtc_list,
				    head) {
			struct drm_plane *plane = crtc->primary;

			if (plane->state->fb == &framebuffer->base)
				units[num_units++] = vmw_crtc_to_du(crtc);
		}
	}

	for (k = 0; k < num_units; k++) {
		struct vmw_display_unit *unit = units[k];
		s32 crtc_x = unit->crtc.x;
		s32 crtc_y = unit->crtc.y;
		s32 crtc_width = unit->crtc.mode.hdisplay;
		s32 crtc_height = unit->crtc.mode.vdisplay;
		const struct drm_clip_rect *clips_ptr = clips;
		const struct drm_vmw_rect *vclips_ptr = vclips;

		dirty->unit = unit;
		if (dirty->fifo_reserve_size > 0) {
			dirty->cmd = VMW_CMD_RESERVE(dev_priv,
						     dirty->fifo_reserve_size);
			if (!dirty->cmd)
				return -ENOMEM;

			memset(dirty->cmd, 0, dirty->fifo_reserve_size);
		}
		dirty->num_hits = 0;
		for (i = 0; i < num_clips; i++, clips_ptr += increment,
		     vclips_ptr += increment) {
			s32 clip_left;
			s32 clip_top;

			/*
			 * Select clip array type. Note that integer type
			 * in @clips is unsigned short, whereas in @vclips
			 * it's 32-bit.
			 */
			if (clips) {
				dirty->fb_x = (s32) clips_ptr->x1;
				dirty->fb_y = (s32) clips_ptr->y1;
				dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
					crtc_x;
				dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
					crtc_y;
			} else {
				dirty->fb_x = vclips_ptr->x;
				dirty->fb_y = vclips_ptr->y;
				dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
					dest_x - crtc_x;
				dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
					dest_y - crtc_y;
			}

			dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
			dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;

			/* Skip this clip if it's outside the crtc region */
			if (dirty->unit_x1 >= crtc_width ||
			    dirty->unit_y1 >= crtc_height ||
			    dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
				continue;

			/* Clip right and bottom to crtc limits */
			dirty->unit_x2 = min_t(s32, dirty->unit_x2,
					       crtc_width);
			dirty->unit_y2 = min_t(s32, dirty->unit_y2,
					       crtc_height);

			/* Clip left and top to crtc limits */
			clip_left = min_t(s32, dirty->unit_x1, 0);
			clip_top = min_t(s32, dirty->unit_y1, 0);
			dirty->unit_x1 -= clip_left;
			dirty->unit_y1 -= clip_top;
			dirty->fb_x -= clip_left;
			dirty->fb_y -= clip_top;

			dirty->clip(dirty);
		}

		dirty->fifo_commit(dirty);
	}

	return 0;
}
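
/*
 * To put numbers on the translation above: with a display unit whose crtc
 * sits at (640, 0) and is 800x600, dest_x/dest_y of 0 and a 100x50 vclip at
 * framebuffer position (600, 100), the clip becomes unit_x1 = -40,
 * unit_y1 = 100, unit_x2 = 60, unit_y2 = 150. It is not skipped
 * (unit_x2 > 0), the right and bottom edges already fit, and clipping
 * left/top moves unit_x1 to 0 while advancing fb_x to 640, so only the part
 * of the rect overlapping this unit is emitted.
 */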

/**
 * vmw_kms_helper_validation_finish - Helper for post KMS command submission
 * cleanup and fencing
 * @dev_priv: Pointer to the device-private struct
 * @file_priv: Pointer identifying the client when user-space fencing is used
 * @ctx: Pointer to the validation context
 * @out_fence: If non-NULL, returned refcounted fence-pointer
 * @user_fence_rep: If non-NULL, pointer to user-space address area
 * in which to copy user-space fence info
 */
void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
				      struct drm_file *file_priv,
				      struct vmw_validation_context *ctx,
				      struct vmw_fence_obj **out_fence,
				      struct drm_vmw_fence_rep __user *
				      user_fence_rep)
{
	struct vmw_fence_obj *fence = NULL;
	uint32_t handle = 0;
	int ret = 0;

	if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
	    out_fence)
		ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
						 file_priv ? &handle : NULL);
	vmw_validation_done(ctx, fence);
	if (file_priv)
		vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
					    ret, user_fence_rep, fence,
					    handle, -1);
	if (out_fence)
		*out_fence = fence;
	else
		vmw_fence_obj_unreference(&fence);
}
/**
 * vmw_kms_update_proxy - Helper function to update a proxy surface from
 * its backing MOB.
 *
 * @res: Pointer to the surface resource
 * @clips: Clip rects in framebuffer (surface) space.
 * @num_clips: Number of clips in @clips.
 * @increment: Integer with which to increment the clip counter when looping.
 * Used to skip a predetermined number of clip rects.
 *
 * This function makes sure the proxy surface is updated from its backing MOB
 * using the region given by @clips. The surface resource @res and its backing
 * MOB need to be reserved and validated on call.
 */
int vmw_kms_update_proxy(struct vmw_resource *res,
			 const struct drm_clip_rect *clips,
			 unsigned num_clips,
			 int increment)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct drm_vmw_size *size = &vmw_res_to_srf(res)->metadata.base_size;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBImage body;
	} *cmd;
	SVGA3dBox *box;
	size_t copy_size = 0;
	int i;

	if (!clips)
		return 0;

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd) * num_clips);
	if (!cmd)
		return -ENOMEM;

	for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
		box = &cmd->body.box;
		cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
		cmd->header.size = sizeof(cmd->body);
		cmd->body.image.sid = res->id;
		cmd->body.image.face = 0;
		cmd->body.image.mipmap = 0;

		if (clips->x1 > size->width || clips->x2 > size->width ||
		    clips->y1 > size->height || clips->y2 > size->height) {
			DRM_ERROR("Invalid clips outside of framebuffer.\n");
			return -EINVAL;
		}

		box->x = clips->x1;
		box->y = clips->y1;
		box->z = 0;
		box->w = clips->x2 - clips->x1;
		box->h = clips->y2 - clips->y1;
		box->d = 1;
		copy_size += sizeof(*cmd);
	}

	vmw_cmd_commit(dev_priv, copy_size);
	return 0;
}
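
/*
 * Each clip rect is encoded as one SVGA_3D_CMD_UPDATE_GB_IMAGE header plus
 * body, so e.g. three clips reserve and commit 3 * sizeof(*cmd) bytes of
 * command space.
 */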
/**
 * vmw_kms_create_implicit_placement_property - Set up the implicit placement
 * property for the device.
 * @dev_priv: Pointer to a device private struct.
 *
 * Sets up the implicit placement property unless it's already set up.
 */
void
vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv)
{
	if (dev_priv->implicit_placement_property)
		return;

	dev_priv->implicit_placement_property =
		drm_property_create_range(&dev_priv->drm,
					  DRM_MODE_PROP_IMMUTABLE,
					  "implicit_placement", 0, 1);
}
/**
 * vmw_kms_suspend - Save modesetting state and turn modesetting off.
 *
 * @dev: Pointer to the drm device
 * Return: 0 on success. Negative error code on failure.
 */
int vmw_kms_suspend(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);

	dev_priv->suspend_state = drm_atomic_helper_suspend(dev);
	if (IS_ERR(dev_priv->suspend_state)) {
		int ret = PTR_ERR(dev_priv->suspend_state);

		DRM_ERROR("Failed kms suspend: %d\n", ret);
		dev_priv->suspend_state = NULL;
		return ret;
	}

	return 0;
}
/**
 * vmw_kms_resume - Re-enable modesetting and restore state
 *
 * @dev: Pointer to the drm device
 * Return: 0 on success. Negative error code on failure.
 *
 * State is resumed from a previous vmw_kms_suspend(). It's illegal
 * to call this function without a previous vmw_kms_suspend().
 */
int vmw_kms_resume(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	int ret;

	if (WARN_ON(!dev_priv->suspend_state))
		return 0;

	ret = drm_atomic_helper_resume(dev, dev_priv->suspend_state);
	dev_priv->suspend_state = NULL;
	return ret;
}
/**
 * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost
 * @dev: Pointer to the drm device
 */
void vmw_kms_lost_device(struct drm_device *dev)
{
	drm_atomic_helper_shutdown(dev);
}
/**
 * vmw_du_helper_plane_update - Helper to do plane update on a display unit.
 * @update: The closure structure.
 *
 * Call this helper after setting callbacks in &vmw_du_update_plane to do plane
 * update on display unit.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
{
	struct drm_plane_state *state = update->plane->state;
	struct drm_plane_state *old_state = update->old_state;
	struct drm_atomic_helper_damage_iter iter;
	struct drm_rect clip;
	struct drm_rect bb;
	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
	uint32_t reserved_size = 0;
	uint32_t submit_size = 0;
	uint32_t curr_size = 0;
	uint32_t num_hits = 0;
	void *cmd_start;
	char *cmd_next;
	int ret;

	/*
	 * Iterate in advance to check if a plane update is really needed and
	 * to find the number of clips that actually fall in the plane src,
	 * for fifo allocation.
	 */
	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
	drm_atomic_for_each_plane_damage(&iter, &clip)
		num_hits++;

	if (num_hits == 0)
		return 0;

	if (update->vfb->bo) {
		struct vmw_framebuffer_bo *vfbbo =
			container_of(update->vfb, typeof(*vfbbo), base);

		/*
		 * For screen targets we want a mappable bo, for everything
		 * else we want an accelerated, i.e. host-backed (vram or gmr),
		 * bo. If the display unit is not screen target then MOBs
		 * shouldn't be available.
		 */
		if (update->dev_priv->active_display_unit == vmw_du_screen_target) {
			vmw_bo_placement_set(vfbbo->buffer,
					     VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR,
					     VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR);
		} else {
			WARN_ON(update->dev_priv->has_mob);
			vmw_bo_placement_set_default_accelerated(vfbbo->buffer);
		}
		ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer);
	} else {
		struct vmw_framebuffer_surface *vfbs =
			container_of(update->vfb, typeof(*vfbs), base);

		ret = vmw_validation_add_resource(&val_ctx, &vfbs->surface->res,
						  0, VMW_RES_DIRTY_NONE, NULL,
						  NULL);
	}

	if (ret)
		return ret;

	ret = vmw_validation_prepare(&val_ctx, update->mutex, update->intr);
	if (ret)
		goto out_unref;

	reserved_size = update->calc_fifo_size(update, num_hits);
	cmd_start = VMW_CMD_RESERVE(update->dev_priv, reserved_size);
	if (!cmd_start) {
		ret = -ENOMEM;
		goto out_revert;
	}

	cmd_next = cmd_start;

	if (update->post_prepare) {
		curr_size = update->post_prepare(update, cmd_next);
		cmd_next += curr_size;
		submit_size += curr_size;
	}

	if (update->pre_clip) {
		curr_size = update->pre_clip(update, cmd_next, num_hits);
		cmd_next += curr_size;
		submit_size += curr_size;
	}

	bb.x1 = INT_MAX;
	bb.y1 = INT_MAX;
	bb.x2 = INT_MIN;
	bb.y2 = INT_MIN;

	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
	drm_atomic_for_each_plane_damage(&iter, &clip) {
		uint32_t fb_x = clip.x1;
		uint32_t fb_y = clip.y1;

		vmw_du_translate_to_crtc(state, &clip);
		if (update->clip) {
			curr_size = update->clip(update, cmd_next, &clip, fb_x,
						 fb_y);
			cmd_next += curr_size;
			submit_size += curr_size;
		}
		bb.x1 = min_t(int, bb.x1, clip.x1);
		bb.y1 = min_t(int, bb.y1, clip.y1);
		bb.x2 = max_t(int, bb.x2, clip.x2);
		bb.y2 = max_t(int, bb.y2, clip.y2);
	}

	curr_size = update->post_clip(update, cmd_next, &bb);
	submit_size += curr_size;

	if (reserved_size < submit_size)
		submit_size = 0;

	vmw_cmd_commit(update->dev_priv, submit_size);

	vmw_kms_helper_validation_finish(update->dev_priv, NULL, &val_ctx,
					 update->out_fence, NULL);
	return ret;

out_revert:
	vmw_validation_revert(&val_ctx);

out_unref:
	vmw_validation_unref_lists(&val_ctx);
	return ret;
}
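
/*
 * Taken together, the callbacks in struct vmw_du_update_plane are invoked in
 * a fixed order by this helper: calc_fifo_size() sizes the reservation,
 * post_prepare() and pre_clip() (both optional) emit any per-update preamble,
 * clip() (when set) runs once per damage rect, and post_clip() finishes the
 * command stream with the accumulated bounding box before everything is
 * committed and fenced.
 */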