// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2018, 2020-2021 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */
#include <linux/dma-mapping.h>
#include <linux/fault-inject.h>
#include <linux/kthread.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <uapi/linux/sched/types.h>

#include <drm/drm_bridge.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_prime.h>
#include <drm/drm_of.h>
#include <drm/drm_vblank.h>

#include "disp/msm_disp_snapshot.h"
#include "msm_drv.h"
#include "msm_debugfs.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_kms.h"
#include "msm_mmu.h"
#include "adreno/adreno_gpu.h"
/*
 * MSM driver version:
 * - 1.0.0 - initial interface
 * - 1.1.0 - adds madvise, and support for submits with > 4 cmd buffers
 * - 1.2.0 - adds explicit fence support for submit ioctl
 * - 1.3.0 - adds GMEM_BASE + NR_RINGS params, SUBMITQUEUE_NEW +
 *           SUBMITQUEUE_CLOSE ioctls, and MSM_INFO_IOVA flag for
 *           GEM_INFO
 * - 1.4.0 - softpin, MSM_RELOC_BO_DUMP, and GEM_INFO support to set/get
 *           GEM object's debug name
 * - 1.5.0 - Add SUBMITQUEUE_QUERY ioctl
 * - 1.6.0 - Syncobj support
 * - 1.7.0 - Add MSM_PARAM_SUSPENDS to access suspend count
 * - 1.8.0 - Add MSM_BO_CACHED_COHERENT for supported GPUs (a6xx)
 * - 1.9.0 - Add MSM_SUBMIT_FENCE_SN_IN
 * - 1.10.0 - Add MSM_SUBMIT_BO_NO_IMPLICIT
 * - 1.11.0 - Add wait boost (MSM_WAIT_FENCE_BOOST, MSM_PREP_BOOST)
 */
#define MSM_VERSION_MAJOR	1
#define MSM_VERSION_MINOR	11
#define MSM_VERSION_PATCHLEVEL	0
static void msm_deinit_vram(struct drm_device *ddev);
static const struct drm_mode_config_funcs mode_config_funcs = {
	.fb_create = msm_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};

static const struct drm_mode_config_helper_funcs mode_config_helper_funcs = {
	.atomic_commit_tail = msm_atomic_commit_tail,
};
#ifdef CONFIG_DRM_FBDEV_EMULATION
static bool fbdev = true;
MODULE_PARM_DESC(fbdev, "Enable fbdev compat layer");
module_param(fbdev, bool, 0600);
#endif

static char *vram = "16m";
MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
module_param(vram, charp, 0);
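/* e.g. booting with msm.vram=32m (or vram=32m as a module option) resizes the carveout */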
static bool dumpstate;
MODULE_PARM_DESC(dumpstate, "Dump KMS state on errors");
module_param(dumpstate, bool, 0600);

static bool modeset = true;
MODULE_PARM_DESC(modeset, "Use kernel modesetting [KMS] (1=on (default), 0=disable)");
module_param(modeset, bool, 0600);
#ifdef CONFIG_FAULT_INJECTION
DECLARE_FAULT_ATTR(fail_gem_alloc);
DECLARE_FAULT_ATTR(fail_gem_iova);
#endif
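/*
 * Note: these attrs are exposed via debugfs (see msm_debugfs.c), so the GEM
 * allocation and iova error paths can be exercised with fault injection.
 */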
static irqreturn_t msm_irq(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);

	return kms->funcs->irq(kms);
}
static void msm_irq_preinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);

	kms->funcs->irq_preinstall(kms);
}
static int msm_irq_postinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);

	if (kms->funcs->irq_postinstall)
		return kms->funcs->irq_postinstall(kms);

	return 0;
}
static int msm_irq_install(struct drm_device *dev, unsigned int irq)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	int ret;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	msm_irq_preinstall(dev);

	ret = request_irq(irq, msm_irq, 0, dev->driver->name, dev);
	if (ret)
		return ret;

	kms->irq_requested = true;

	ret = msm_irq_postinstall(dev);
	if (ret) {
		free_irq(irq, dev);
		return ret;
	}

	return 0;
}
static void msm_irq_uninstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	kms->funcs->irq_uninstall(kms);
	if (kms->irq_requested)
		free_irq(kms->irq, dev);
}
struct msm_vblank_work {
	struct work_struct work;
	int crtc_id;
	bool enable;
	struct msm_drm_private *priv;
};
static void vblank_ctrl_worker(struct work_struct *work)
{
	struct msm_vblank_work *vbl_work = container_of(work,
						struct msm_vblank_work, work);
	struct msm_drm_private *priv = vbl_work->priv;
	struct msm_kms *kms = priv->kms;

	if (vbl_work->enable)
		kms->funcs->enable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);
	else
		kms->funcs->disable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);

	kfree(vbl_work);
}
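/*
 * The core can request vblank on/off from atomic context (with spinlocks
 * held), while the kms enable_vblank/disable_vblank hooks may sleep, so the
 * request is bounced to the ordered workqueue. That is also why the
 * allocation below is GFP_ATOMIC.
 */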
static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
					int crtc_id, bool enable)
{
	struct msm_vblank_work *vbl_work;

	vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC);
	if (!vbl_work)
		return -ENOMEM;

	INIT_WORK(&vbl_work->work, vblank_ctrl_worker);

	vbl_work->crtc_id = crtc_id;
	vbl_work->enable = enable;
	vbl_work->priv = priv;

	queue_work(priv->wq, &vbl_work->work);

	return 0;
}
static int msm_drm_uninit(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_drm_private *priv = platform_get_drvdata(pdev);
	struct drm_device *ddev = priv->dev;
	struct msm_kms *kms = priv->kms;
	int i;

	/*
	 * Shutdown the hw if we're far enough along where things might be on.
	 * If we run this too early, we'll end up panicking in any variety of
	 * places. Since we don't register the drm device until late in
	 * msm_drm_init, drm_dev->registered is used as an indicator that the
	 * shutdown will be successful.
	 */
	if (ddev->registered) {
		drm_dev_unregister(ddev);
		drm_atomic_helper_shutdown(ddev);
	}

	/* We must cancel and cleanup any pending vblank enable/disable
	 * work before msm_irq_uninstall() to avoid work re-enabling an
	 * irq after uninstall has disabled it.
	 */
	flush_workqueue(priv->wq);

	/* clean up event worker threads */
	for (i = 0; i < priv->num_crtcs; i++) {
		if (priv->event_thread[i].worker)
			kthread_destroy_worker(priv->event_thread[i].worker);
	}

	msm_gem_shrinker_cleanup(ddev);

	drm_kms_helper_poll_fini(ddev);

	msm_perf_debugfs_cleanup(priv);
	msm_rd_debugfs_cleanup(priv);

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (fbdev && priv->fbdev)
		msm_fbdev_free(ddev);
#endif

	msm_disp_snapshot_destroy(ddev);

	drm_mode_config_cleanup(ddev);

	for (i = 0; i < priv->num_bridges; i++)
		drm_bridge_remove(priv->bridges[i]);
	priv->num_bridges = 0;

	pm_runtime_get_sync(dev);
	msm_irq_uninstall(ddev);
	pm_runtime_put_sync(dev);

	if (kms && kms->funcs)
		kms->funcs->destroy(kms);

	msm_deinit_vram(ddev);

	component_unbind_all(dev, ddev);

	ddev->dev_private = NULL;
	drm_dev_put(ddev);

	destroy_workqueue(priv->wq);

	return 0;
}
#include <linux/of_address.h>
struct msm_gem_address_space *msm_kms_init_aspace(struct drm_device *dev)
{
	struct msm_gem_address_space *aspace;
	struct msm_mmu *mmu;
	struct device *mdp_dev = dev->dev;
	struct device *mdss_dev = mdp_dev->parent;
	struct device *iommu_dev;

	/*
	 * IOMMUs can be a part of MDSS device tree binding, or the
	 * MDP/DPU device.
	 */
	if (device_iommu_mapped(mdp_dev))
		iommu_dev = mdp_dev;
	else
		iommu_dev = mdss_dev;

	mmu = msm_iommu_new(iommu_dev, 0);
	if (IS_ERR(mmu))
		return ERR_CAST(mmu);

	if (!mmu) {
		drm_info(dev, "no IOMMU, fallback to phys contig buffers for scanout\n");
		return NULL;
	}
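	/*
	 * Note: the aspace starts at 0x1000, leaving the first page unmapped,
	 * so a zero iova never looks like a valid mapping.
	 */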
	aspace = msm_gem_address_space_create(mmu, "mdp_kms",
		0x1000, 0x100000000 - 0x1000);
	if (IS_ERR(aspace)) {
		dev_err(mdp_dev, "aspace create, error %pe\n", aspace);
		mmu->funcs->destroy(mmu);
	}

	return aspace;
}
bool msm_use_mmu(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	/*
	 * a2xx comes with its own MMU.
	 * On other platforms the IOMMU can be specified either for the
	 * MDP/DPU device or for its parent, the MDSS device.
	 */
	return priv->is_a2xx ||
		device_iommu_mapped(dev->dev) ||
		device_iommu_mapped(dev->dev->parent);
}
static int msm_init_vram(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct device_node *node;
	unsigned long size = 0;
	int ret = 0;

	/* In the device-tree world, we could have a 'memory-region'
	 * phandle, which gives us a link to our "vram". Allocating
	 * is all nicely abstracted behind the dma api, but we need
	 * to know the entire size to allocate it all in one go. There
	 * are two cases:
	 *  1) device with no IOMMU, in which case we need exclusive
	 *     access to a VRAM carveout big enough for all gpu
	 *     buffers
	 *  2) device with IOMMU, but where the bootloader puts up
	 *     a splash screen. In this case, the VRAM carveout
	 *     need only be large enough for fbdev fb. But we need
	 *     exclusive access to the buffer to avoid the kernel
	 *     using those pages for other purposes (which appears
	 *     as corruption on screen before we have a chance to
	 *     load and do initial modeset)
	 */
	node = of_parse_phandle(dev->dev->of_node, "memory-region", 0);
	if (node) {
		struct resource r;

		ret = of_address_to_resource(node, 0, &r);
		of_node_put(node);
		if (ret)
			return ret;
		size = r.end - r.start + 1;
		DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);

		/* if we have no IOMMU, then we need to use carveout allocator.
		 * Grab the entire DMA chunk carved out in early startup in
		 * mach-msm:
		 */
	} else if (!msm_use_mmu(dev)) {
		DRM_INFO("using %s VRAM carveout\n", vram);
		size = memparse(vram, NULL);
	}

	if (size) {
		unsigned long attrs = 0;
		void *p;

		priv->vram.size = size;

		drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
		spin_lock_init(&priv->vram.lock);

		attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
		attrs |= DMA_ATTR_WRITE_COMBINE;

		/* note that for no-kernel-mapping, the vaddr returned
		 * is bogus, but non-null if allocation succeeded:
		 */
		p = dma_alloc_attrs(dev->dev, size,
				&priv->vram.paddr, GFP_KERNEL, attrs);
		if (!p) {
			DRM_DEV_ERROR(dev->dev, "failed to allocate VRAM\n");
			priv->vram.paddr = 0;
			return -ENOMEM;
		}

		DRM_DEV_INFO(dev->dev, "VRAM: %08x->%08x\n",
				(uint32_t)priv->vram.paddr,
				(uint32_t)(priv->vram.paddr + size));
	}

	return ret;
}
static void msm_deinit_vram(struct drm_device *ddev)
{
	struct msm_drm_private *priv = ddev->dev_private;
	unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING;

	if (!priv->vram.paddr)
		return;

	drm_mm_takedown(&priv->vram.mm);
	dma_free_attrs(ddev->dev, priv->vram.size, NULL, priv->vram.paddr,
			attrs);
}
static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
{
	struct msm_drm_private *priv = dev_get_drvdata(dev);
	struct drm_device *ddev;
	struct msm_kms *kms;
	int ret, i;

	if (drm_firmware_drivers_only())
		return -ENODEV;

	ddev = drm_dev_alloc(drv, dev);
	if (IS_ERR(ddev)) {
		DRM_DEV_ERROR(dev, "failed to allocate drm_device\n");
		return PTR_ERR(ddev);
	}
	ddev->dev_private = priv;
	priv->dev = ddev;

	priv->wq = alloc_ordered_workqueue("msm", 0);
	if (!priv->wq) {
		ret = -ENOMEM;
		goto err_put_dev;
	}

	INIT_LIST_HEAD(&priv->objects);
	mutex_init(&priv->obj_lock);

	/*
	 * Initialize the LRUs:
	 */
	mutex_init(&priv->lru.lock);
	drm_gem_lru_init(&priv->lru.unbacked, &priv->lru.lock);
	drm_gem_lru_init(&priv->lru.pinned,   &priv->lru.lock);
	drm_gem_lru_init(&priv->lru.willneed, &priv->lru.lock);
	drm_gem_lru_init(&priv->lru.dontneed, &priv->lru.lock);

	/* Teach lockdep about lock ordering wrt. shrinker: */
	fs_reclaim_acquire(GFP_KERNEL);
	might_lock(&priv->lru.lock);
	fs_reclaim_release(GFP_KERNEL);

	drm_mode_config_init(ddev);

	ret = msm_init_vram(ddev);
	if (ret)
		goto err_cleanup_mode_config;

	/* Bind all our sub-components: */
	ret = component_bind_all(dev, ddev);
	if (ret)
		goto err_deinit_vram;

	dma_set_max_seg_size(dev, UINT_MAX);

	msm_gem_shrinker_init(ddev);

	if (priv->kms_init) {
		ret = priv->kms_init(ddev);
		if (ret) {
			DRM_DEV_ERROR(dev, "failed to load kms\n");
			priv->kms = NULL;
			goto err_msm_uninit;
		}
		kms = priv->kms;
	} else {
		/* valid only for the dummy headless case, where of_node=NULL */
		WARN_ON(dev->of_node);
		kms = NULL;
	}

	/* Enable normalization of plane zpos */
	ddev->mode_config.normalize_zpos = true;

	if (kms) {
		kms->dev = ddev;
		ret = kms->funcs->hw_init(kms);
		if (ret) {
			DRM_DEV_ERROR(dev, "kms hw init failed: %d\n", ret);
			goto err_msm_uninit;
		}
	}

	drm_helper_move_panel_connectors_to_head(ddev);

	ddev->mode_config.funcs = &mode_config_funcs;
	ddev->mode_config.helper_private = &mode_config_helper_funcs;

	for (i = 0; i < priv->num_crtcs; i++) {
		/* initialize event thread */
		priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id;
		priv->event_thread[i].dev = ddev;
		priv->event_thread[i].worker = kthread_create_worker(0,
			"crtc_event:%d", priv->event_thread[i].crtc_id);
		if (IS_ERR(priv->event_thread[i].worker)) {
			ret = PTR_ERR(priv->event_thread[i].worker);
			DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n");
			priv->event_thread[i].worker = NULL;
			goto err_msm_uninit;
		}
		sched_set_fifo(priv->event_thread[i].worker->task);
	}

	ret = drm_vblank_init(ddev, priv->num_crtcs);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "failed to initialize vblank\n");
		goto err_msm_uninit;
	}

	if (kms) {
		pm_runtime_get_sync(dev);
		ret = msm_irq_install(ddev, kms->irq);
		pm_runtime_put_sync(dev);
		if (ret < 0) {
			DRM_DEV_ERROR(dev, "failed to install IRQ handler\n");
			goto err_msm_uninit;
		}
	}

	ret = drm_dev_register(ddev, 0);
	if (ret)
		goto err_msm_uninit;

	if (kms) {
		ret = msm_disp_snapshot_init(ddev);
		if (ret)
			DRM_DEV_ERROR(dev, "msm_disp_snapshot_init failed ret = %d\n", ret);
	}
	drm_mode_config_reset(ddev);

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (kms && fbdev)
		priv->fbdev = msm_fbdev_init(ddev);
#endif

	ret = msm_debugfs_late_init(ddev);
	if (ret)
		goto err_msm_uninit;

	drm_kms_helper_poll_init(ddev);

	return 0;

err_msm_uninit:
	msm_drm_uninit(dev);
	return ret;

err_deinit_vram:
	msm_deinit_vram(ddev);
err_cleanup_mode_config:
	drm_mode_config_cleanup(ddev);
	destroy_workqueue(priv->wq);
err_put_dev:
	drm_dev_put(ddev);
	return ret;
}
static void load_gpu(struct drm_device *dev)
{
	static DEFINE_MUTEX(init_lock);
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&init_lock);

	if (!priv->gpu)
		priv->gpu = adreno_load_gpu(dev);

	mutex_unlock(&init_lock);
}
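/*
 * Each open file gets its own msm_file_private; on GPUs that support it,
 * this includes a per-process address space, so one client's mappings are
 * not visible to another.
 */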
static int context_init(struct drm_device *dev, struct drm_file *file)
{
	static atomic_t ident = ATOMIC_INIT(0);
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	INIT_LIST_HEAD(&ctx->submitqueues);
	rwlock_init(&ctx->queuelock);

	kref_init(&ctx->ref);
	msm_submitqueue_init(dev, ctx);

	ctx->aspace = msm_gpu_create_private_address_space(priv->gpu, current);
	file->driver_priv = ctx;

	ctx->seqno = atomic_inc_return(&ident);

	return 0;
}
static int msm_open(struct drm_device *dev, struct drm_file *file)
{
	/* For now, load gpu on open.. to avoid the requirement of having
	 * firmware in the initrd.
	 */
	load_gpu(dev);

	return context_init(dev, file);
}
static void context_close(struct msm_file_private *ctx)
{
	msm_submitqueue_close(ctx);
	msm_file_private_put(ctx);
}
static void msm_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx = file->driver_priv;

	/*
	 * It is not possible to set sysprof param to non-zero if gpu
	 * is not initialized:
	 */
	if (priv->gpu)
		msm_file_private_set_sysprof(ctx, priv->gpu, 0);

	context_close(ctx);
}
int msm_crtc_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	if (!kms)
		return -ENXIO;

	drm_dbg_vbl(dev, "crtc=%u", pipe);

	return vblank_ctrl_queue_work(priv, pipe, true);
}
void msm_crtc_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	if (!kms)
		return;

	drm_dbg_vbl(dev, "crtc=%u", pipe);

	vblank_ctrl_queue_work(priv, pipe, false);
}
static int msm_ioctl_get_param(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_param *args = data;
	struct msm_gpu *gpu;

	/* for now, we just have 3d pipe.. eventually this would need to
	 * be more clever to dispatch to appropriate gpu module:
	 */
	if ((args->pipe != MSM_PIPE_3D0) || (args->pad != 0))
		return -EINVAL;

	gpu = priv->gpu;

	if (!gpu)
		return -ENXIO;

	return gpu->funcs->get_param(gpu, file->driver_priv,
				     args->param, &args->value, &args->len);
}
static int msm_ioctl_set_param(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_param *args = data;
	struct msm_gpu *gpu;

	if ((args->pipe != MSM_PIPE_3D0) || (args->pad != 0))
		return -EINVAL;

	gpu = priv->gpu;

	if (!gpu)
		return -ENXIO;

	return gpu->funcs->set_param(gpu, file->driver_priv,
				     args->param, args->value, args->len);
}
static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_new *args = data;
	uint32_t flags = args->flags;

	if (args->flags & ~MSM_BO_FLAGS) {
		DRM_ERROR("invalid flags: %08x\n", args->flags);
		return -EINVAL;
	}

	/*
	 * Uncached CPU mappings are deprecated, as of:
	 *
	 * 9ef364432db4 ("drm/msm: deprecate MSM_BO_UNCACHED (map as writecombine instead)")
	 *
	 * So promote them to WC.
	 */
	if (flags & MSM_BO_UNCACHED) {
		flags &= ~MSM_BO_CACHED;
		flags |= MSM_BO_WC;
	}

	if (should_fail(&fail_gem_alloc, args->size))
		return -ENOMEM;

	/* pass the promoted flags, not args->flags, so UNCACHED really maps WC */
	return msm_gem_new_handle(dev, file, args->size,
			flags, &args->handle, NULL);
}
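/*
 * Note: uabi timeouts are absolute CLOCK_MONOTONIC times; the remaining
 * time is converted to jiffies by timeout_to_jiffies() when the wait
 * actually happens.
 */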
static inline ktime_t to_ktime(struct drm_msm_timespec timeout)
{
	return ktime_set(timeout.tv_sec, timeout.tv_nsec);
}
static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_cpu_prep *args = data;
	struct drm_gem_object *obj;
	ktime_t timeout = to_ktime(args->timeout);
	int ret;

	if (args->op & ~MSM_PREP_FLAGS) {
		DRM_ERROR("invalid op: %08x\n", args->op);
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_prep(obj, args->op, &timeout);

	drm_gem_object_put(obj);

	return ret;
}
static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_cpu_fini *args = data;
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_fini(obj);

	drm_gem_object_put(obj);

	return ret;
}
static int msm_ioctl_gem_info_iova(struct drm_device *dev,
		struct drm_file *file, struct drm_gem_object *obj,
		uint64_t *iova)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx = file->driver_priv;

	if (!priv->gpu)
		return -EINVAL;

	if (should_fail(&fail_gem_iova, obj->size))
		return -ENOMEM;

	/*
	 * Don't pin the memory here - just get an address so that userspace can
	 * be productive
	 */
	return msm_gem_get_iova(obj, ctx->aspace, iova);
}
static int msm_ioctl_gem_info_set_iova(struct drm_device *dev,
		struct drm_file *file, struct drm_gem_object *obj,
		uint64_t iova)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx = file->driver_priv;

	if (!priv->gpu)
		return -EINVAL;

	/* Only supported if per-process address space is supported: */
	if (priv->gpu->aspace == ctx->aspace)
		return -EOPNOTSUPP;

	if (should_fail(&fail_gem_iova, obj->size))
		return -ENOMEM;

	return msm_gem_set_iova(obj, ctx->aspace, iova);
}
static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_info *args = data;
	struct drm_gem_object *obj;
	struct msm_gem_object *msm_obj;
	int i, ret = 0;

	if (args->pad)
		return -EINVAL;

	switch (args->info) {
	case MSM_INFO_GET_OFFSET:
	case MSM_INFO_GET_IOVA:
	case MSM_INFO_SET_IOVA:
	case MSM_INFO_GET_FLAGS:
		/* value returned as immediate, not pointer, so len==0: */
		if (args->len)
			return -EINVAL;
		break;
	case MSM_INFO_SET_NAME:
	case MSM_INFO_GET_NAME:
		break;
	default:
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	msm_obj = to_msm_bo(obj);

	switch (args->info) {
	case MSM_INFO_GET_OFFSET:
		args->value = msm_gem_mmap_offset(obj);
		break;
	case MSM_INFO_GET_IOVA:
		ret = msm_ioctl_gem_info_iova(dev, file, obj, &args->value);
		break;
	case MSM_INFO_SET_IOVA:
		ret = msm_ioctl_gem_info_set_iova(dev, file, obj, args->value);
		break;
	case MSM_INFO_GET_FLAGS:
		if (obj->import_attach) {
			ret = -EINVAL;
			break;
		}
		/* Hide internal kernel-only flags: */
		args->value = to_msm_bo(obj)->flags & MSM_BO_FLAGS;
		break;
	case MSM_INFO_SET_NAME:
		/* length check should leave room for terminating null: */
		if (args->len >= sizeof(msm_obj->name)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(msm_obj->name, u64_to_user_ptr(args->value),
				   args->len)) {
			msm_obj->name[0] = '\0';
			ret = -EFAULT;
			break;
		}
		msm_obj->name[args->len] = '\0';
		for (i = 0; i < args->len; i++) {
			if (!isprint(msm_obj->name[i])) {
				msm_obj->name[i] = '\0';
				break;
			}
		}
		break;
	case MSM_INFO_GET_NAME:
		if (args->value && (args->len < strlen(msm_obj->name))) {
			ret = -EINVAL;
			break;
		}
		args->len = strlen(msm_obj->name);
		if (args->value) {
			if (copy_to_user(u64_to_user_ptr(args->value),
					 msm_obj->name, args->len))
				ret = -EFAULT;
		}
		break;
	}

	drm_gem_object_put(obj);

	return ret;
}
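/*
 * MSM_WAIT_FENCE_BOOST asks the fence signaler, via the dma-fence deadline
 * API, for the result as soon as possible; msm uses this as a hint to clock
 * the GPU up.
 */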
static int wait_fence(struct msm_gpu_submitqueue *queue, uint32_t fence_id,
		      ktime_t timeout, uint32_t flags)
{
	struct dma_fence *fence;
	int ret;

	if (fence_after(fence_id, queue->last_fence)) {
		DRM_ERROR_RATELIMITED("waiting on invalid fence: %u (of %u)\n",
				      fence_id, queue->last_fence);
		return -EINVAL;
	}

	/*
	 * Map submitqueue scoped "seqno" (which is actually an idr key)
	 * back to underlying dma-fence
	 *
	 * The fence is removed from the fence_idr when the submit is
	 * retired, so if the fence is not found it means there is nothing
	 * to wait for
	 */
	spin_lock(&queue->idr_lock);
	fence = idr_find(&queue->fence_idr, fence_id);
	if (fence)
		fence = dma_fence_get_rcu(fence);
	spin_unlock(&queue->idr_lock);

	if (!fence)
		return 0;

	if (flags & MSM_WAIT_FENCE_BOOST)
		dma_fence_set_deadline(fence, ktime_get());

	ret = dma_fence_wait_timeout(fence, true, timeout_to_jiffies(&timeout));
	if (ret == 0) {
		ret = -ETIMEDOUT;
	} else if (ret != -ERESTARTSYS) {
		ret = 0;
	}

	dma_fence_put(fence);

	return ret;
}
static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_wait_fence *args = data;
	struct msm_gpu_submitqueue *queue;
	int ret;

	if (args->flags & ~MSM_WAIT_FENCE_FLAGS) {
		DRM_ERROR("invalid flags: %08x\n", args->flags);
		return -EINVAL;
	}

	if (!priv->gpu)
		return 0;

	queue = msm_submitqueue_get(file->driver_priv, args->queueid);
	if (!queue)
		return -ENOENT;

	ret = wait_fence(queue, args->fence, to_ktime(args->timeout), args->flags);

	msm_submitqueue_put(queue);

	return ret;
}
static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_madvise *args = data;
	struct drm_gem_object *obj;
	int ret;

	switch (args->madv) {
	case MSM_MADV_DONTNEED:
	case MSM_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_madvise(obj, args->madv);
	if (ret >= 0) {
		args->retained = ret;
		ret = 0;
	}

	drm_gem_object_put(obj);

	return ret;
}
static int msm_ioctl_submitqueue_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_submitqueue *args = data;

	if (args->flags & ~MSM_SUBMITQUEUE_FLAGS)
		return -EINVAL;

	return msm_submitqueue_create(dev, file->driver_priv, args->prio,
		args->flags, &args->id);
}
static int msm_ioctl_submitqueue_query(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	return msm_submitqueue_query(dev, file->driver_priv, data);
}
static int msm_ioctl_submitqueue_close(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	u32 id = *(u32 *) data;

	return msm_submitqueue_remove(file->driver_priv, id);
}
static const struct drm_ioctl_desc msm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(MSM_GET_PARAM,    msm_ioctl_get_param,    DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SET_PARAM,    msm_ioctl_set_param,    DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_NEW,      msm_ioctl_gem_new,      DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_INFO,     msm_ioctl_gem_info,     DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT,   msm_ioctl_gem_submit,   DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE,   msm_ioctl_wait_fence,   DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE,  msm_ioctl_gem_madvise,  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW,   msm_ioctl_submitqueue_new,   DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_RENDER_ALLOW),
};
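/*
 * All of these are DRM_RENDER_ALLOW, i.e. also reachable through the render
 * node. A minimal userspace sketch (assuming a libdrm fd for the msm device):
 *
 *	struct drm_msm_gem_new req = { .size = 0x1000, .flags = MSM_BO_WC };
 *	int ret = drmCommandWriteRead(fd, DRM_MSM_GEM_NEW, &req, sizeof(req));
 *	// on success, req.handle names the new GEM object
 */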
static void msm_fop_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct drm_file *file = f->private_data;
	struct drm_device *dev = file->minor->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_printer p = drm_seq_file_printer(m);

	if (!priv->gpu)
		return;

	msm_gpu_show_fdinfo(priv->gpu, file->driver_priv, &p);
}
static const struct file_operations fops = {
	.owner = THIS_MODULE,
	DRM_GEM_FOPS,
	.show_fdinfo = msm_fop_show_fdinfo,
};
static const struct drm_driver msm_driver = {
	.driver_features    = DRIVER_GEM |
				DRIVER_RENDER |
				DRIVER_ATOMIC |
				DRIVER_MODESET |
				DRIVER_SYNCOBJ,
	.open               = msm_open,
	.postclose          = msm_postclose,
	.lastclose          = drm_fb_helper_lastclose,
	.dumb_create        = msm_gem_dumb_create,
	.dumb_map_offset    = msm_gem_dumb_map_offset,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
	.gem_prime_mmap     = msm_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init       = msm_debugfs_init,
#endif
	.ioctls             = msm_ioctls,
	.num_ioctls         = ARRAY_SIZE(msm_ioctls),
	.fops               = &fops,
	.name               = "msm",
	.desc               = "MSM Snapdragon DRM",
	.date               = "20130625",
	.major              = MSM_VERSION_MAJOR,
	.minor              = MSM_VERSION_MINOR,
	.patchlevel         = MSM_VERSION_PATCHLEVEL,
};
int msm_pm_prepare(struct device *dev)
{
	struct msm_drm_private *priv = dev_get_drvdata(dev);
	struct drm_device *ddev = priv ? priv->dev : NULL;

	if (!priv || !priv->kms)
		return 0;

	return drm_mode_config_helper_suspend(ddev);
}
void msm_pm_complete(struct device *dev)
{
	struct msm_drm_private *priv = dev_get_drvdata(dev);
	struct drm_device *ddev = priv ? priv->dev : NULL;

	if (!priv || !priv->kms)
		return;

	drm_mode_config_helper_resume(ddev);
}
static const struct dev_pm_ops msm_pm_ops = {
	.prepare = msm_pm_prepare,
	.complete = msm_pm_complete,
};
/*
 * Componentized driver support:
 */

/*
 * Identify what components need to be added by parsing what remote-endpoints
 * our MDP output ports are connected to. In the case of LVDS on MDP4, there
 * is no external component that we need to add since LVDS is within MDP4
 * itself.
 */
static int add_components_mdp(struct device *master_dev,
			      struct component_match **matchptr)
{
	struct device_node *np = master_dev->of_node;
	struct device_node *ep_node;

	for_each_endpoint_of_node(np, ep_node) {
		struct device_node *intf;
		struct of_endpoint ep;
		int ret;

		ret = of_graph_parse_endpoint(ep_node, &ep);
		if (ret) {
			DRM_DEV_ERROR(master_dev, "unable to parse port endpoint\n");
			of_node_put(ep_node);
			return ret;
		}

		/*
		 * The LCDC/LVDS port on MDP4 is a special case where the
		 * remote-endpoint isn't a component that we need to add
		 */
		if (of_device_is_compatible(np, "qcom,mdp4") &&
		    ep.port == 0)
			continue;

		/*
		 * It's okay if some of the ports don't have a remote endpoint
		 * specified. It just means that the port isn't connected to
		 * any external interface.
		 */
		intf = of_graph_get_remote_port_parent(ep_node);
		if (!intf)
			continue;

		if (of_device_is_available(intf))
			drm_of_component_match_add(master_dev, matchptr,
						   component_compare_of, intf);

		of_node_put(intf);
	}

	return 0;
}
/*
 * We don't know what the best binding to link the gpu with the drm device is.
 * For now, we just hunt for all the possible gpus that we support, and add them
 * as components.
 */
static const struct of_device_id msm_gpu_match[] = {
	{ .compatible = "qcom,adreno" },
	{ .compatible = "qcom,adreno-3xx" },
	{ .compatible = "amd,imageon" },
	{ .compatible = "qcom,kgsl-3d0" },
	{ },
};
static int add_gpu_components(struct device *dev,
			      struct component_match **matchptr)
{
	struct device_node *np;

	np = of_find_matching_node(NULL, msm_gpu_match);
	if (!np)
		return 0;

	if (of_device_is_available(np))
		drm_of_component_match_add(dev, matchptr, component_compare_of, np);

	of_node_put(np);

	return 0;
}
static int msm_drm_bind(struct device *dev)
{
	return msm_drm_init(dev, &msm_driver);
}

static void msm_drm_unbind(struct device *dev)
{
	msm_drm_uninit(dev);
}

const struct component_master_ops msm_drm_ops = {
	.bind = msm_drm_bind,
	.unbind = msm_drm_unbind,
};
int msm_drv_probe(struct device *master_dev,
	int (*kms_init)(struct drm_device *dev))
{
	struct msm_drm_private *priv;
	struct component_match *match = NULL;
	int ret;

	priv = devm_kzalloc(master_dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->kms_init = kms_init;
	dev_set_drvdata(master_dev, priv);

	/* Add mdp components if we have KMS. */
	if (kms_init) {
		ret = add_components_mdp(master_dev, &match);
		if (ret)
			return ret;
	}

	ret = add_gpu_components(master_dev, &match);
	if (ret)
		return ret;

	/* on all devices that I am aware of, IOMMUs which can map
	 * any address the cpu can see are used:
	 */
	ret = dma_set_mask_and_coherent(master_dev, ~0);
	if (ret)
		return ret;

	ret = component_master_add_with_match(master_dev, &msm_drm_ops, match);
	if (ret)
		return ret;

	return 0;
}
/*
 * Used only for headless GPU instances
 */

static int msm_pdev_probe(struct platform_device *pdev)
{
	return msm_drv_probe(&pdev->dev, NULL);
}
static int msm_pdev_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &msm_drm_ops);

	return 0;
}
void msm_drv_shutdown(struct platform_device *pdev)
{
	struct msm_drm_private *priv = platform_get_drvdata(pdev);
	struct drm_device *drm = priv ? priv->dev : NULL;

	/*
	 * Shutdown the hw if we're far enough along where things might be on.
	 * If we run this too early, we'll end up panicking in any variety of
	 * places. Since we don't register the drm device until late in
	 * msm_drm_init, drm_dev->registered is used as an indicator that the
	 * shutdown will be successful.
	 */
	if (drm && drm->registered && priv->kms)
		drm_atomic_helper_shutdown(drm);
}
static struct platform_driver msm_platform_driver = {
	.probe      = msm_pdev_probe,
	.remove     = msm_pdev_remove,
	.shutdown   = msm_drv_shutdown,
	.driver     = {
		.name   = "msm",
		.pm     = &msm_pm_ops,
	},
};
static int __init msm_drm_register(void)
{
	if (!modeset)
		return -EINVAL;

	DBG("init");
	msm_mdp_register();
	msm_dpu_register();
	msm_dsi_register();
	msm_hdmi_register();
	msm_dp_register();
	adreno_register();
	msm_mdp4_register();
	msm_mdss_register();
	return platform_driver_register(&msm_platform_driver);
}
static void __exit msm_drm_unregister(void)
{
	DBG("fini");
	platform_driver_unregister(&msm_platform_driver);
	msm_mdss_unregister();
	msm_mdp4_unregister();
	msm_dp_unregister();
	msm_hdmi_unregister();
	adreno_unregister();
	msm_dsi_unregister();
	msm_mdp_unregister();
	msm_dpu_unregister();
}
module_init(msm_drm_register);
module_exit(msm_drm_unregister);

MODULE_AUTHOR("Rob Clark <robdclark@gmail.com>");
MODULE_DESCRIPTION("MSM DRM Driver");
MODULE_LICENSE("GPL");