/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#ifndef __MSM_GPU_H__
#define __MSM_GPU_H__

#include <linux/adreno-smmu-priv.h>
#include <linux/clk.h>
#include <linux/interconnect.h>
#include <linux/pm_opp.h>
#include <linux/regulator/consumer.h>

#include "msm_fence.h"
#include "msm_ringbuffer.h"

struct msm_gem_submit;
struct msm_gpu_perfcntr;

struct msm_gpu_config {
	const char *ioname;
	unsigned int nr_rings;
};

/* So far, with hardware that I've seen to date, we can have:
 *  + zero, one, or two z180 2d cores
 *  + a3xx or a2xx 3d core, which share a common CP (the firmware
 *    for the CP seems to implement some different PM4 packet types
 *    but the basics of cmdstream submission are the same)
 *
 * Which means that the eventual complete "class" hierarchy, once
 * support for all past and present hw is in place, becomes:
 *  + msm_gpu
 *    + adreno_gpu
 *      + a3xx_gpu
 *      + a2xx_gpu
 */

struct msm_gpu_funcs {
	int (*get_param)(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
	int (*hw_init)(struct msm_gpu *gpu);
	int (*pm_suspend)(struct msm_gpu *gpu);
	int (*pm_resume)(struct msm_gpu *gpu);
	void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit);
	void (*flush)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
	irqreturn_t (*irq)(struct msm_gpu *gpu);
	struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
	void (*recover)(struct msm_gpu *gpu);
	void (*destroy)(struct msm_gpu *gpu);
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
	/* show GPU status in debugfs: */
	void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state,
			struct drm_printer *p);
	/* for generation specific debugfs: */
	void (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor);
#endif
	unsigned long (*gpu_busy)(struct msm_gpu *gpu);
	struct msm_gpu_state *(*gpu_state_get)(struct msm_gpu *gpu);
	int (*gpu_state_put)(struct msm_gpu_state *state);
	unsigned long (*gpu_get_freq)(struct msm_gpu *gpu);
	void (*gpu_set_freq)(struct msm_gpu *gpu, struct dev_pm_opp *opp);
	struct msm_gem_address_space *(*create_address_space)
		(struct msm_gpu *gpu, struct platform_device *pdev);
	struct msm_gem_address_space *(*create_private_address_space)
		(struct msm_gpu *gpu);
	uint32_t (*get_rptr)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
};

/* Additional state for iommu faults: */
struct msm_gpu_fault_info {
	u64 ttbr0;
	unsigned long iova;
	int flags;
	const char *type;
	const char *block;
};

/**
 * struct msm_gpu_devfreq - devfreq related state
 */
struct msm_gpu_devfreq {
	/** devfreq: devfreq instance */
	struct devfreq *devfreq;

	/**
	 * idle_freq: A PM QoS constraint to limit max freq while the GPU
	 * is idle.
	 */
	struct dev_pm_qos_request idle_freq;

	/**
	 * boost_freq: A PM QoS constraint to boost min freq for a period
	 * of time until the boost expires.
	 */
	struct dev_pm_qos_request boost_freq;

	/**
	 * busy_cycles: Used by the implementation of gpu->gpu_busy() to
	 * track the last busy counter value, for calculating elapsed busy
	 * cycles since the last sampling period.
	 */
	u64 busy_cycles;

	/** time: Time of last sampling period. */
	ktime_t time;

	/** idle_time: Time of last transition to idle. */
	ktime_t idle_time;

	/**
	 * idle_work: Used to delay clamping to idle freq on active->idle
	 * transition.
	 */
	struct msm_hrtimer_work idle_work;

	/**
	 * boost_work: Used to reset the boost_freq constraint after the
	 * boost period has expired.
	 */
	struct msm_hrtimer_work boost_work;
};

struct msm_gpu {
	const char *name;
	struct drm_device *dev;
	struct platform_device *pdev;
	const struct msm_gpu_funcs *funcs;

	struct adreno_smmu_priv adreno_smmu;

	/* performance counters (hw & sw): */
	spinlock_t perf_lock;
	bool perfcntr_active;
	uint32_t totaltime, activetime;	/* sw counters */
	uint32_t last_cntrs[5];		/* hw counters */
	const struct msm_gpu_perfcntr *perfcntrs;
	uint32_t num_perfcntrs;

	struct msm_ringbuffer *rb[MSM_GPU_MAX_RINGS];
	int nr_rings;

	/**
	 * cur_ctx_seqno:
	 *
	 * The ctx->seqno value of the last context to submit rendering,
	 * and the one with current pgtables installed (for generations
	 * that support per-context pgtables).  Tracked by seqno rather
	 * than pointer value to avoid dangling pointers, and cases where
	 * a ctx can be freed and a new one created with the same address.
	 */
	int cur_ctx_seqno;

	/*
	 * List of GEM active objects on this gpu.  Protected by
	 * msm_drm_private::mm_lock
	 */
	struct list_head active_list;

	/**
	 * lock:
	 *
	 * General lock for serializing all the gpu things.
	 *
	 * TODO move to per-ring locking where feasible (ie. submit/retire
	 * path, etc)
	 */
	struct mutex lock;

	/**
	 * active_submits:
	 *
	 * The number of submitted but not yet retired submits, used to
	 * determine transitions between active and idle.
	 *
	 * Protected by active_lock
	 */
	int active_submits;

	/** active_lock: protects active_submits and idle/active transitions */
	struct mutex active_lock;

	/* does gpu need hw_init? */
	bool needs_hw_init;

	/* number of GPU hangs (for all contexts) */
	int global_faults;

	void __iomem *mmio;
	int irq;

	struct msm_gem_address_space *aspace;

	/* Power Control: */
	struct regulator *gpu_reg, *gpu_cx;
	struct clk_bulk_data *grp_clks;
	int nr_clocks;
	struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk;

	/* Hang and Inactivity Detection:
	 */
#define DRM_MSM_INACTIVE_PERIOD  66 /* in ms (roughly four frames) */

#define DRM_MSM_HANGCHECK_DEFAULT_PERIOD 500 /* in ms */
	struct timer_list hangcheck_timer;

	/* Fault info for most recent iova fault: */
	struct msm_gpu_fault_info fault_info;

	/* work for handling GPU iova faults: */
	struct kthread_work fault_work;

	/* work for handling GPU recovery: */
	struct kthread_work recover_work;

	/* work for handling active-list retiring: */
	struct kthread_work retire_work;

	/* worker for retire/recover: */
	struct kthread_worker *worker;

	struct drm_gem_object *memptrs_bo;

	struct msm_gpu_devfreq devfreq;

	uint32_t suspend_count;

	struct msm_gpu_state *crashstate;

	/* Enable clamping to idle freq when inactive: */
	bool clamp_to_idle;

	/* True if the hardware supports expanded apriv (a650 and newer) */
	bool hw_apriv;

	struct thermal_cooling_device *cooling;
};

static inline struct msm_gpu *dev_to_gpu(struct device *dev)
{
	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(dev);

	return container_of(adreno_smmu, struct msm_gpu, adreno_smmu);
}

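/*
 * Example (illustrative sketch, not part of the driver): because the
 * drvdata of the GPU device points at the embedded adreno_smmu member,
 * a runtime-PM style callback could recover the msm_gpu like so:
 *
 *	static int example_runtime_suspend(struct device *dev)
 *	{
 *		struct msm_gpu *gpu = dev_to_gpu(dev);
 *
 *		return gpu->funcs->pm_suspend(gpu);
 *	}
 */
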
/* It turns out that all targets use the same ringbuffer size */
#define MSM_GPU_RINGBUFFER_SZ SZ_32K
#define MSM_GPU_RINGBUFFER_BLKSIZE 32

#define MSM_GPU_RB_CNTL_DEFAULT \
		(AXXX_CP_RB_CNTL_BUFSZ(ilog2(MSM_GPU_RINGBUFFER_SZ / 8)) | \
		AXXX_CP_RB_CNTL_BLKSZ(ilog2(MSM_GPU_RINGBUFFER_BLKSIZE / 8)))

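/*
 * Worked out for the defaults above: BUFSZ = ilog2(SZ_32K / 8) =
 * ilog2(4096) = 12 and BLKSZ = ilog2(32 / 8) = ilog2(4) = 2, i.e. both
 * fields encode log2 of the size in units of 8 bytes.
 */
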
static inline bool msm_gpu_active(struct msm_gpu *gpu)
{
	int i;

	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		if (fence_after(ring->seqno, ring->memptrs->fence))
			return true;
	}

	return false;
}

/* Perf-Counters:
 * The select_reg and select_val are just there for the benefit of the
 * child class that actually enables the perf counter, but the msm_gpu
 * base class will handle sampling/displaying the counters.
 */
struct msm_gpu_perfcntr {
	uint32_t select_reg;
	uint32_t select_val;
	uint32_t sample_reg;
	const char *name;
};

/*
 * The number of priority levels provided by drm gpu scheduler.  The
 * DRM_SCHED_PRIORITY_KERNEL priority level is treated specially in some
 * cases, so we don't use it (no need for kernel-generated jobs).
 */
#define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_HIGH - DRM_SCHED_PRIORITY_MIN)

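/*
 * For example, with the scheduler enum values at the time of writing
 * (DRM_SCHED_PRIORITY_MIN == 0, DRM_SCHED_PRIORITY_HIGH == 2), this
 * works out to 1 + 2 - 0 = 3 priority levels per ring.
 */
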
/**
 * struct msm_file_private - per-drm_file context
 *
 * @queuelock: synchronizes access to submitqueues list
 * @submitqueues: list of &msm_gpu_submitqueue created by userspace
 * @queueid: counter incremented each time a submitqueue is created,
 *    used to assign &msm_gpu_submitqueue.id
 * @aspace: the per-process GPU address-space
 * @ref: reference count
 * @seqno: unique per process seqno
 */
struct msm_file_private {
	rwlock_t queuelock;
	struct list_head submitqueues;
	int queueid;
	struct msm_gem_address_space *aspace;
	struct kref ref;
	int seqno;

	/**
	 * entities:
	 *
	 * Table of per-priority-level sched entities used by submitqueues
	 * associated with this &drm_file.  Because some userspace apps
	 * make assumptions about rendering from multiple gl contexts
	 * (of the same priority) within the process happening in FIFO
	 * order without requiring any fencing beyond MakeCurrent(), we
	 * create at most one &drm_sched_entity per-process per-priority-
	 * level.
	 */
	struct drm_sched_entity *entities[NR_SCHED_PRIORITIES * MSM_GPU_MAX_RINGS];
};

/**
 * msm_gpu_convert_priority - Map userspace priority to ring # and sched priority
 *
 * @gpu: the gpu instance
 * @prio: the userspace priority level
 * @ring_nr: [out] the ringbuffer the userspace priority maps to
 * @sched_prio: [out] the gpu scheduler priority level which the userspace
 *    priority maps to
 *
 * With drm/scheduler providing its own level of prioritization, our total
 * number of available priority levels is (nr_rings * NR_SCHED_PRIORITIES).
 * Each ring is associated with its own scheduler instance.  However, our
 * UABI is that lower numerical values are higher priority.  So mapping the
 * single userspace priority level into ring_nr and sched_prio takes some
 * care.  The userspace-provided priority (when a submitqueue is created)
 * is mapped to ring nr and scheduler priority as such:
 *
 *   ring_nr    = userspace_prio / NR_SCHED_PRIORITIES
 *   sched_prio = NR_SCHED_PRIORITIES -
 *                (userspace_prio % NR_SCHED_PRIORITIES) - 1
 *
 * This allows generations without preemption (nr_rings==1) to have some
 * amount of prioritization, and provides more priority levels for gens
 * that do have preemption.
 */
static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio,
		unsigned *ring_nr, enum drm_sched_priority *sched_prio)
{
	unsigned rn, sp;

	rn = div_u64_rem(prio, NR_SCHED_PRIORITIES, &sp);

	/* invert sched priority to map to higher-numeric-is-higher-
	 * priority convention
	 */
	sp = NR_SCHED_PRIORITIES - sp - 1;

	if (rn >= gpu->nr_rings)
		return -EINVAL;

	*ring_nr = rn;
	*sched_prio = sp;

	return 0;
}

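/*
 * Worked example (assuming NR_SCHED_PRIORITIES == 3 and a GPU with
 * nr_rings == 4): userspace prio 7 maps to ring_nr = 7 / 3 = 2 and
 * sched_prio = 3 - (7 % 3) - 1 = 1, while prio 0 (highest) maps to
 * ring_nr = 0 and sched_prio = 2 (the highest scheduler priority).
 */
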
/**
 * struct msm_gpu_submitqueue - Userspace created context.
 *
 * A submitqueue is associated with a gl context or vk queue (or equiv)
 * in userspace.
 *
 * @id: userspace id for the submitqueue, unique within the drm_file
 * @flags: userspace flags for the submitqueue, specified at creation
 *    (currently unused)
 * @ring_nr: the ringbuffer used by this submitqueue, which is determined
 *    by the submitqueue's priority
 * @faults: the number of GPU hangs associated with this submitqueue
 * @last_fence: the sequence number of the last allocated fence (for error
 *    checking)
 * @ctx: the per-drm_file context associated with the submitqueue (ie.
 *    which set of pgtables the jobs submitted to this queue use)
 * @node: node in the context's list of submitqueues
 * @fence_idr: maps fence-id to dma_fence for userspace visible fence
 *    seqno, protected by submitqueue lock
 * @lock: submitqueue lock
 * @ref: reference count
 * @entity: the submit job-queue
 */
struct msm_gpu_submitqueue {
	int id;
	u32 flags;
	u32 ring_nr;
	int faults;
	uint32_t last_fence;
	struct msm_file_private *ctx;
	struct list_head node;
	struct idr fence_idr;
	struct mutex lock;
	struct kref ref;
	struct drm_sched_entity *entity;
};

struct msm_gpu_state_bo {
	u64 iova;
	size_t size;
	void *data;
	bool encoded;
};

struct msm_gpu_state {
	struct kref ref;
	struct timespec64 time;

	struct {
		u64 iova;
		u32 fence;
		u32 seqno;
		u32 rptr;
		u32 wptr;
		void *data;
		int data_size;
		bool encoded;
	} ring[MSM_GPU_MAX_RINGS];

	struct msm_gpu_fault_info fault_info;

	int nr_bos;
	struct msm_gpu_state_bo *bos;
};

static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
{
	msm_writel(data, gpu->mmio + (reg << 2));
}

static inline u32 gpu_read(struct msm_gpu *gpu, u32 reg)
{
	return msm_readl(gpu->mmio + (reg << 2));
}

static inline void gpu_rmw(struct msm_gpu *gpu, u32 reg, u32 mask, u32 or)
{
	msm_rmw(gpu->mmio + (reg << 2), mask, or);
}

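/*
 * Example (sketch, REG_EXAMPLE_CNTL is a made-up register name): clear
 * the low byte of a control register and then set bit 0, in a single
 * read-modify-write:
 *
 *	gpu_rmw(gpu, REG_EXAMPLE_CNTL, 0xff, BIT(0));
 */
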
static inline u64 gpu_read64(struct msm_gpu *gpu, u32 lo, u32 hi)
{
	u64 val;

	/*
	 * Why not a readq here? Two reasons: 1) many of the LO registers are
	 * not quad word aligned and 2) the GPU hardware designers have a bit
	 * of a history of putting registers where they fit, especially in
	 * spins.  The longer a GPU family goes the higher the chance that
	 * we'll get burned.  We could do a series of validity checks if we
	 * wanted to, but really is a readq() that much better? Nah.
	 */

	/*
	 * For some lo/hi registers (like perfcounters), the hi value is
	 * latched when the lo is read, so make sure to read the lo first to
	 * trigger that.
	 */
	val = (u64) msm_readl(gpu->mmio + (lo << 2));
	val |= ((u64) msm_readl(gpu->mmio + (hi << 2)) << 32);

	return val;
}

static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val)
{
	/* Why not a writeq here? Read the screed above */
	msm_writel(lower_32_bits(val), gpu->mmio + (lo << 2));
	msm_writel(upper_32_bits(val), gpu->mmio + (hi << 2));
}

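/*
 * Example (sketch, the register names are made up): sample a 64-bit
 * counter split across a lo/hi pair, letting gpu_read64() deal with
 * the read ordering described above:
 *
 *	u64 cycles = gpu_read64(gpu, REG_EXAMPLE_PERFCTR_LO,
 *			REG_EXAMPLE_PERFCTR_HI);
 */
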
int msm_gpu_pm_suspend(struct msm_gpu *gpu);
int msm_gpu_pm_resume(struct msm_gpu *gpu);

int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx);
struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
		u32 id);
int msm_submitqueue_create(struct drm_device *drm,
		struct msm_file_private *ctx,
		u32 prio, u32 flags, u32 *id);
int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
		struct drm_msm_submitqueue_query *args);
int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id);
void msm_submitqueue_close(struct msm_file_private *ctx);

void msm_submitqueue_destroy(struct kref *kref);

void __msm_file_private_destroy(struct kref *kref);

static inline void msm_file_private_put(struct msm_file_private *ctx)
{
	kref_put(&ctx->ref, __msm_file_private_destroy);
}

static inline struct msm_file_private *msm_file_private_get(
	struct msm_file_private *ctx)
{
	kref_get(&ctx->ref);
	return ctx;
}

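/*
 * Example (illustrative sketch): reference counting follows the usual
 * kref pattern; anything that stashes a msm_file_private pointer takes
 * a reference and drops it when done:
 *
 *	queue->ctx = msm_file_private_get(ctx);
 *	...
 *	msm_file_private_put(queue->ctx);
 */
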
void msm_devfreq_init(struct msm_gpu *gpu);
void msm_devfreq_cleanup(struct msm_gpu *gpu);
void msm_devfreq_resume(struct msm_gpu *gpu);
void msm_devfreq_suspend(struct msm_gpu *gpu);
void msm_devfreq_boost(struct msm_gpu *gpu, unsigned factor);
void msm_devfreq_active(struct msm_gpu *gpu);
void msm_devfreq_idle(struct msm_gpu *gpu);

int msm_gpu_hw_init(struct msm_gpu *gpu);

void msm_gpu_perfcntr_start(struct msm_gpu *gpu);
void msm_gpu_perfcntr_stop(struct msm_gpu *gpu);
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
		uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs);

void msm_gpu_retire(struct msm_gpu *gpu);
void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit);

int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
		const char *name, struct msm_gpu_config *config);

struct msm_gem_address_space *
msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task);

void msm_gpu_cleanup(struct msm_gpu *gpu);

struct msm_gpu *adreno_load_gpu(struct drm_device *dev);
void __init adreno_register(void);
void __exit adreno_unregister(void);

static inline void msm_submitqueue_put(struct msm_gpu_submitqueue *queue)
{
	if (queue)
		kref_put(&queue->ref, msm_submitqueue_destroy);
}

static inline struct msm_gpu_state *msm_gpu_crashstate_get(struct msm_gpu *gpu)
{
	struct msm_gpu_state *state = NULL;

	mutex_lock(&gpu->lock);

	if (gpu->crashstate) {
		kref_get(&gpu->crashstate->ref);
		state = gpu->crashstate;
	}

	mutex_unlock(&gpu->lock);

	return state;
}

static inline void msm_gpu_crashstate_put(struct msm_gpu *gpu)
{
	mutex_lock(&gpu->lock);

	if (gpu->crashstate) {
		if (gpu->funcs->gpu_state_put(gpu->crashstate))
			gpu->crashstate = NULL;
	}

	mutex_unlock(&gpu->lock);
}

/*
 * Simple macro to semi-cleanly add the MAP_PRIV flag for targets that can
 * support expanded privileges
 */
#define check_apriv(gpu, flags) \
	(((gpu)->hw_apriv ? MSM_BO_MAP_PRIV : 0) | (flags))

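/*
 * Example (sketch, assuming a caller allocating a kernel-internal
 * buffer): pass the BO flags through check_apriv() so MSM_BO_MAP_PRIV
 * is only added on hardware that supports expanded apriv:
 *
 *	msm_gem_new(dev, size, check_apriv(gpu, MSM_BO_WC));
 */
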
#endif /* __MSM_GPU_H__ */