1 /* SPDX-License-Identifier: GPL-2.0 */
3 /* Copyright (C) 2015-2018 Etnaviv Project */
6 #ifndef __ETNAVIV_GPU_H__
7 #define __ETNAVIV_GPU_H__
9 #include "etnaviv_cmdbuf.h"
10 #include "etnaviv_gem.h"
11 #include "etnaviv_mmu.h"
12 #include "etnaviv_drv.h"
14 struct etnaviv_gem_submit;
15 struct etnaviv_vram_mapping;
17 struct etnaviv_chip_identity {
24 /* Supported feature fields. */
27 /* Supported minor feature fields. */
41 /* Number of streams supported. */
44 /* Total number of temporary registers per thread. */
47 /* Maximum number of threads. */
50 /* Number of shader cores. */
51 u32 shader_core_count;
53 /* Size of the vertex cache. */
54 u32 vertex_cache_size;
56 /* Number of entries in the vertex output buffer. */
57 u32 vertex_output_buffer_size;
59 /* Number of pixel pipes. */
62 /* Number of instructions. */
63 u32 instruction_count;
65 /* Number of constants. */
71 /* Number of varyings */
75 enum etnaviv_sec_mode {
81 struct etnaviv_event {
82 struct dma_fence *fence;
83 struct etnaviv_gem_submit *submit;
85 void (*sync_point)(struct etnaviv_gpu *gpu, struct etnaviv_event *event);
88 struct etnaviv_cmdbuf_suballoc;
92 #define ETNA_NR_EVENTS 30
95 struct drm_device *drm;
96 struct thermal_cooling_device *cooling;
99 struct etnaviv_chip_identity identity;
100 enum etnaviv_sec_mode sec_mode;
101 struct workqueue_struct *wq;
102 struct drm_gpu_scheduler sched;
107 struct etnaviv_cmdbuf buffer;
110 /* event management: */
111 DECLARE_BITMAP(event_bitmap, ETNA_NR_EVENTS);
112 struct etnaviv_event event[ETNA_NR_EVENTS];
113 struct completion event_free;
114 spinlock_t event_spinlock;
118 /* Fencing support */
119 struct mutex fence_lock;
120 struct idr fence_idr;
123 wait_queue_head_t fence_event;
125 spinlock_t fence_spinlock;
127 /* worker for handling 'sync' points: */
128 struct work_struct sync_point_work;
129 int sync_point_event;
132 u32 hangcheck_dma_addr;
137 struct etnaviv_iommu_context *mmu_context;
138 unsigned int flush_seq;
143 struct clk *clk_core;
144 struct clk *clk_shader;
146 unsigned int freq_scale;
147 unsigned long base_rate_core;
148 unsigned long base_rate_shader;
151 static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data)
153 writel(data, gpu->mmio + reg);
156 static inline u32 gpu_read(struct etnaviv_gpu *gpu, u32 reg)
158 return readl(gpu->mmio + reg);
/*
 * Public GPU-core API.  Prototypes only; the implementations live in a
 * separate .c file not visible here, so all behavioral notes below are
 * reviewer assumptions to be confirmed against the implementation.
 * NOTE(review): the leading numbers on each line are original file line
 * numbers left over from extraction; gaps in them mean dropped lines
 * (e.g. the #endif matching the CONFIG_DEBUG_FS #ifdef below is missing
 * from this view).
 */
/* Look up one queryable GPU parameter into *value; presumably 0 / -errno (kernel convention) — confirm. */
161 int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value);
161 
163 int etnaviv_gpu_init(struct etnaviv_gpu *gpu);
/* Presumably fills gpu->identity from a built-in hardware database, returning true on a match — confirm. */
164 bool etnaviv_fill_identity_from_hwdb(struct etnaviv_gpu *gpu);
166 #ifdef CONFIG_DEBUG_FS
/* debugfs dump helper; only declared when CONFIG_DEBUG_FS is enabled. */
167 int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m);
170 void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu);
171 void etnaviv_gpu_retire(struct etnaviv_gpu *gpu);
/* Wait for a fence seqno up to *timeout; interruptible by signals per the name — confirm. */
172 int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
173 u32 fence, struct drm_etnaviv_timespec *timeout);
/* Wait until the given GEM object is no longer busy on this GPU. */
174 int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
175 struct etnaviv_gem_object *etnaviv_obj,
176 struct drm_etnaviv_timespec *timeout);
/* Queue a submit; returns its dma_fence — ownership/refcount semantics not visible here, verify at caller. */
177 struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit);
/* Runtime-PM get/put pair; _get_sync presumably powers the GPU up synchronously — confirm. */
178 int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu);
179 void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu);
180 int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms);
/* Kick the front end fetching at @address with @prefetch words — confirm units against implementation. */
181 void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch);
/* Platform driver instance, defined in the .c file. */
183 extern struct platform_driver etnaviv_gpu_driver;
185 #endif /* __ETNAVIV_GPU_H__ */