/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"

#include "sdma0/sdma0_4_2_offset.h"
#include "sdma0/sdma0_4_2_sh_mask.h"
#include "sdma1/sdma1_4_2_offset.h"
#include "sdma1/sdma1_4_2_sh_mask.h"
#include "sdma2/sdma2_4_2_2_offset.h"
#include "sdma2/sdma2_4_2_2_sh_mask.h"
#include "sdma3/sdma3_4_2_2_offset.h"
#include "sdma3/sdma3_4_2_2_sh_mask.h"
#include "sdma4/sdma4_4_2_2_offset.h"
#include "sdma4/sdma4_4_2_2_sh_mask.h"
#include "sdma5/sdma5_4_2_2_offset.h"
#include "sdma5/sdma5_4_2_2_sh_mask.h"
#include "sdma6/sdma6_4_2_2_offset.h"
#include "sdma6/sdma6_4_2_2_sh_mask.h"
#include "sdma7/sdma7_4_2_2_offset.h"
#include "sdma7/sdma7_4_2_2_sh_mask.h"
#include "sdma0/sdma0_4_1_default.h"

#include "soc15_common.h"
#include "soc15.h"
#include "vega10_sdma_pkt_open.h"

#include "ivsrcid/sdma0/irqsrcs_sdma0_4_0.h"
#include "ivsrcid/sdma1/irqsrcs_sdma1_4_0.h"

#include "amdgpu_ras.h"
#include "sdma_v4_4.h"

MODULE_FIRMWARE("amdgpu/vega10_sdma.bin");
MODULE_FIRMWARE("amdgpu/vega10_sdma1.bin");
MODULE_FIRMWARE("amdgpu/vega12_sdma.bin");
MODULE_FIRMWARE("amdgpu/vega12_sdma1.bin");
MODULE_FIRMWARE("amdgpu/vega20_sdma.bin");
MODULE_FIRMWARE("amdgpu/vega20_sdma1.bin");
MODULE_FIRMWARE("amdgpu/raven_sdma.bin");
MODULE_FIRMWARE("amdgpu/picasso_sdma.bin");
MODULE_FIRMWARE("amdgpu/raven2_sdma.bin");
MODULE_FIRMWARE("amdgpu/arcturus_sdma.bin");
MODULE_FIRMWARE("amdgpu/renoir_sdma.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_sdma.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_sdma.bin");

#define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK  0x000000F8L
#define SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK 0xFC000000L

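/*
 * Convenience wrappers: translate an SDMA0-relative register offset into the
 * absolute offset of the given SDMA instance before accessing it. Both
 * expect an "adev" pointer to be in scope at the call site.
 */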
#define WREG32_SDMA(instance, offset, value) \
        WREG32(sdma_v4_0_get_reg_offset(adev, (instance), (offset)), value)
#define RREG32_SDMA(instance, offset) \
        RREG32(sdma_v4_0_get_reg_offset(adev, (instance), (offset)))

static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev);

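/*
 * "Golden" register settings: (mask, value) pairs applied at init to
 * override hardware defaults with validated values for each ASIC revision,
 * programmed via soc15_program_register_sequence().
 */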
static const struct soc15_reg_golden golden_settings_sdma_4[] = {
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xff000ff0, 0x3f000100),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_IB_CNTL, 0x800f0100, 0x00000100),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_PAGE_IB_CNTL, 0x800f0100, 0x00000100),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_POWER_CNTL, 0x003ff006, 0x0003c000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_IB_CNTL, 0x800f0100, 0x00000100),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0100, 0x00000100),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_IB_CNTL, 0x800f0100, 0x00000100),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_PAGE_IB_CNTL, 0x800f0100, 0x00000100),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_POWER_CNTL, 0x003ff000, 0x0003c000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_IB_CNTL, 0x800f0100, 0x00000100),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_IB_CNTL, 0x800f0100, 0x00000100),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_WATERMK, 0xfc000000, 0x00000000)
};

static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
};

static const struct soc15_reg_golden golden_settings_sdma_vg12[] = {
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
};

static const struct soc15_reg_golden golden_settings_sdma_4_1[] = {
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xffffffff, 0x3f000100),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_POWER_CNTL, 0xfc3fffff, 0x40000051),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003e0),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000)
};

static const struct soc15_reg_golden golden_settings_sdma0_4_2_init[] = {
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff0, 0x00403000),
};

static const struct soc15_reg_golden golden_settings_sdma0_4_2[] = {
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xffffffff, 0x3f000100),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_PAGE_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RD_BURST_CNTL, 0x0000000f, 0x00000003),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff0, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC2_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC3_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC4_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC5_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC6_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
};

static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = {
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_PAGE_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RD_BURST_CNTL, 0x0000000f, 0x00000003),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff0, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC2_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC3_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC4_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC5_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC6_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC7_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
};

static const struct soc15_reg_golden golden_settings_sdma_rv1[] = {
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00000002),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00000002)
};

static const struct soc15_reg_golden golden_settings_sdma_rv2[] = {
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00003001),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00003001)
};

static const struct soc15_reg_golden golden_settings_sdma_arct[] = {
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_UTCL1_TIMEOUT, 0xffffffff, 0x00010001)
};

static const struct soc15_reg_golden golden_settings_sdma_aldebaran[] = {
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
        SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
        SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
        SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
        SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
        SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
        SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
};

static const struct soc15_reg_golden golden_settings_sdma_4_3[] = {
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xffffffff, 0x3f000100),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00000002),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00000002),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_POWER_CNTL, 0x003fff07, 0x40000051),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003e0),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x03fbe1fe)
};

static const struct soc15_ras_field_entry sdma_v4_0_ras_fields[] = {
        { "SDMA_UCODE_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_UCODE_BUF_SED),
        0, 0,
        },
        { "SDMA_RB_CMD_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_RB_CMD_BUF_SED),
        0, 0,
        },
        { "SDMA_IB_CMD_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_IB_CMD_BUF_SED),
        0, 0,
        },
        { "SDMA_UTCL1_RD_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_UTCL1_RD_FIFO_SED),
        0, 0,
        },
        { "SDMA_UTCL1_RDBST_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_UTCL1_RDBST_FIFO_SED),
        0, 0,
        },
        { "SDMA_DATA_LUT_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_DATA_LUT_FIFO_SED),
        0, 0,
        },
        { "SDMA_MBANK_DATA_BUF0_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF0_SED),
        0, 0,
        },
        { "SDMA_MBANK_DATA_BUF1_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF1_SED),
        0, 0,
        },
        { "SDMA_MBANK_DATA_BUF2_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF2_SED),
        0, 0,
        },
        { "SDMA_MBANK_DATA_BUF3_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF3_SED),
        0, 0,
        },
        { "SDMA_MBANK_DATA_BUF4_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF4_SED),
        0, 0,
        },
        { "SDMA_MBANK_DATA_BUF5_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF5_SED),
        0, 0,
        },
        { "SDMA_MBANK_DATA_BUF6_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF6_SED),
        0, 0,
        },
        { "SDMA_MBANK_DATA_BUF7_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF7_SED),
        0, 0,
        },
        { "SDMA_MBANK_DATA_BUF8_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF8_SED),
        0, 0,
        },
        { "SDMA_MBANK_DATA_BUF9_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF9_SED),
        0, 0,
        },
        { "SDMA_MBANK_DATA_BUF10_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF10_SED),
        0, 0,
        },
        { "SDMA_MBANK_DATA_BUF11_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF11_SED),
        0, 0,
        },
        { "SDMA_MBANK_DATA_BUF12_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF12_SED),
        0, 0,
        },
        { "SDMA_MBANK_DATA_BUF13_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF13_SED),
        0, 0,
        },
        { "SDMA_MBANK_DATA_BUF14_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF14_SED),
        0, 0,
        },
        { "SDMA_MBANK_DATA_BUF15_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF15_SED),
        0, 0,
        },
        { "SDMA_SPLIT_DAT_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_SPLIT_DAT_BUF_SED),
        0, 0,
        },
        { "SDMA_MC_WR_ADDR_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MC_WR_ADDR_FIFO_SED),
        0, 0,
        },
};

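/*
 * Translate an SDMA0-relative register offset into the absolute offset for
 * the requested SDMA instance. Instances 2-7 (only present on Arcturus) are
 * addressed through a different register segment than instances 0/1, hence
 * the [0][1] segment index below.
 */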
static u32 sdma_v4_0_get_reg_offset(struct amdgpu_device *adev,
                u32 instance, u32 offset)
{
        switch (instance) {
        case 0:
                return (adev->reg_offset[SDMA0_HWIP][0][0] + offset);
        case 1:
                return (adev->reg_offset[SDMA1_HWIP][0][0] + offset);
        case 2:
                return (adev->reg_offset[SDMA2_HWIP][0][1] + offset);
        case 3:
                return (adev->reg_offset[SDMA3_HWIP][0][1] + offset);
        case 4:
                return (adev->reg_offset[SDMA4_HWIP][0][1] + offset);
        case 5:
                return (adev->reg_offset[SDMA5_HWIP][0][1] + offset);
        case 6:
                return (adev->reg_offset[SDMA6_HWIP][0][1] + offset);
        case 7:
                return (adev->reg_offset[SDMA7_HWIP][0][1] + offset);
        default:
                break;
        }
        return 0;
}

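/*
 * Map an SDMA instance number to its interrupt-handler client ID;
 * sdma_v4_0_irq_id_to_seq() below performs the reverse lookup.
 */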
static unsigned sdma_v4_0_seq_to_irq_id(int seq_num)
{
        switch (seq_num) {
        case 0:
                return SOC15_IH_CLIENTID_SDMA0;
        case 1:
                return SOC15_IH_CLIENTID_SDMA1;
        case 2:
                return SOC15_IH_CLIENTID_SDMA2;
        case 3:
                return SOC15_IH_CLIENTID_SDMA3;
        case 4:
                return SOC15_IH_CLIENTID_SDMA4;
        case 5:
                return SOC15_IH_CLIENTID_SDMA5;
        case 6:
                return SOC15_IH_CLIENTID_SDMA6;
        case 7:
                return SOC15_IH_CLIENTID_SDMA7;
        default:
                break;
        }
        return -EINVAL;
}

static int sdma_v4_0_irq_id_to_seq(unsigned client_id)
{
        switch (client_id) {
        case SOC15_IH_CLIENTID_SDMA0:
                return 0;
        case SOC15_IH_CLIENTID_SDMA1:
                return 1;
        case SOC15_IH_CLIENTID_SDMA2:
                return 2;
        case SOC15_IH_CLIENTID_SDMA3:
                return 3;
        case SOC15_IH_CLIENTID_SDMA4:
                return 4;
        case SOC15_IH_CLIENTID_SDMA5:
                return 5;
        case SOC15_IH_CLIENTID_SDMA6:
                return 6;
        case SOC15_IH_CLIENTID_SDMA7:
                return 7;
        default:
                break;
        }
        return -EINVAL;
}

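/*
 * Apply the golden register sequences selected by this device's SDMA IP
 * version (and, for Raven-class APUs, the APU flavor).
 */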
static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev)
{
        switch (adev->ip_versions[SDMA0_HWIP][0]) {
        case IP_VERSION(4, 0, 0):
                soc15_program_register_sequence(adev,
                                                golden_settings_sdma_4,
                                                ARRAY_SIZE(golden_settings_sdma_4));
                soc15_program_register_sequence(adev,
                                                golden_settings_sdma_vg10,
                                                ARRAY_SIZE(golden_settings_sdma_vg10));
                break;
        case IP_VERSION(4, 0, 1):
                soc15_program_register_sequence(adev,
                                                golden_settings_sdma_4,
                                                ARRAY_SIZE(golden_settings_sdma_4));
                soc15_program_register_sequence(adev,
                                                golden_settings_sdma_vg12,
                                                ARRAY_SIZE(golden_settings_sdma_vg12));
                break;
        case IP_VERSION(4, 2, 0):
                soc15_program_register_sequence(adev,
                                                golden_settings_sdma0_4_2_init,
                                                ARRAY_SIZE(golden_settings_sdma0_4_2_init));
                soc15_program_register_sequence(adev,
                                                golden_settings_sdma0_4_2,
                                                ARRAY_SIZE(golden_settings_sdma0_4_2));
                soc15_program_register_sequence(adev,
                                                golden_settings_sdma1_4_2,
                                                ARRAY_SIZE(golden_settings_sdma1_4_2));
                break;
        case IP_VERSION(4, 2, 2):
                soc15_program_register_sequence(adev,
                                                golden_settings_sdma_arct,
                                                ARRAY_SIZE(golden_settings_sdma_arct));
                break;
        case IP_VERSION(4, 4, 0):
                soc15_program_register_sequence(adev,
                                                golden_settings_sdma_aldebaran,
                                                ARRAY_SIZE(golden_settings_sdma_aldebaran));
                break;
        case IP_VERSION(4, 1, 0):
        case IP_VERSION(4, 1, 1):
                soc15_program_register_sequence(adev,
                                                golden_settings_sdma_4_1,
                                                ARRAY_SIZE(golden_settings_sdma_4_1));
                if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                        soc15_program_register_sequence(adev,
                                                        golden_settings_sdma_rv2,
                                                        ARRAY_SIZE(golden_settings_sdma_rv2));
                else
                        soc15_program_register_sequence(adev,
                                                        golden_settings_sdma_rv1,
                                                        ARRAY_SIZE(golden_settings_sdma_rv1));
                break;
        case IP_VERSION(4, 1, 2):
                soc15_program_register_sequence(adev,
                                                golden_settings_sdma_4_3,
                                                ARRAY_SIZE(golden_settings_sdma_4_3));
                break;
        default:
                break;
        }
}

static void sdma_v4_0_setup_ulv(struct amdgpu_device *adev)
{
        int i;

        /*
         * The only chips with SDMAv4 and ULV are VG10 and VG20.
         * Server SKUs take a different hysteresis setting from other SKUs.
         */
        switch (adev->ip_versions[SDMA0_HWIP][0]) {
        case IP_VERSION(4, 0, 0):
                if (adev->pdev->device == 0x6860)
                        break;
                return;
        case IP_VERSION(4, 2, 0):
                if (adev->pdev->device == 0x66a1)
                        break;
                return;
        default:
                return;
        }

        for (i = 0; i < adev->sdma.num_instances; i++) {
                uint32_t temp;

                temp = RREG32_SDMA(i, mmSDMA0_ULV_CNTL);
                temp = REG_SET_FIELD(temp, SDMA0_ULV_CNTL, HYSTERESIS, 0x0);
                WREG32_SDMA(i, mmSDMA0_ULV_CNTL, temp);
        }
}

/**
 * sdma_v4_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw). Note that this direct load
 * path is for emulation only; production Vega10 silicon must
 * load the firmware through the PSP.
 * Returns 0 on success, error on failure.
 */
static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
{
        int ret, i;

        for (i = 0; i < adev->sdma.num_instances; i++) {
                if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) ||
                    adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 0)) {
                        /* Arcturus & Aldebaran use the same FW memory
                         * for every SDMA instance
                         */
                        ret = amdgpu_sdma_init_microcode(adev, 0, true);
                        break;
                } else {
                        ret = amdgpu_sdma_init_microcode(adev, i, false);
                        if (ret)
                                return ret;
                }
        }

        return ret;
}

/**
 * sdma_v4_0_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current rptr from the hardware (VEGA10+).
 */
static uint64_t sdma_v4_0_ring_get_rptr(struct amdgpu_ring *ring)
{
        u64 *rptr;

        /* XXX check if swapping is necessary on BE */
        rptr = ((u64 *)ring->rptr_cpu_addr);

        DRM_DEBUG("rptr before shift == 0x%016llx\n", *rptr);
        return ((*rptr) >> 2);
}

/**
 * sdma_v4_0_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware (VEGA10+).
 */
static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        u64 wptr;

        if (ring->use_doorbell) {
                /* XXX check if swapping is necessary on BE */
                wptr = READ_ONCE(*((u64 *)ring->wptr_cpu_addr));
                DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
        } else {
                wptr = RREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR_HI);
                wptr = wptr << 32;
                wptr |= RREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR);
                DRM_DEBUG("wptr before shift [%i] wptr == 0x%016llx\n",
                                ring->me, wptr);
        }

        return wptr >> 2;
}

/**
 * sdma_v4_0_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware (VEGA10+).
 */
static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        DRM_DEBUG("Setting write pointer\n");
        if (ring->use_doorbell) {
                u64 *wb = (u64 *)ring->wptr_cpu_addr;

                DRM_DEBUG("Using doorbell -- "
                                "wptr_offs == 0x%08x "
                                "lower_32_bits(ring->wptr << 2) == 0x%08x "
                                "upper_32_bits(ring->wptr << 2) == 0x%08x\n",
                                ring->wptr_offs,
                                lower_32_bits(ring->wptr << 2),
                                upper_32_bits(ring->wptr << 2));
                /* XXX check if swapping is necessary on BE */
                WRITE_ONCE(*wb, (ring->wptr << 2));
                DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
                                ring->doorbell_index, ring->wptr << 2);
                WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
        } else {
                DRM_DEBUG("Not using doorbell -- "
                                "mmSDMA%i_GFX_RB_WPTR == 0x%08x "
                                "mmSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
                                ring->me,
                                lower_32_bits(ring->wptr << 2),
                                ring->me,
                                upper_32_bits(ring->wptr << 2));
                WREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR,
                            lower_32_bits(ring->wptr << 2));
                WREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR_HI,
                            upper_32_bits(ring->wptr << 2));
        }
}

/**
 * sdma_v4_0_page_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware (VEGA10+).
 */
static uint64_t sdma_v4_0_page_ring_get_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        u64 wptr;

        if (ring->use_doorbell) {
                /* XXX check if swapping is necessary on BE */
                wptr = READ_ONCE(*((u64 *)ring->wptr_cpu_addr));
        } else {
                wptr = RREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR_HI);
                wptr = wptr << 32;
                wptr |= RREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR);
        }

        return wptr >> 2;
}

/**
 * sdma_v4_0_page_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware (VEGA10+).
 */
static void sdma_v4_0_page_ring_set_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring->use_doorbell) {
                u64 *wb = (u64 *)ring->wptr_cpu_addr;

                /* XXX check if swapping is necessary on BE */
                WRITE_ONCE(*wb, (ring->wptr << 2));
                WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
        } else {
                uint64_t wptr = ring->wptr << 2;

                WREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR,
                            lower_32_bits(wptr));
                WREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR_HI,
                            upper_32_bits(wptr));
        }
}

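/**
 * sdma_v4_0_ring_insert_nop - pad the ring with NOP packets
 *
 * @ring: amdgpu ring pointer
 * @count: number of NOP dwords to insert
 *
 * When the engine supports burst NOP, the first dword is written as a
 * burst-NOP header whose COUNT field folds the remaining padding dwords
 * into a single packet; otherwise plain NOPs are emitted one by one.
 */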
static void sdma_v4_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
        struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
        int i;

        for (i = 0; i < count; i++)
                if (sdma && sdma->burst_nop && (i == 0))
                        amdgpu_ring_write(ring, ring->funcs->nop |
                                SDMA_PKT_NOP_HEADER_COUNT(count - 1));
                else
                        amdgpu_ring_write(ring, ring->funcs->nop);
}

/**
 * sdma_v4_0_ring_emit_ib - Schedule an IB on the DMA engine
 *
 * @ring: amdgpu ring pointer
 * @job: job to retrieve vmid from
 * @ib: IB object to schedule
 * @flags: unused
 *
 * Schedule an IB in the DMA ring (VEGA10).
 */
static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
                                   struct amdgpu_job *job,
                                   struct amdgpu_ib *ib,
                                   uint32_t flags)
{
        unsigned vmid = AMDGPU_JOB_GET_VMID(job);

        /* IB packet must end on an 8 DW boundary */
        sdma_v4_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);

        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
                          SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
        /* base must be 32-byte aligned */
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, ib->length_dw);
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, 0);
}

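/*
 * Emit a POLL_REGMEM packet: stall the engine until the 32-bit value at
 * either a register pair (addr0/addr1 as register offsets) or a memory
 * location (addr0/addr1 as a 64-bit address), masked with @mask, equals
 * @ref, optionally as part of an HDP flush. @inv sets the poll interval;
 * the retry count is capped at 0xfff.
 */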
static void sdma_v4_0_wait_reg_mem(struct amdgpu_ring *ring,
                                   int mem_space, int hdp,
                                   uint32_t addr0, uint32_t addr1,
                                   uint32_t ref, uint32_t mask,
                                   uint32_t inv)
{
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
                          SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(hdp) |
                          SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(mem_space) |
                          SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
        if (mem_space) {
                /* memory */
                amdgpu_ring_write(ring, addr0);
                amdgpu_ring_write(ring, addr1);
        } else {
                /* registers */
                amdgpu_ring_write(ring, addr0 << 2);
                amdgpu_ring_write(ring, addr1 << 2);
        }
        amdgpu_ring_write(ring, ref); /* reference */
        amdgpu_ring_write(ring, mask); /* mask */
        amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
                          SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(inv)); /* retry count, poll interval */
}

/**
 * sdma_v4_0_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
 *
 * @ring: amdgpu ring pointer
 *
 * Emit an hdp flush packet on the requested DMA ring.
 */
static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        u32 ref_and_mask = 0;
        const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;

        ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;

        sdma_v4_0_wait_reg_mem(ring, 0, 1,
                               adev->nbio.funcs->get_hdp_flush_done_offset(adev),
                               adev->nbio.funcs->get_hdp_flush_req_offset(adev),
                               ref_and_mask, ref_and_mask, 10);
}

/**
 * sdma_v4_0_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and DMA trap packet to generate
 * an interrupt if needed (VEGA10).
 */
static void sdma_v4_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
                                      unsigned flags)
{
        bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
        /* write the fence */
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
        /* zero in first two bits */
        BUG_ON(addr & 0x3);
        amdgpu_ring_write(ring, lower_32_bits(addr));
        amdgpu_ring_write(ring, upper_32_bits(addr));
        amdgpu_ring_write(ring, lower_32_bits(seq));

        /* optionally write high bits as well */
        if (write64bit) {
                addr += 4;
                amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
                /* zero in first two bits */
                BUG_ON(addr & 0x3);
                amdgpu_ring_write(ring, lower_32_bits(addr));
                amdgpu_ring_write(ring, upper_32_bits(addr));
                amdgpu_ring_write(ring, upper_32_bits(seq));
        }

        /* generate an interrupt */
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
        amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}

/**
 * sdma_v4_0_gfx_enable - enable the gfx async dma engines
 *
 * @adev: amdgpu_device pointer
 * @enable: enable SDMA RB/IB
 *
 * Control the gfx async dma ring buffers (VEGA10).
 */
static void sdma_v4_0_gfx_enable(struct amdgpu_device *adev, bool enable)
{
        u32 rb_cntl, ib_cntl;
        int i;

        amdgpu_sdma_unset_buffer_funcs_helper(adev);

        for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL);
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, enable ? 1 : 0);
                WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);
                ib_cntl = RREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL);
                ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, enable ? 1 : 0);
                WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl);
        }
}

/**
 * sdma_v4_0_rlc_stop - stop the compute async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the compute async dma queues (VEGA10).
 */
static void sdma_v4_0_rlc_stop(struct amdgpu_device *adev)
{
        /* XXX todo */
}

/**
 * sdma_v4_0_page_stop - stop the page async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the page async dma ring buffers (VEGA10).
 */
static void sdma_v4_0_page_stop(struct amdgpu_device *adev)
{
        u32 rb_cntl, ib_cntl;
        int i;

        amdgpu_sdma_unset_buffer_funcs_helper(adev);

        for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL);
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL,
                                        RB_ENABLE, 0);
                WREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL, rb_cntl);
                ib_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL);
                ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL,
                                        IB_ENABLE, 0);
                WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl);
        }
}

/**
 * sdma_v4_0_ctx_switch_enable - enable/disable the async dma engines context switch
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs context switch.
 *
 * Halt or unhalt the async dma engines context switch (VEGA10).
 */
static void sdma_v4_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
{
        u32 f32_cntl, phase_quantum = 0;
        int i;

        if (amdgpu_sdma_phase_quantum) {
                unsigned value = amdgpu_sdma_phase_quantum;
                unsigned unit = 0;

                while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
                                SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) {
                        value = (value + 1) >> 1;
                        unit++;
                }
                if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
                            SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) {
                        value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
                                 SDMA0_PHASE0_QUANTUM__VALUE__SHIFT);
                        unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
                                SDMA0_PHASE0_QUANTUM__UNIT__SHIFT);
                        WARN_ONCE(1,
                        "clamping sdma_phase_quantum to %uK clock cycles\n",
                                  value << unit);
                }
                phase_quantum =
                        value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT |
                        unit  << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT;
        }

        for (i = 0; i < adev->sdma.num_instances; i++) {
                f32_cntl = RREG32_SDMA(i, mmSDMA0_CNTL);
                f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
                                AUTO_CTXSW_ENABLE, enable ? 1 : 0);
                if (enable && amdgpu_sdma_phase_quantum) {
                        WREG32_SDMA(i, mmSDMA0_PHASE0_QUANTUM, phase_quantum);
                        WREG32_SDMA(i, mmSDMA0_PHASE1_QUANTUM, phase_quantum);
                        WREG32_SDMA(i, mmSDMA0_PHASE2_QUANTUM, phase_quantum);
                }
                WREG32_SDMA(i, mmSDMA0_CNTL, f32_cntl);

                /*
                 * Enable SDMA utilization. It's only supported on
                 * Arcturus for the moment and firmware version 14
                 * and above.
                 */
                if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) &&
                    adev->sdma.instance[i].fw_version >= 14)
                        WREG32_SDMA(i, mmSDMA0_PUB_DUMMY_REG2, enable);
                /* Extend page fault timeout to avoid interrupt storm */
                WREG32_SDMA(i, mmSDMA0_UTCL1_TIMEOUT, 0x00800080);
        }
}

/**
 * sdma_v4_0_enable - halt or unhalt the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines (VEGA10).
 */
static void sdma_v4_0_enable(struct amdgpu_device *adev, bool enable)
{
        u32 f32_cntl;
        int i;

        if (!enable) {
                sdma_v4_0_gfx_enable(adev, enable);
                sdma_v4_0_rlc_stop(adev);
                if (adev->sdma.has_page_queue)
                        sdma_v4_0_page_stop(adev);
        }

        for (i = 0; i < adev->sdma.num_instances; i++) {
                f32_cntl = RREG32_SDMA(i, mmSDMA0_F32_CNTL);
                f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
                WREG32_SDMA(i, mmSDMA0_F32_CNTL, f32_cntl);
        }
}

/*
 * sdma_v4_0_rb_cntl - get parameters for rb_cntl
 */
static uint32_t sdma_v4_0_rb_cntl(struct amdgpu_ring *ring, uint32_t rb_cntl)
{
        /* Set ring buffer size in dwords */
        uint32_t rb_bufsz = order_base_2(ring->ring_size / 4);

        rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
#ifdef __BIG_ENDIAN
        rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
        rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
                                RPTR_WRITEBACK_SWAP_ENABLE, 1);
#endif
        return rb_cntl;
}

/**
 * sdma_v4_0_gfx_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @i: instance to resume
 *
 * Set up the gfx DMA ring buffers and enable them (VEGA10).
 */
static void sdma_v4_0_gfx_resume(struct amdgpu_device *adev, unsigned int i)
{
        struct amdgpu_ring *ring = &adev->sdma.instance[i].ring;
        u32 rb_cntl, ib_cntl, wptr_poll_cntl;
        u32 doorbell;
        u32 doorbell_offset;
        u64 wptr_gpu_addr;

        rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL);
        rb_cntl = sdma_v4_0_rb_cntl(ring, rb_cntl);
        WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);

        /* Initialize the ring buffer's read and write pointers */
        WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR, 0);
        WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR_HI, 0);
        WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR, 0);
        WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_HI, 0);

        /* set the wb address whether it's enabled or not */
        WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR_ADDR_HI,
               upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
        WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR_ADDR_LO,
               lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);

        rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
                                RPTR_WRITEBACK_ENABLE, 1);

        WREG32_SDMA(i, mmSDMA0_GFX_RB_BASE, ring->gpu_addr >> 8);
        WREG32_SDMA(i, mmSDMA0_GFX_RB_BASE_HI, ring->gpu_addr >> 40);

        ring->wptr = 0;

        /* before programming wptr to a smaller value, minor_ptr_update must be set first */
        WREG32_SDMA(i, mmSDMA0_GFX_MINOR_PTR_UPDATE, 1);

        doorbell = RREG32_SDMA(i, mmSDMA0_GFX_DOORBELL);
        doorbell_offset = RREG32_SDMA(i, mmSDMA0_GFX_DOORBELL_OFFSET);

        doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE,
                                 ring->use_doorbell);
        doorbell_offset = REG_SET_FIELD(doorbell_offset,
                                        SDMA0_GFX_DOORBELL_OFFSET,
                                        OFFSET, ring->doorbell_index);
        WREG32_SDMA(i, mmSDMA0_GFX_DOORBELL, doorbell);
        WREG32_SDMA(i, mmSDMA0_GFX_DOORBELL_OFFSET, doorbell_offset);

        sdma_v4_0_ring_set_wptr(ring);

        /* set minor_ptr_update to 0 after wptr is programmed */
        WREG32_SDMA(i, mmSDMA0_GFX_MINOR_PTR_UPDATE, 0);

        /* setup the wptr shadow polling */
        wptr_gpu_addr = ring->wptr_gpu_addr;
        WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO,
                    lower_32_bits(wptr_gpu_addr));
        WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI,
                    upper_32_bits(wptr_gpu_addr));
        wptr_poll_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL);
        wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
                                       SDMA0_GFX_RB_WPTR_POLL_CNTL,
                                       F32_POLL_ENABLE, amdgpu_sriov_vf(adev) ? 1 : 0);
        WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, wptr_poll_cntl);

        /* enable DMA RB */
        rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
        WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);

        ib_cntl = RREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL);
        ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
#ifdef __BIG_ENDIAN
        ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
#endif
        /* enable DMA IBs */
        WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl);
}

1119 /**
1120  * sdma_v4_0_page_resume - setup and start the async dma engines
1121  *
1122  * @adev: amdgpu_device pointer
1123  * @i: instance to resume
1124  *
1125  * Set up the page DMA ring buffers and enable them (VEGA10).
1126  * Returns 0 for success, error for failure.
1127  */
1128 static void sdma_v4_0_page_resume(struct amdgpu_device *adev, unsigned int i)
1129 {
1130         struct amdgpu_ring *ring = &adev->sdma.instance[i].page;
1131         u32 rb_cntl, ib_cntl, wptr_poll_cntl;
1132         u32 doorbell;
1133         u32 doorbell_offset;
1134         u64 wptr_gpu_addr;
1135
1136         rb_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL);
1137         rb_cntl = sdma_v4_0_rb_cntl(ring, rb_cntl);
1138         WREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL, rb_cntl);
1139
1140         /* Initialize the ring buffer's read and write pointers */
1141         WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR, 0);
1142         WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR_HI, 0);
1143         WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR, 0);
1144         WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_HI, 0);
1145
1146         /* set the wb address whether it's enabled or not */
1147         WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR_ADDR_HI,
1148                upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
1149         WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR_ADDR_LO,
1150                lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);
1151
1152         rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL,
1153                                 RPTR_WRITEBACK_ENABLE, 1);
1154
1155         WREG32_SDMA(i, mmSDMA0_PAGE_RB_BASE, ring->gpu_addr >> 8);
1156         WREG32_SDMA(i, mmSDMA0_PAGE_RB_BASE_HI, ring->gpu_addr >> 40);
1157
1158         ring->wptr = 0;
1159
1160         /* before programming wptr to a smaller value, minor_ptr_update must be set first */
1161         WREG32_SDMA(i, mmSDMA0_PAGE_MINOR_PTR_UPDATE, 1);
1162
1163         doorbell = RREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL);
1164         doorbell_offset = RREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL_OFFSET);
1165
1166         doorbell = REG_SET_FIELD(doorbell, SDMA0_PAGE_DOORBELL, ENABLE,
1167                                  ring->use_doorbell);
1168         doorbell_offset = REG_SET_FIELD(doorbell_offset,
1169                                         SDMA0_PAGE_DOORBELL_OFFSET,
1170                                         OFFSET, ring->doorbell_index);
1171         WREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL, doorbell);
1172         WREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL_OFFSET, doorbell_offset);
1173
1174         /* paging queue doorbell range is setup at sdma_v4_0_gfx_resume */
1175         sdma_v4_0_page_ring_set_wptr(ring);
1176
1177         /* set minor_ptr_update to 0 after wptr is programmed */
1178         WREG32_SDMA(i, mmSDMA0_PAGE_MINOR_PTR_UPDATE, 0);
1179
1180         /* setup the wptr shadow polling */
1181         wptr_gpu_addr = ring->wptr_gpu_addr;
1182         WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_LO,
1183                     lower_32_bits(wptr_gpu_addr));
1184         WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_HI,
1185                     upper_32_bits(wptr_gpu_addr));
1186         wptr_poll_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL);
1187         wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
1188                                        SDMA0_PAGE_RB_WPTR_POLL_CNTL,
1189                                        F32_POLL_ENABLE, amdgpu_sriov_vf(adev) ? 1 : 0);
1190         WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, wptr_poll_cntl);
1191
1192         /* enable DMA RB */
1193         rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL, RB_ENABLE, 1);
1194         WREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL, rb_cntl);
1195
1196         ib_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL);
1197         ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL, IB_ENABLE, 1);
1198 #ifdef __BIG_ENDIAN
1199         ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL, IB_SWAP_ENABLE, 1);
1200 #endif
1201         /* enable DMA IBs */
1202         WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl);
1203 }
1204
1205 static void
1206 sdma_v4_1_update_power_gating(struct amdgpu_device *adev, bool enable)
1207 {
1208         uint32_t def, data;
1209
1210         if (enable && (adev->pg_flags & AMD_PG_SUPPORT_SDMA)) {
1211                 /* enable idle interrupt */
1212                 def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL));
1213                 data |= SDMA0_CNTL__CTXEMPTY_INT_ENABLE_MASK;
1214
1215                 if (data != def)
1216                         WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL), data);
1217         } else {
1218                 /* disable idle interrupt */
1219                 def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL));
1220                 data &= ~SDMA0_CNTL__CTXEMPTY_INT_ENABLE_MASK;
1221                 if (data != def)
1222                         WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL), data);
1223         }
1224 }
1225
1226 static void sdma_v4_1_init_power_gating(struct amdgpu_device *adev)
1227 {
1228         uint32_t def, data;
1229
1230         /* Enable HW based PG. */
1231         def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL));
1232         data |= SDMA0_POWER_CNTL__PG_CNTL_ENABLE_MASK;
1233         if (data != def)
1234                 WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), data);
1235
1236         /* enable interrupt */
1237         def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL));
1238         data |= SDMA0_CNTL__CTXEMPTY_INT_ENABLE_MASK;
1239         if (data != def)
1240                 WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL), data);
1241
1242         /* Configure hold time to filter invalid power on/off requests. Use the default for now */
1243         def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL));
1244         data &= ~SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK;
1245         data |= (mmSDMA0_POWER_CNTL_DEFAULT & SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK);
1246         /* Configure switch time for hysteresis purposes. Use the default for now */
1247         data &= ~SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK;
1248         data |= (mmSDMA0_POWER_CNTL_DEFAULT & SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK);
1249         if (data != def)
1250                 WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), data);
1251 }
1252
1253 static void sdma_v4_0_init_pg(struct amdgpu_device *adev)
1254 {
1255         if (!(adev->pg_flags & AMD_PG_SUPPORT_SDMA))
1256                 return;
1257
1258         switch (adev->ip_versions[SDMA0_HWIP][0]) {
1259         case IP_VERSION(4, 1, 0):
1260         case IP_VERSION(4, 1, 1):
1261         case IP_VERSION(4, 1, 2):
1262                 sdma_v4_1_init_power_gating(adev);
1263                 sdma_v4_1_update_power_gating(adev, true);
1264                 break;
1265         default:
1266                 break;
1267         }
1268 }
1269
1270 /**
1271  * sdma_v4_0_rlc_resume - setup and start the async dma engines
1272  *
1273  * @adev: amdgpu_device pointer
1274  *
1275  * Set up the compute DMA queues and enable them (VEGA10); here this
1276  * only initializes SDMA power gating. Returns 0 for success.
1277  */
1278 static int sdma_v4_0_rlc_resume(struct amdgpu_device *adev)
1279 {
1280         sdma_v4_0_init_pg(adev);
1281
1282         return 0;
1283 }
1284
1285 /**
1286  * sdma_v4_0_load_microcode - load the sDMA ME ucode
1287  *
1288  * @adev: amdgpu_device pointer
1289  *
1290  * Loads the sDMA0/1 ucode.
1291  * Returns 0 for success, -EINVAL if the ucode is not available.
1292  */
1293 static int sdma_v4_0_load_microcode(struct amdgpu_device *adev)
1294 {
1295         const struct sdma_firmware_header_v1_0 *hdr;
1296         const __le32 *fw_data;
1297         u32 fw_size;
1298         int i, j;
1299
1300         /* halt the MEs */
1301         sdma_v4_0_enable(adev, false);
1302
1303         for (i = 0; i < adev->sdma.num_instances; i++) {
1304                 if (!adev->sdma.instance[i].fw)
1305                         return -EINVAL;
1306
1307                 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
1308                 amdgpu_ucode_print_sdma_hdr(&hdr->header);
1309                 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
1310
1311                 fw_data = (const __le32 *)
1312                         (adev->sdma.instance[i].fw->data +
1313                                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1314
1315                 WREG32_SDMA(i, mmSDMA0_UCODE_ADDR, 0);
1316
1317                 for (j = 0; j < fw_size; j++)
1318                         WREG32_SDMA(i, mmSDMA0_UCODE_DATA,
1319                                     le32_to_cpup(fw_data++));
1320
1321                 WREG32_SDMA(i, mmSDMA0_UCODE_ADDR,
1322                             adev->sdma.instance[i].fw_version);
1323         }
1324
1325         return 0;
1326 }
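
/*
 * Note on the load loop above: fw_size is in dwords (ucode_size_bytes / 4),
 * and UCODE_ADDR is assumed to auto-increment on each UCODE_DATA write, so
 * resetting it to 0 and then streaming fw_size dwords loads the whole
 * image. Writing the firmware version to UCODE_ADDR afterwards follows the
 * convention used for register-based ucode loads elsewhere in amdgpu.
 */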
1327
1328 /**
1329  * sdma_v4_0_start - setup and start the async dma engines
1330  *
1331  * @adev: amdgpu_device pointer
1332  *
1333  * Set up the DMA engines and enable them (VEGA10).
1334  * Returns 0 for success, error for failure.
1335  */
1336 static int sdma_v4_0_start(struct amdgpu_device *adev)
1337 {
1338         struct amdgpu_ring *ring;
1339         int i, r = 0;
1340
1341         if (amdgpu_sriov_vf(adev)) {
1342                 sdma_v4_0_ctx_switch_enable(adev, false);
1343                 sdma_v4_0_enable(adev, false);
1344         } else {
1346                 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1347                         r = sdma_v4_0_load_microcode(adev);
1348                         if (r)
1349                                 return r;
1350                 }
1351
1352                 /* unhalt the MEs */
1353                 sdma_v4_0_enable(adev, true);
1354                 /* enable sdma ring preemption */
1355                 sdma_v4_0_ctx_switch_enable(adev, true);
1356         }
1357
1358         /* start the gfx rings and rlc compute queues */
1359         for (i = 0; i < adev->sdma.num_instances; i++) {
1360                 uint32_t temp;
1361
1362                 WREG32_SDMA(i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL, 0);
1363                 sdma_v4_0_gfx_resume(adev, i);
1364                 if (adev->sdma.has_page_queue)
1365                         sdma_v4_0_page_resume(adev, i);
1366
1367                 /* always set the utc l1 enable flag to 1 */
1368                 temp = RREG32_SDMA(i, mmSDMA0_CNTL);
1369                 temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
1370                 WREG32_SDMA(i, mmSDMA0_CNTL, temp);
1371
1372                 if (!amdgpu_sriov_vf(adev)) {
1373                         /* unhalt engine */
1374                         temp = RREG32_SDMA(i, mmSDMA0_F32_CNTL);
1375                         temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
1376                         WREG32_SDMA(i, mmSDMA0_F32_CNTL, temp);
1377                 }
1378         }
1379
1380         if (amdgpu_sriov_vf(adev)) {
1381                 sdma_v4_0_ctx_switch_enable(adev, true);
1382                 sdma_v4_0_enable(adev, true);
1383         } else {
1384                 r = sdma_v4_0_rlc_resume(adev);
1385                 if (r)
1386                         return r;
1387         }
1388
1389         for (i = 0; i < adev->sdma.num_instances; i++) {
1390                 ring = &adev->sdma.instance[i].ring;
1391
1392                 r = amdgpu_ring_test_helper(ring);
1393                 if (r)
1394                         return r;
1395
1396                 if (adev->sdma.has_page_queue) {
1397                         struct amdgpu_ring *page = &adev->sdma.instance[i].page;
1398
1399                         r = amdgpu_ring_test_helper(page);
1400                         if (r)
1401                                 return r;
1402
1403                         if (adev->mman.buffer_funcs_ring == page)
1404                                 amdgpu_ttm_set_buffer_funcs_status(adev, true);
1405                 }
1406
1407                 if (adev->mman.buffer_funcs_ring == ring)
1408                         amdgpu_ttm_set_buffer_funcs_status(adev, true);
1409         }
1410
1411         return r;
1412 }
1413
1414 /**
1415  * sdma_v4_0_ring_test_ring - simple async dma engine test
1416  *
1417  * @ring: amdgpu_ring structure holding ring information
1418  *
1419  * Test the DMA engine by using it to write a value to
1420  * memory (VEGA10).
1421  * Returns 0 for success, error for failure.
1422  */
1423 static int sdma_v4_0_ring_test_ring(struct amdgpu_ring *ring)
1424 {
1425         struct amdgpu_device *adev = ring->adev;
1426         unsigned i;
1427         unsigned index;
1428         int r;
1429         u32 tmp;
1430         u64 gpu_addr;
1431
1432         r = amdgpu_device_wb_get(adev, &index);
1433         if (r)
1434                 return r;
1435
1436         gpu_addr = adev->wb.gpu_addr + (index * 4);
1437         tmp = 0xCAFEDEAD;
1438         adev->wb.wb[index] = cpu_to_le32(tmp);
1439
1440         r = amdgpu_ring_alloc(ring, 5);
1441         if (r)
1442                 goto error_free_wb;
1443
1444         amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
1445                           SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
1446         amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
1447         amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
1448         amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0));
1449         amdgpu_ring_write(ring, 0xDEADBEEF);
1450         amdgpu_ring_commit(ring);
1451
1452         for (i = 0; i < adev->usec_timeout; i++) {
1453                 tmp = le32_to_cpu(adev->wb.wb[index]);
1454                 if (tmp == 0xDEADBEEF)
1455                         break;
1456                 udelay(1);
1457         }
1458
1459         if (i >= adev->usec_timeout)
1460                 r = -ETIMEDOUT;
1461
1462 error_free_wb:
1463         amdgpu_device_wb_free(adev, index);
1464         return r;
1465 }
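
/*
 * For reference, the five dwords submitted by the ring test above form a
 * single linear write packet (layout read off the emit calls):
 *
 *   dw0: header  - OP = SDMA_OP_WRITE, SUB_OP = SDMA_SUBOP_WRITE_LINEAR
 *   dw1: dst lo  - lower_32_bits(gpu_addr)
 *   dw2: dst hi  - upper_32_bits(gpu_addr)
 *   dw3: count   - COUNT(0); the field is stored as n - 1, so one dword
 *   dw4: payload - 0xDEADBEEF
 */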
1466
1467 /**
1468  * sdma_v4_0_ring_test_ib - test an IB on the DMA engine
1469  *
1470  * @ring: amdgpu_ring structure holding ring information
1471  * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
1472  *
1473  * Test a simple IB in the DMA ring (VEGA10).
1474  * Returns 0 on success, error on failure.
1475  */
1476 static int sdma_v4_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1477 {
1478         struct amdgpu_device *adev = ring->adev;
1479         struct amdgpu_ib ib;
1480         struct dma_fence *f = NULL;
1481         unsigned index;
1482         long r;
1483         u32 tmp = 0;
1484         u64 gpu_addr;
1485
1486         r = amdgpu_device_wb_get(adev, &index);
1487         if (r)
1488                 return r;
1489
1490         gpu_addr = adev->wb.gpu_addr + (index * 4);
1491         tmp = 0xCAFEDEAD;
1492         adev->wb.wb[index] = cpu_to_le32(tmp);
1493         memset(&ib, 0, sizeof(ib));
1494         r = amdgpu_ib_get(adev, NULL, 256,
1495                           AMDGPU_IB_POOL_DIRECT, &ib);
1496         if (r)
1497                 goto err0;
1498
1499         ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
1500                 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
1501         ib.ptr[1] = lower_32_bits(gpu_addr);
1502         ib.ptr[2] = upper_32_bits(gpu_addr);
1503         ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0);
1504         ib.ptr[4] = 0xDEADBEEF;
1505         ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
1506         ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
1507         ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
1508         ib.length_dw = 8;
1509
1510         r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
1511         if (r)
1512                 goto err1;
1513
1514         r = dma_fence_wait_timeout(f, false, timeout);
1515         if (r == 0) {
1516                 r = -ETIMEDOUT;
1517                 goto err1;
1518         } else if (r < 0) {
1519                 goto err1;
1520         }
1521         tmp = le32_to_cpu(adev->wb.wb[index]);
1522         if (tmp == 0xDEADBEEF)
1523                 r = 0;
1524         else
1525                 r = -EINVAL;
1526
1527 err1:
1528         amdgpu_ib_free(adev, &ib, NULL);
1529         dma_fence_put(f);
1530 err0:
1531         amdgpu_device_wb_free(adev, index);
1532         return r;
1533 }
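
/*
 * The three trailing NOPs in the test IB above round the packet stream up
 * from five dwords to eight, matching the eight-dword alignment that
 * sdma_v4_0_ring_pad_ib() below enforces for normal submissions.
 */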
1534
1535
1536 /**
1537  * sdma_v4_0_vm_copy_pte - update PTEs by copying them from the GART
1538  *
1539  * @ib: indirect buffer to fill with commands
1540  * @pe: addr of the page entry
1541  * @src: src addr to copy from
1542  * @count: number of page entries to update
1543  *
1544  * Update PTEs by copying them from the GART using sDMA (VEGA10).
1545  */
1546 static void sdma_v4_0_vm_copy_pte(struct amdgpu_ib *ib,
1547                                   uint64_t pe, uint64_t src,
1548                                   unsigned count)
1549 {
1550         unsigned bytes = count * 8;
1551
1552         ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
1553                 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
1554         ib->ptr[ib->length_dw++] = bytes - 1;
1555         ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
1556         ib->ptr[ib->length_dw++] = lower_32_bits(src);
1557         ib->ptr[ib->length_dw++] = upper_32_bits(src);
1558         ib->ptr[ib->length_dw++] = lower_32_bits(pe);
1559         ib->ptr[ib->length_dw++] = upper_32_bits(pe);
1561 }
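
/*
 * Worked example for the COPY_LINEAR packet above: each PTE is 8 bytes, so
 * count = 4 gives bytes = 32 and the count field is written as
 * bytes - 1 = 31. The packet is always 7 dwords, which is where
 * copy_pte_num_dw = 7 in sdma_v4_0_vm_pte_funcs below comes from.
 */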
1562
1563 /**
1564  * sdma_v4_0_vm_write_pte - update PTEs by writing them manually
1565  *
1566  * @ib: indirect buffer to fill with commands
1567  * @pe: addr of the page entry
1568  * @value: dst addr to write into pe
1569  * @count: number of page entries to update
1570  * @incr: increase next addr by incr bytes
1571  *
1572  * Update PTEs by writing them manually using sDMA (VEGA10).
1573  */
1574 static void sdma_v4_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
1575                                    uint64_t value, unsigned count,
1576                                    uint32_t incr)
1577 {
1578         unsigned ndw = count * 2;
1579
1580         ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
1581                 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
1582         ib->ptr[ib->length_dw++] = lower_32_bits(pe);
1583         ib->ptr[ib->length_dw++] = upper_32_bits(pe);
1584         ib->ptr[ib->length_dw++] = ndw - 1;
1585         for (; ndw > 0; ndw -= 2) {
1586                 ib->ptr[ib->length_dw++] = lower_32_bits(value);
1587                 ib->ptr[ib->length_dw++] = upper_32_bits(value);
1588                 value += incr;
1589         }
1590 }
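
/*
 * Worked example for the WRITE_LINEAR packet above: PTEs are 64-bit, so
 * ndw = count * 2 data dwords follow the 4-dword header and the length
 * field is stored as ndw - 1. With count = 3 and incr = 0x1000, three
 * 8-byte entries are written, each value 4 KiB larger than the last.
 */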
1591
1592 /**
1593  * sdma_v4_0_vm_set_pte_pde - update the page tables using sDMA
1594  *
1595  * @ib: indirect buffer to fill with commands
1596  * @pe: addr of the page entry
1597  * @addr: dst addr to write into pe
1598  * @count: number of page entries to update
1599  * @incr: increase next addr by incr bytes
1600  * @flags: access flags
1601  *
1602  * Update the page tables using sDMA (VEGA10).
1603  */
1604 static void sdma_v4_0_vm_set_pte_pde(struct amdgpu_ib *ib,
1605                                      uint64_t pe,
1606                                      uint64_t addr, unsigned count,
1607                                      uint32_t incr, uint64_t flags)
1608 {
1609         /* for physically contiguous pages (vram) */
1610         ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_PTEPDE);
1611         ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
1612         ib->ptr[ib->length_dw++] = upper_32_bits(pe);
1613         ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
1614         ib->ptr[ib->length_dw++] = upper_32_bits(flags);
1615         ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
1616         ib->ptr[ib->length_dw++] = upper_32_bits(addr);
1617         ib->ptr[ib->length_dw++] = incr; /* increment size */
1618         ib->ptr[ib->length_dw++] = 0;
1619         ib->ptr[ib->length_dw++] = count - 1; /* number of entries */
1620 }
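
/*
 * The PTE_PDE packet above is a fixed 10 dwords: header, dst address
 * (2 dwords), mask (2), value (2), increment, one unused dword and the
 * entry count stored as count - 1. With count = 512 and incr = 0x1000 it
 * fills one full 4 KiB page-table page with contiguous 4 KiB mappings.
 */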
1621
1622 /**
1623  * sdma_v4_0_ring_pad_ib - pad the IB to the required number of dw
1624  *
1625  * @ring: amdgpu_ring structure holding ring information
1626  * @ib: indirect buffer to fill with padding
1627  */
1628 static void sdma_v4_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
1629 {
1630         struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
1631         u32 pad_count;
1632         int i;
1633
1634         pad_count = (-ib->length_dw) & 7;
1635         for (i = 0; i < pad_count; i++)
1636                 if (sdma && sdma->burst_nop && (i == 0))
1637                         ib->ptr[ib->length_dw++] =
1638                                 SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
1639                                 SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
1640                 else
1641                         ib->ptr[ib->length_dw++] =
1642                                 SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
1643 }
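
/*
 * Padding arithmetic: (-ib->length_dw) & 7 is the distance to the next
 * multiple of 8 dwords. For example, length_dw = 13 gives pad_count = 3;
 * with burst_nop set, the first pad dword is a NOP header carrying
 * COUNT(2) and the remaining two dwords are plain NOPs.
 */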
1644
1645
1646 /**
1647  * sdma_v4_0_ring_emit_pipeline_sync - sync the pipeline
1648  *
1649  * @ring: amdgpu_ring pointer
1650  *
1651  * Make sure all previous operations are completed (VEGA10).
1652  */
1653 static void sdma_v4_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
1654 {
1655         uint32_t seq = ring->fence_drv.sync_seq;
1656         uint64_t addr = ring->fence_drv.gpu_addr;
1657
1658         /* wait for idle */
1659         sdma_v4_0_wait_reg_mem(ring, 1, 0,
1660                                addr & 0xfffffffc,
1661                                upper_32_bits(addr) & 0xffffffff,
1662                                seq, 0xffffffff, 4);
1663 }
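
/*
 * The wait above is a memory-space poll (second argument 1) on the ring's
 * fence address: the engine stalls until the fence value reaches sync_seq,
 * comparing the full dword (mask 0xffffffff) and retrying at the interval
 * encoded in the last argument.
 */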
1664
1665
1666 /**
1667  * sdma_v4_0_ring_emit_vm_flush - vm flush using sDMA
1668  *
1669  * @ring: amdgpu_ring pointer
1670  * @vmid: vmid number to use
1671  * @pd_addr: address
1672  *
1673  * Update the page table base and flush the VM TLB
1674  * using sDMA (VEGA10).
1675  */
1676 static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
1677                                          unsigned vmid, uint64_t pd_addr)
1678 {
1679         amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1680 }
1681
1682 static void sdma_v4_0_ring_emit_wreg(struct amdgpu_ring *ring,
1683                                      uint32_t reg, uint32_t val)
1684 {
1685         amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
1686                           SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
1687         amdgpu_ring_write(ring, reg);
1688         amdgpu_ring_write(ring, val);
1689 }
1690
1691 static void sdma_v4_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
1692                                          uint32_t val, uint32_t mask)
1693 {
1694         sdma_v4_0_wait_reg_mem(ring, 0, 0, reg, 0, val, mask, 10);
1695 }
1696
1697 static bool sdma_v4_0_fw_support_paging_queue(struct amdgpu_device *adev)
1698 {
1699         uint fw_version = adev->sdma.instance[0].fw_version;
1700
1701         switch (adev->ip_versions[SDMA0_HWIP][0]) {
1702         case IP_VERSION(4, 0, 0):
1703                 return fw_version >= 430;
1704         case IP_VERSION(4, 0, 1):
1705                 /* return fw_version >= 31; */
1706                 return false;
1707         case IP_VERSION(4, 2, 0):
1708                 return fw_version >= 123;
1709         default:
1710                 return false;
1711         }
1712 }
1713
1714 static int sdma_v4_0_early_init(void *handle)
1715 {
1716         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1717         int r;
1718
1719         r = sdma_v4_0_init_microcode(adev);
1720         if (r) {
1721                 DRM_ERROR("Failed to load sdma firmware!\n");
1722                 return r;
1723         }
1724
1725         /* TODO: Page queue breaks driver reload under SRIOV */
1726         if ((adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 0, 0)) &&
1727             amdgpu_sriov_vf((adev)))
1728                 adev->sdma.has_page_queue = false;
1729         else if (sdma_v4_0_fw_support_paging_queue(adev))
1730                 adev->sdma.has_page_queue = true;
1731
1732         sdma_v4_0_set_ring_funcs(adev);
1733         sdma_v4_0_set_buffer_funcs(adev);
1734         sdma_v4_0_set_vm_pte_funcs(adev);
1735         sdma_v4_0_set_irq_funcs(adev);
1736         sdma_v4_0_set_ras_funcs(adev);
1737
1738         return 0;
1739 }
1740
1741 static int sdma_v4_0_process_ras_data_cb(struct amdgpu_device *adev,
1742                 void *err_data,
1743                 struct amdgpu_iv_entry *entry);
1744
1745 static int sdma_v4_0_late_init(void *handle)
1746 {
1747         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1748
1749         sdma_v4_0_setup_ulv(adev);
1750
1751         if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
1752                 if (adev->sdma.ras && adev->sdma.ras->ras_block.hw_ops &&
1753                     adev->sdma.ras->ras_block.hw_ops->reset_ras_error_count)
1754                         adev->sdma.ras->ras_block.hw_ops->reset_ras_error_count(adev);
1755         }
1756
1757         return 0;
1758 }
1759
1760 static int sdma_v4_0_sw_init(void *handle)
1761 {
1762         struct amdgpu_ring *ring;
1763         int r, i;
1764         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1765
1766         /* SDMA trap event */
1767         for (i = 0; i < adev->sdma.num_instances; i++) {
1768                 r = amdgpu_irq_add_id(adev, sdma_v4_0_seq_to_irq_id(i),
1769                                       SDMA0_4_0__SRCID__SDMA_TRAP,
1770                                       &adev->sdma.trap_irq);
1771                 if (r)
1772                         return r;
1773         }
1774
1775         /* SDMA SRAM ECC event */
1776         for (i = 0; i < adev->sdma.num_instances; i++) {
1777                 r = amdgpu_irq_add_id(adev, sdma_v4_0_seq_to_irq_id(i),
1778                                       SDMA0_4_0__SRCID__SDMA_SRAM_ECC,
1779                                       &adev->sdma.ecc_irq);
1780                 if (r)
1781                         return r;
1782         }
1783
1784         /* SDMA VM_HOLE/DOORBELL_INV/POLL_TIMEOUT/SRBM_WRITE_PROTECTION event */
1785         for (i = 0; i < adev->sdma.num_instances; i++) {
1786                 r = amdgpu_irq_add_id(adev, sdma_v4_0_seq_to_irq_id(i),
1787                                       SDMA0_4_0__SRCID__SDMA_VM_HOLE,
1788                                       &adev->sdma.vm_hole_irq);
1789                 if (r)
1790                         return r;
1791
1792                 r = amdgpu_irq_add_id(adev, sdma_v4_0_seq_to_irq_id(i),
1793                                       SDMA0_4_0__SRCID__SDMA_DOORBELL_INVALID,
1794                                       &adev->sdma.doorbell_invalid_irq);
1795                 if (r)
1796                         return r;
1797
1798                 r = amdgpu_irq_add_id(adev, sdma_v4_0_seq_to_irq_id(i),
1799                                       SDMA0_4_0__SRCID__SDMA_POLL_TIMEOUT,
1800                                       &adev->sdma.pool_timeout_irq);
1801                 if (r)
1802                         return r;
1803
1804                 r = amdgpu_irq_add_id(adev, sdma_v4_0_seq_to_irq_id(i),
1805                                       SDMA0_4_0__SRCID__SDMA_SRBMWRITE,
1806                                       &adev->sdma.srbm_write_irq);
1807                 if (r)
1808                         return r;
1809         }
1810
1811         for (i = 0; i < adev->sdma.num_instances; i++) {
1812                 ring = &adev->sdma.instance[i].ring;
1813                 ring->ring_obj = NULL;
1814                 ring->use_doorbell = true;
1815
1816                 DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i,
1817                                 ring->use_doorbell ? "true" : "false");
1818
1819                 /* doorbell size is 2 dwords, get DWORD offset */
1820                 ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1;
1821
1822                 /*
1823                  * On Arcturus, SDMA instance 5~7 has a different vmhub
1824                  * type(AMDGPU_MMHUB1).
1825                  */
1826                 if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) && i >= 5)
1827                         ring->vm_hub = AMDGPU_MMHUB1(0);
1828                 else
1829                         ring->vm_hub = AMDGPU_MMHUB0(0);
1830
1831                 sprintf(ring->name, "sdma%d", i);
1832                 r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
1833                                      AMDGPU_SDMA_IRQ_INSTANCE0 + i,
1834                                      AMDGPU_RING_PRIO_DEFAULT, NULL);
1835                 if (r)
1836                         return r;
1837
1838                 if (adev->sdma.has_page_queue) {
1839                         ring = &adev->sdma.instance[i].page;
1840                         ring->ring_obj = NULL;
1841                         ring->use_doorbell = true;
1842
1843                         /* the paging queue uses the same doorbell index/routing as the
1844                          * gfx queue, with a 0x400 dword (4096 byte) offset on the second doorbell page
1845                          */
1846                         if (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(4, 0, 0) &&
1847                             adev->ip_versions[SDMA0_HWIP][0] < IP_VERSION(4, 2, 0)) {
1848                                 ring->doorbell_index =
1849                                         adev->doorbell_index.sdma_engine[i] << 1;
1850                                 ring->doorbell_index += 0x400;
1851                         } else {
1852                                 /* Starting with vega20, the sdma_doorbell_range in the
1853                                  * 1st doorbell page is reserved for the page queue.
1854                                  */
1855                                 ring->doorbell_index =
1856                                         (adev->doorbell_index.sdma_engine[i] + 1) << 1;
1857                         }
1858
1859                         if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) && i >= 5)
1860                                 ring->vm_hub = AMDGPU_MMHUB1(0);
1861                         else
1862                                 ring->vm_hub = AMDGPU_MMHUB0(0);
1863
1864                         sprintf(ring->name, "page%d", i);
1865                         r = amdgpu_ring_init(adev, ring, 1024,
1866                                              &adev->sdma.trap_irq,
1867                                              AMDGPU_SDMA_IRQ_INSTANCE0 + i,
1868                                              AMDGPU_RING_PRIO_DEFAULT, NULL);
1869                         if (r)
1870                                 return r;
1871                 }
1872         }
1873
1874         if (amdgpu_sdma_ras_sw_init(adev)) {
1875                 dev_err(adev->dev, "Failed to initialize sdma ras block!\n");
1876                 return -EINVAL;
1877         }
1878
1879         return r;
1880 }
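
/*
 * Doorbell index arithmetic in sdma_v4_0_sw_init() above: the indices in
 * adev->doorbell_index are in 64-bit units, so they are shifted left by
 * one to get dword offsets. E.g. with sdma_engine[i] = 0xF0 the gfx ring
 * would use 0x1E0, and on SDMA 4.0.x the page ring 0x1E0 + 0x400.
 */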
1881
1882 static int sdma_v4_0_sw_fini(void *handle)
1883 {
1884         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1885         int i;
1886
1887         for (i = 0; i < adev->sdma.num_instances; i++) {
1888                 amdgpu_ring_fini(&adev->sdma.instance[i].ring);
1889                 if (adev->sdma.has_page_queue)
1890                         amdgpu_ring_fini(&adev->sdma.instance[i].page);
1891         }
1892
1893         if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) ||
1894             adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 0))
1895                 amdgpu_sdma_destroy_inst_ctx(adev, true);
1896         else
1897                 amdgpu_sdma_destroy_inst_ctx(adev, false);
1898
1899         return 0;
1900 }
1901
1902 static int sdma_v4_0_hw_init(void *handle)
1903 {
1904         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1905
1906         if (adev->flags & AMD_IS_APU)
1907                 amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, false);
1908
1909         if (!amdgpu_sriov_vf(adev))
1910                 sdma_v4_0_init_golden_registers(adev);
1911
1912         return sdma_v4_0_start(adev);
1913 }
1914
1915 static int sdma_v4_0_hw_fini(void *handle)
1916 {
1917         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1918         int i;
1919
1920         if (amdgpu_sriov_vf(adev)) {
1921                 /* disable the scheduler for SDMA */
1922                 amdgpu_sdma_unset_buffer_funcs_helper(adev);
1923                 return 0;
1924         }
1925
1926         if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
1927                 for (i = 0; i < adev->sdma.num_instances; i++) {
1928                         amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
1929                                        AMDGPU_SDMA_IRQ_INSTANCE0 + i);
1930                 }
1931         }
1932
1933         sdma_v4_0_ctx_switch_enable(adev, false);
1934         sdma_v4_0_enable(adev, false);
1935
1936         if (adev->flags & AMD_IS_APU)
1937                 amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, true);
1938
1939         return 0;
1940 }
1941
1942 static int sdma_v4_0_suspend(void *handle)
1943 {
1944         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1945
1946         /* SMU saves SDMA state for us */
1947         if (adev->in_s0ix) {
1948                 sdma_v4_0_gfx_enable(adev, false);
1949                 return 0;
1950         }
1951
1952         return sdma_v4_0_hw_fini(adev);
1953 }
1954
1955 static int sdma_v4_0_resume(void *handle)
1956 {
1957         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1958
1959         /* SMU restores SDMA state for us */
1960         if (adev->in_s0ix) {
1961                 sdma_v4_0_enable(adev, true);
1962                 sdma_v4_0_gfx_enable(adev, true);
1963                 amdgpu_ttm_set_buffer_funcs_status(adev, true);
1964                 return 0;
1965         }
1966
1967         return sdma_v4_0_hw_init(adev);
1968 }
1969
1970 static bool sdma_v4_0_is_idle(void *handle)
1971 {
1972         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1973         u32 i;
1974
1975         for (i = 0; i < adev->sdma.num_instances; i++) {
1976                 u32 tmp = RREG32_SDMA(i, mmSDMA0_STATUS_REG);
1977
1978                 if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
1979                         return false;
1980         }
1981
1982         return true;
1983 }
1984
1985 static int sdma_v4_0_wait_for_idle(void *handle)
1986 {
1987         unsigned i, j;
1988         u32 sdma[AMDGPU_MAX_SDMA_INSTANCES];
1989         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1990
1991         for (i = 0; i < adev->usec_timeout; i++) {
1992                 for (j = 0; j < adev->sdma.num_instances; j++) {
1993                         sdma[j] = RREG32_SDMA(j, mmSDMA0_STATUS_REG);
1994                         if (!(sdma[j] & SDMA0_STATUS_REG__IDLE_MASK))
1995                                 break;
1996                 }
1997                 if (j == adev->sdma.num_instances)
1998                         return 0;
1999                 udelay(1);
2000         }
2001         return -ETIMEDOUT;
2002 }
2003
2004 static int sdma_v4_0_soft_reset(void *handle)
2005 {
2006         /* todo */
2007
2008         return 0;
2009 }
2010
2011 static int sdma_v4_0_set_trap_irq_state(struct amdgpu_device *adev,
2012                                         struct amdgpu_irq_src *source,
2013                                         unsigned type,
2014                                         enum amdgpu_interrupt_state state)
2015 {
2016         u32 sdma_cntl;
2017
2018         sdma_cntl = RREG32_SDMA(type, mmSDMA0_CNTL);
2019         sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
2020                        state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
2021         WREG32_SDMA(type, mmSDMA0_CNTL, sdma_cntl);
2022
2023         return 0;
2024 }
2025
2026 static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,
2027                                       struct amdgpu_irq_src *source,
2028                                       struct amdgpu_iv_entry *entry)
2029 {
2030         uint32_t instance;
2031
2032         DRM_DEBUG("IH: SDMA trap\n");
2033         instance = sdma_v4_0_irq_id_to_seq(entry->client_id);
2034         switch (entry->ring_id) {
2035         case 0:
2036                 amdgpu_fence_process(&adev->sdma.instance[instance].ring);
2037                 break;
2038         case 1:
2039                 if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 0))
2040                         amdgpu_fence_process(&adev->sdma.instance[instance].page);
2041                 break;
2042         case 2:
2043                 /* XXX compute */
2044                 break;
2045         case 3:
2046                 if (adev->ip_versions[SDMA0_HWIP][0] != IP_VERSION(4, 2, 0))
2047                         amdgpu_fence_process(&adev->sdma.instance[instance].page);
2048                 break;
2049         }
2050         return 0;
2051 }
2052
2053 static int sdma_v4_0_process_ras_data_cb(struct amdgpu_device *adev,
2054                 void *err_data,
2055                 struct amdgpu_iv_entry *entry)
2056 {
2057         int instance;
2058
2059         /* When "Full RAS" is enabled, the per-IP interrupt sources should
2060          * be disabled and the driver should only look for the aggregated
2061          * interrupt via sync flood.
2062          */
2063         if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
2064                 goto out;
2065
2066         instance = sdma_v4_0_irq_id_to_seq(entry->client_id);
2067         if (instance < 0)
2068                 goto out;
2069
2070         amdgpu_sdma_process_ras_data_cb(adev, err_data, entry);
2071
2072 out:
2073         return AMDGPU_RAS_SUCCESS;
2074 }
2075
2076 static int sdma_v4_0_process_illegal_inst_irq(struct amdgpu_device *adev,
2077                                               struct amdgpu_irq_src *source,
2078                                               struct amdgpu_iv_entry *entry)
2079 {
2080         int instance;
2081
2082         DRM_ERROR("Illegal instruction in SDMA command stream\n");
2083
2084         instance = sdma_v4_0_irq_id_to_seq(entry->client_id);
2085         if (instance < 0)
2086                 return 0;
2087
2088         switch (entry->ring_id) {
2089         case 0:
2090                 drm_sched_fault(&adev->sdma.instance[instance].ring.sched);
2091                 break;
2092         }
2093         return 0;
2094 }
2095
2096 static int sdma_v4_0_set_ecc_irq_state(struct amdgpu_device *adev,
2097                                         struct amdgpu_irq_src *source,
2098                                         unsigned type,
2099                                         enum amdgpu_interrupt_state state)
2100 {
2101         u32 sdma_edc_config;
2102
2103         sdma_edc_config = RREG32_SDMA(type, mmSDMA0_EDC_CONFIG);
2104         sdma_edc_config = REG_SET_FIELD(sdma_edc_config, SDMA0_EDC_CONFIG, ECC_INT_ENABLE,
2105                        state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
2106         WREG32_SDMA(type, mmSDMA0_EDC_CONFIG, sdma_edc_config);
2107
2108         return 0;
2109 }
2110
2111 static int sdma_v4_0_print_iv_entry(struct amdgpu_device *adev,
2112                                               struct amdgpu_iv_entry *entry)
2113 {
2114         int instance;
2115         struct amdgpu_task_info task_info;
2116         u64 addr;
2117
2118         instance = sdma_v4_0_irq_id_to_seq(entry->client_id);
2119         if (instance < 0 || instance >= adev->sdma.num_instances) {
2120                 dev_err(adev->dev, "invalid sdma instance %d\n", instance);
2121                 return -EINVAL;
2122         }
2123
2124         addr = (u64)entry->src_data[0] << 12;
2125         addr |= ((u64)entry->src_data[1] & 0xf) << 44;
2126
2127         memset(&task_info, 0, sizeof(struct amdgpu_task_info));
2128         amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
2129
2130         dev_dbg_ratelimited(adev->dev,
2131                    "[sdma%d] address:0x%016llx src_id:%u ring:%u vmid:%u "
2132                    "pasid:%u, for process %s pid %d thread %s pid %d\n",
2133                    instance, addr, entry->src_id, entry->ring_id, entry->vmid,
2134                    entry->pasid, task_info.process_name, task_info.tgid,
2135                    task_info.task_name, task_info.pid);
2136         return 0;
2137 }
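
/*
 * Address reconstruction above: src_data[0] carries bits 12..43 of the
 * faulting address and the low nibble of src_data[1] carries bits 44..47.
 * E.g. src_data[0] = 0x12345, src_data[1] = 0x3 decodes to 0x30012345000.
 */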
2138
2139 static int sdma_v4_0_process_vm_hole_irq(struct amdgpu_device *adev,
2140                                               struct amdgpu_irq_src *source,
2141                                               struct amdgpu_iv_entry *entry)
2142 {
2143         dev_dbg_ratelimited(adev->dev, "MC or SEM address in VM hole\n");
2144         sdma_v4_0_print_iv_entry(adev, entry);
2145         return 0;
2146 }
2147
2148 static int sdma_v4_0_process_doorbell_invalid_irq(struct amdgpu_device *adev,
2149                                               struct amdgpu_irq_src *source,
2150                                               struct amdgpu_iv_entry *entry)
2151 {
2152         dev_dbg_ratelimited(adev->dev, "SDMA received a doorbell from BIF with byte_enable != 0xff\n");
2153         sdma_v4_0_print_iv_entry(adev, entry);
2154         return 0;
2155 }
2156
2157 static int sdma_v4_0_process_pool_timeout_irq(struct amdgpu_device *adev,
2158                                               struct amdgpu_irq_src *source,
2159                                               struct amdgpu_iv_entry *entry)
2160 {
2161         dev_dbg_ratelimited(adev->dev,
2162                 "Polling register/memory timed out executing POLL_REG/MEM with a finite timer\n");
2163         sdma_v4_0_print_iv_entry(adev, entry);
2164         return 0;
2165 }
2166
2167 static int sdma_v4_0_process_srbm_write_irq(struct amdgpu_device *adev,
2168                                               struct amdgpu_irq_src *source,
2169                                               struct amdgpu_iv_entry *entry)
2170 {
2171         dev_dbg_ratelimited(adev->dev,
2172                 "SDMA got an SRBM_WRITE register write command in a non-privileged command buffer\n");
2173         sdma_v4_0_print_iv_entry(adev, entry);
2174         return 0;
2175 }
2176
2177 static void sdma_v4_0_update_medium_grain_clock_gating(
2178                 struct amdgpu_device *adev,
2179                 bool enable)
2180 {
2181         uint32_t data, def;
2182         int i;
2183
2184         if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
2185                 for (i = 0; i < adev->sdma.num_instances; i++) {
2186                         def = data = RREG32_SDMA(i, mmSDMA0_CLK_CTRL);
2187                         data &= ~(SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
2188                                   SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
2189                                   SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
2190                                   SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
2191                                   SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
2192                                   SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
2193                                   SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
2194                                   SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
2195                         if (def != data)
2196                                 WREG32_SDMA(i, mmSDMA0_CLK_CTRL, data);
2197                 }
2198         } else {
2199                 for (i = 0; i < adev->sdma.num_instances; i++) {
2200                         def = data = RREG32_SDMA(i, mmSDMA0_CLK_CTRL);
2201                         data |= (SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
2202                                  SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
2203                                  SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
2204                                  SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
2205                                  SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
2206                                  SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
2207                                  SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
2208                                  SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
2209                         if (def != data)
2210                                 WREG32_SDMA(i, mmSDMA0_CLK_CTRL, data);
2211                 }
2212         }
2213 }
2214
2215
2216 static void sdma_v4_0_update_medium_grain_light_sleep(
2217                 struct amdgpu_device *adev,
2218                 bool enable)
2219 {
2220         uint32_t data, def;
2221         int i;
2222
2223         if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
2224                 for (i = 0; i < adev->sdma.num_instances; i++) {
2225                         /* 1 = do not override: enable sdma mem light sleep */
2226                         def = data = RREG32_SDMA(i, mmSDMA0_POWER_CNTL);
2227                         data |= SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
2228                         if (def != data)
2229                                 WREG32_SDMA(i, mmSDMA0_POWER_CNTL, data);
2230                 }
2231         } else {
2232                 for (i = 0; i < adev->sdma.num_instances; i++) {
2233                         /* 0 = override: disable sdma mem light sleep */
2234                         def = data = RREG32_SDMA(i, mmSDMA0_POWER_CNTL);
2235                         data &= ~SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
2236                         if (def != data)
2237                                 WREG32_SDMA(i, mmSDMA0_POWER_CNTL, data);
2238                 }
2239         }
2240 }
2241
2242 static int sdma_v4_0_set_clockgating_state(void *handle,
2243                                           enum amd_clockgating_state state)
2244 {
2245         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2246
2247         if (amdgpu_sriov_vf(adev))
2248                 return 0;
2249
2250         sdma_v4_0_update_medium_grain_clock_gating(adev,
2251                         state == AMD_CG_STATE_GATE);
2252         sdma_v4_0_update_medium_grain_light_sleep(adev,
2253                         state == AMD_CG_STATE_GATE);
2254         return 0;
2255 }
2256
2257 static int sdma_v4_0_set_powergating_state(void *handle,
2258                                           enum amd_powergating_state state)
2259 {
2260         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2261
2262         switch (adev->ip_versions[SDMA0_HWIP][0]) {
2263         case IP_VERSION(4, 1, 0):
2264         case IP_VERSION(4, 1, 1):
2265         case IP_VERSION(4, 1, 2):
2266                 sdma_v4_1_update_power_gating(adev,
2267                                 state == AMD_PG_STATE_GATE);
2268                 break;
2269         default:
2270                 break;
2271         }
2272
2273         return 0;
2274 }
2275
2276 static void sdma_v4_0_get_clockgating_state(void *handle, u64 *flags)
2277 {
2278         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2279         int data;
2280
2281         if (amdgpu_sriov_vf(adev))
2282                 *flags = 0;
2283
2284         /* AMD_CG_SUPPORT_SDMA_MGCG */
2285         data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL));
2286         if (!(data & SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK))
2287                 *flags |= AMD_CG_SUPPORT_SDMA_MGCG;
2288
2289         /* AMD_CG_SUPPORT_SDMA_LS */
2290         data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL));
2291         if (data & SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK)
2292                 *flags |= AMD_CG_SUPPORT_SDMA_LS;
2293 }
2294
2295 const struct amd_ip_funcs sdma_v4_0_ip_funcs = {
2296         .name = "sdma_v4_0",
2297         .early_init = sdma_v4_0_early_init,
2298         .late_init = sdma_v4_0_late_init,
2299         .sw_init = sdma_v4_0_sw_init,
2300         .sw_fini = sdma_v4_0_sw_fini,
2301         .hw_init = sdma_v4_0_hw_init,
2302         .hw_fini = sdma_v4_0_hw_fini,
2303         .suspend = sdma_v4_0_suspend,
2304         .resume = sdma_v4_0_resume,
2305         .is_idle = sdma_v4_0_is_idle,
2306         .wait_for_idle = sdma_v4_0_wait_for_idle,
2307         .soft_reset = sdma_v4_0_soft_reset,
2308         .set_clockgating_state = sdma_v4_0_set_clockgating_state,
2309         .set_powergating_state = sdma_v4_0_set_powergating_state,
2310         .get_clockgating_state = sdma_v4_0_get_clockgating_state,
2311 };
2312
2313 static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = {
2314         .type = AMDGPU_RING_TYPE_SDMA,
2315         .align_mask = 0xff,
2316         .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
2317         .support_64bit_ptrs = true,
2318         .secure_submission_supported = true,
2319         .get_rptr = sdma_v4_0_ring_get_rptr,
2320         .get_wptr = sdma_v4_0_ring_get_wptr,
2321         .set_wptr = sdma_v4_0_ring_set_wptr,
2322         .emit_frame_size =
2323                 6 + /* sdma_v4_0_ring_emit_hdp_flush */
2324                 3 + /* hdp invalidate */
2325                 6 + /* sdma_v4_0_ring_emit_pipeline_sync */
2326                 /* sdma_v4_0_ring_emit_vm_flush */
2327                 SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
2328                 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
2329                 10 + 10 + 10, /* sdma_v4_0_ring_emit_fence x3 for user fence, vm fence */
2330         .emit_ib_size = 7 + 6, /* sdma_v4_0_ring_emit_ib */
2331         .emit_ib = sdma_v4_0_ring_emit_ib,
2332         .emit_fence = sdma_v4_0_ring_emit_fence,
2333         .emit_pipeline_sync = sdma_v4_0_ring_emit_pipeline_sync,
2334         .emit_vm_flush = sdma_v4_0_ring_emit_vm_flush,
2335         .emit_hdp_flush = sdma_v4_0_ring_emit_hdp_flush,
2336         .test_ring = sdma_v4_0_ring_test_ring,
2337         .test_ib = sdma_v4_0_ring_test_ib,
2338         .insert_nop = sdma_v4_0_ring_insert_nop,
2339         .pad_ib = sdma_v4_0_ring_pad_ib,
2340         .emit_wreg = sdma_v4_0_ring_emit_wreg,
2341         .emit_reg_wait = sdma_v4_0_ring_emit_reg_wait,
2342         .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
2343 };
2344
2345 static const struct amdgpu_ring_funcs sdma_v4_0_page_ring_funcs = {
2346         .type = AMDGPU_RING_TYPE_SDMA,
2347         .align_mask = 0xff,
2348         .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
2349         .support_64bit_ptrs = true,
2350         .secure_submission_supported = true,
2351         .get_rptr = sdma_v4_0_ring_get_rptr,
2352         .get_wptr = sdma_v4_0_page_ring_get_wptr,
2353         .set_wptr = sdma_v4_0_page_ring_set_wptr,
2354         .emit_frame_size =
2355                 6 + /* sdma_v4_0_ring_emit_hdp_flush */
2356                 3 + /* hdp invalidate */
2357                 6 + /* sdma_v4_0_ring_emit_pipeline_sync */
2358                 /* sdma_v4_0_ring_emit_vm_flush */
2359                 SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
2360                 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
2361                 10 + 10 + 10, /* sdma_v4_0_ring_emit_fence x3 for user fence, vm fence */
2362         .emit_ib_size = 7 + 6, /* sdma_v4_0_ring_emit_ib */
2363         .emit_ib = sdma_v4_0_ring_emit_ib,
2364         .emit_fence = sdma_v4_0_ring_emit_fence,
2365         .emit_pipeline_sync = sdma_v4_0_ring_emit_pipeline_sync,
2366         .emit_vm_flush = sdma_v4_0_ring_emit_vm_flush,
2367         .emit_hdp_flush = sdma_v4_0_ring_emit_hdp_flush,
2368         .test_ring = sdma_v4_0_ring_test_ring,
2369         .test_ib = sdma_v4_0_ring_test_ib,
2370         .insert_nop = sdma_v4_0_ring_insert_nop,
2371         .pad_ib = sdma_v4_0_ring_pad_ib,
2372         .emit_wreg = sdma_v4_0_ring_emit_wreg,
2373         .emit_reg_wait = sdma_v4_0_ring_emit_reg_wait,
2374         .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
2375 };
2376
2377 static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev)
2378 {
2379         int i;
2380
2381         for (i = 0; i < adev->sdma.num_instances; i++) {
2382                 adev->sdma.instance[i].ring.funcs = &sdma_v4_0_ring_funcs;
2383                 adev->sdma.instance[i].ring.me = i;
2384                 if (adev->sdma.has_page_queue) {
2385                         adev->sdma.instance[i].page.funcs =
2386                                         &sdma_v4_0_page_ring_funcs;
2387                         adev->sdma.instance[i].page.me = i;
2388                 }
2389         }
2390 }
2391
2392 static const struct amdgpu_irq_src_funcs sdma_v4_0_trap_irq_funcs = {
2393         .set = sdma_v4_0_set_trap_irq_state,
2394         .process = sdma_v4_0_process_trap_irq,
2395 };
2396
2397 static const struct amdgpu_irq_src_funcs sdma_v4_0_illegal_inst_irq_funcs = {
2398         .process = sdma_v4_0_process_illegal_inst_irq,
2399 };
2400
2401 static const struct amdgpu_irq_src_funcs sdma_v4_0_ecc_irq_funcs = {
2402         .set = sdma_v4_0_set_ecc_irq_state,
2403         .process = amdgpu_sdma_process_ecc_irq,
2404 };
2405
2406 static const struct amdgpu_irq_src_funcs sdma_v4_0_vm_hole_irq_funcs = {
2407         .process = sdma_v4_0_process_vm_hole_irq,
2408 };
2409
2410 static const struct amdgpu_irq_src_funcs sdma_v4_0_doorbell_invalid_irq_funcs = {
2411         .process = sdma_v4_0_process_doorbell_invalid_irq,
2412 };
2413
2414 static const struct amdgpu_irq_src_funcs sdma_v4_0_pool_timeout_irq_funcs = {
2415         .process = sdma_v4_0_process_pool_timeout_irq,
2416 };
2417
2418 static const struct amdgpu_irq_src_funcs sdma_v4_0_srbm_write_irq_funcs = {
2419         .process = sdma_v4_0_process_srbm_write_irq,
2420 };
2421
2422 static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev)
2423 {
2424         adev->sdma.trap_irq.num_types = adev->sdma.num_instances;
2425         adev->sdma.ecc_irq.num_types = adev->sdma.num_instances;
2426         /* For Arcturus and Aldebaran, add another 4 irq handlers */
2427         switch (adev->sdma.num_instances) {
2428         case 5:
2429         case 8:
2430                 adev->sdma.vm_hole_irq.num_types = adev->sdma.num_instances;
2431                 adev->sdma.doorbell_invalid_irq.num_types = adev->sdma.num_instances;
2432                 adev->sdma.pool_timeout_irq.num_types = adev->sdma.num_instances;
2433                 adev->sdma.srbm_write_irq.num_types = adev->sdma.num_instances;
2434                 break;
2435         default:
2436                 break;
2437         }
2438         adev->sdma.trap_irq.funcs = &sdma_v4_0_trap_irq_funcs;
2439         adev->sdma.illegal_inst_irq.funcs = &sdma_v4_0_illegal_inst_irq_funcs;
2440         adev->sdma.ecc_irq.funcs = &sdma_v4_0_ecc_irq_funcs;
2441         adev->sdma.vm_hole_irq.funcs = &sdma_v4_0_vm_hole_irq_funcs;
2442         adev->sdma.doorbell_invalid_irq.funcs = &sdma_v4_0_doorbell_invalid_irq_funcs;
2443         adev->sdma.pool_timeout_irq.funcs = &sdma_v4_0_pool_timeout_irq_funcs;
2444         adev->sdma.srbm_write_irq.funcs = &sdma_v4_0_srbm_write_irq_funcs;
2445 }
2446
2447 /**
2448  * sdma_v4_0_emit_copy_buffer - copy buffer using the sDMA engine
2449  *
2450  * @ib: indirect buffer to copy to
2451  * @src_offset: src GPU address
2452  * @dst_offset: dst GPU address
2453  * @byte_count: number of bytes to xfer
2454  * @tmz: if a secure copy should be used
2455  *
2456  * Copy GPU buffers using the DMA engine (VEGA10/12).
2457  * Used by the amdgpu ttm implementation to move pages if
2458  * registered as the asic copy callback.
2459  */
2460 static void sdma_v4_0_emit_copy_buffer(struct amdgpu_ib *ib,
2461                                        uint64_t src_offset,
2462                                        uint64_t dst_offset,
2463                                        uint32_t byte_count,
2464                                        bool tmz)
2465 {
2466         ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
2467                 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
2468                 SDMA_PKT_COPY_LINEAR_HEADER_TMZ(tmz ? 1 : 0);
2469         ib->ptr[ib->length_dw++] = byte_count - 1;
2470         ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
2471         ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
2472         ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
2473         ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
2474         ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
2475 }
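
/*
 * The copy packet above is 7 dwords, matching copy_num_dw = 7 in
 * sdma_v4_0_buffer_funcs below; byte_count is stored as n - 1 and
 * copy_max_bytes = 0x400000 caps a single packet at 4 MiB.
 */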
2476
2477 /**
2478  * sdma_v4_0_emit_fill_buffer - fill buffer using the sDMA engine
2479  *
2480  * @ib: indirect buffer to copy to
2481  * @src_data: value to write to buffer
2482  * @dst_offset: dst GPU address
2483  * @byte_count: number of bytes to xfer
2484  *
2485  * Fill GPU buffers using the DMA engine (VEGA10/12).
2486  */
2487 static void sdma_v4_0_emit_fill_buffer(struct amdgpu_ib *ib,
2488                                        uint32_t src_data,
2489                                        uint64_t dst_offset,
2490                                        uint32_t byte_count)
2491 {
2492         ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
2493         ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
2494         ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
2495         ib->ptr[ib->length_dw++] = src_data;
2496         ib->ptr[ib->length_dw++] = byte_count - 1;
2497 }
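
/*
 * Likewise, the CONST_FILL packet is 5 dwords (fill_num_dw = 5): header,
 * dst address (2 dwords), the 32-bit fill pattern, and byte_count - 1.
 */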
2498
2499 static const struct amdgpu_buffer_funcs sdma_v4_0_buffer_funcs = {
2500         .copy_max_bytes = 0x400000,
2501         .copy_num_dw = 7,
2502         .emit_copy_buffer = sdma_v4_0_emit_copy_buffer,
2503
2504         .fill_max_bytes = 0x400000,
2505         .fill_num_dw = 5,
2506         .emit_fill_buffer = sdma_v4_0_emit_fill_buffer,
2507 };

static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev)
{
	adev->mman.buffer_funcs = &sdma_v4_0_buffer_funcs;
	if (adev->sdma.has_page_queue)
		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].page;
	else
		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}

static const struct amdgpu_vm_pte_funcs sdma_v4_0_vm_pte_funcs = {
	.copy_pte_num_dw = 7,
	.copy_pte = sdma_v4_0_vm_copy_pte,

	.write_pte = sdma_v4_0_vm_write_pte,
	.set_pte_pde = sdma_v4_0_vm_set_pte_pde,
};

static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev)
{
	struct drm_gpu_scheduler *sched;
	unsigned int i;

	adev->vm_manager.vm_pte_funcs = &sdma_v4_0_vm_pte_funcs;
	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (adev->sdma.has_page_queue)
			sched = &adev->sdma.instance[i].page.sched;
		else
			sched = &adev->sdma.instance[i].ring.sched;
		adev->vm_manager.vm_pte_scheds[i] = sched;
	}
	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
}
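
/*
 * Note: as with the buffer funcs above, the page queue (when present)
 * is preferred here, which keeps kernel-internal PTE updates and
 * buffer moves off the main SDMA rings.
 */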

static void sdma_v4_0_get_ras_error_count(uint32_t value,
					uint32_t instance,
					uint32_t *sec_count)
{
	uint32_t i;
	uint32_t sec_cnt;

	/* double bit (multiple bit) error detection is not supported */
	for (i = 0; i < ARRAY_SIZE(sdma_v4_0_ras_fields); i++) {
		/* the SDMA_EDC_COUNTER register in each sdma instance
		 * shares the same SED (single-bit error) mask/shift layout
		 */
		sec_cnt = (value &
			sdma_v4_0_ras_fields[i].sec_count_mask) >>
			sdma_v4_0_ras_fields[i].sec_count_shift;
		if (sec_cnt) {
			DRM_INFO("Detected %s in SDMA%d, SED %d\n",
				sdma_v4_0_ras_fields[i].name,
				instance, sec_cnt);
			*sec_count += sec_cnt;
		}
	}
}
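
/*
 * Illustrative sketch (not part of the driver): for a hypothetical
 * field with sec_count_mask 0x00000003 and sec_count_shift 0, an EDC
 * counter value of 0x2 decodes to two single-bit errors:
 *
 *	sec_cnt = (0x2 & 0x00000003) >> 0;	// yields 2
 */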

static int sdma_v4_0_query_ras_error_count_by_instance(struct amdgpu_device *adev,
			uint32_t instance, void *ras_error_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
	uint32_t sec_count = 0;
	uint32_t reg_value = 0;

	reg_value = RREG32_SDMA(instance, mmSDMA0_EDC_COUNTER);
	/* double bit error is not supported */
	if (reg_value)
		sdma_v4_0_get_ras_error_count(reg_value,
				instance, &sec_count);
	/* err_data->ce_count must be initialized to 0
	 * before calling into this function
	 */
	err_data->ce_count += sec_count;
	/* double bit errors are not supported,
	 * so set the ue count to 0
	 */
	err_data->ue_count = 0;

	return 0;
}

static void sdma_v4_0_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (sdma_v4_0_query_ras_error_count_by_instance(adev, i, ras_error_status)) {
			dev_err(adev->dev, "Query ras error count failed in SDMA%d\n", i);
			return;
		}
	}
}

static void sdma_v4_0_reset_ras_error_count(struct amdgpu_device *adev)
{
	int i;

	/* read back edc counter registers to clear the counters */
	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
		for (i = 0; i < adev->sdma.num_instances; i++)
			RREG32_SDMA(i, mmSDMA0_EDC_COUNTER);
	}
}
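
/*
 * The EDC counter is read-to-clear, so simply reading
 * mmSDMA0_EDC_COUNTER and discarding the result is enough to zero the
 * per-instance error counts.
 */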

const struct amdgpu_ras_block_hw_ops sdma_v4_0_ras_hw_ops = {
	.query_ras_error_count = sdma_v4_0_query_ras_error_count,
	.reset_ras_error_count = sdma_v4_0_reset_ras_error_count,
};

static struct amdgpu_sdma_ras sdma_v4_0_ras = {
	.ras_block = {
		.hw_ops = &sdma_v4_0_ras_hw_ops,
		.ras_cb = sdma_v4_0_process_ras_data_cb,
	},
};

static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[SDMA0_HWIP][0]) {
	case IP_VERSION(4, 2, 0):
	case IP_VERSION(4, 2, 2):
		adev->sdma.ras = &sdma_v4_0_ras;
		break;
	case IP_VERSION(4, 4, 0):
		adev->sdma.ras = &sdma_v4_4_ras;
		break;
	default:
		break;
	}
}
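
/*
 * SDMA 4.4.0 has its own EDC field layout, so it uses the sdma_v4_4
 * RAS implementation (see sdma_v4_4.h included above) rather than the
 * one in this file; versions not listed here register no SDMA RAS
 * block.
 */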

const struct amdgpu_ip_block_version sdma_v4_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_SDMA,
	.major = 4,
	.minor = 0,
	.rev = 0,
	.funcs = &sdma_v4_0_ip_funcs,
};