drivers/gpu/drm/etnaviv/etnaviv_dump.c — devcoredump capture support for the etnaviv DRM driver (Vivante GPUs)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2015-2018 Etnaviv Project
4  */
5
6 #include <linux/devcoredump.h>
7 #include <linux/moduleparam.h>
8
9 #include "etnaviv_cmdbuf.h"
10 #include "etnaviv_dump.h"
11 #include "etnaviv_gem.h"
12 #include "etnaviv_gpu.h"
13 #include "etnaviv_mmu.h"
14 #include "etnaviv_sched.h"
15 #include "state.xml.h"
16 #include "state_hi.xml.h"
17
/*
 * One-shot arming flag: a core dump is produced only for the first hang
 * event; writing 1 to the module parameter (mode 0600) re-arms it.
 */
static bool etnaviv_dump_core = true;
module_param_named(dump_core, etnaviv_dump_core, bool, 0600);

/*
 * Cursor used while serializing the dump file: headers are written
 * sequentially via 'hdr' at the front of the buffer, payload data via
 * 'data' behind the header array. 'start' is the base of the vmalloc'ed
 * file, used to compute file offsets.
 */
struct core_dump_iterator {
        void *start;
        struct etnaviv_dump_object_header *hdr;
        void *data;
};
26
/*
 * MMIO register offsets sampled into the ETDUMP_BUF_REG section of the
 * dump. The list order defines the on-file record order; the count also
 * sizes that section (see etnaviv_core_dump()).
 */
static const unsigned short etnaviv_dump_registers[] = {
        VIVS_HI_AXI_STATUS,
        VIVS_HI_CLOCK_CONTROL,
        VIVS_HI_IDLE_STATE,
        VIVS_HI_AXI_CONFIG,
        VIVS_HI_INTR_ENBL,
        VIVS_HI_CHIP_IDENTITY,
        VIVS_HI_CHIP_FEATURE,
        VIVS_HI_CHIP_MODEL,
        VIVS_HI_CHIP_REV,
        VIVS_HI_CHIP_DATE,
        VIVS_HI_CHIP_TIME,
        VIVS_HI_CHIP_MINOR_FEATURE_0,
        VIVS_HI_CACHE_CONTROL,
        VIVS_HI_AXI_CONTROL,
        VIVS_PM_POWER_CONTROLS,
        VIVS_PM_MODULE_CONTROLS,
        VIVS_PM_MODULE_STATUS,
        VIVS_PM_PULSE_EATER,
        VIVS_MC_MMU_FE_PAGE_TABLE,
        VIVS_MC_MMU_TX_PAGE_TABLE,
        VIVS_MC_MMU_PE_PAGE_TABLE,
        VIVS_MC_MMU_PEZ_PAGE_TABLE,
        VIVS_MC_MMU_RA_PAGE_TABLE,
        VIVS_MC_DEBUG_MEMORY,
        VIVS_MC_MEMORY_BASE_ADDR_RA,
        VIVS_MC_MEMORY_BASE_ADDR_FE,
        VIVS_MC_MEMORY_BASE_ADDR_TX,
        VIVS_MC_MEMORY_BASE_ADDR_PEZ,
        VIVS_MC_MEMORY_BASE_ADDR_PE,
        VIVS_MC_MEMORY_TIMING_CONTROL,
        VIVS_MC_BUS_CONFIG,
        VIVS_FE_DMA_STATUS,
        VIVS_FE_DMA_DEBUG_STATE,
        VIVS_FE_DMA_ADDRESS,
        VIVS_FE_DMA_LOW,
        VIVS_FE_DMA_HIGH,
        VIVS_FE_AUTO_FLUSH,
};
66
67 static void etnaviv_core_dump_header(struct core_dump_iterator *iter,
68         u32 type, void *data_end)
69 {
70         struct etnaviv_dump_object_header *hdr = iter->hdr;
71
72         hdr->magic = cpu_to_le32(ETDUMP_MAGIC);
73         hdr->type = cpu_to_le32(type);
74         hdr->file_offset = cpu_to_le32(iter->data - iter->start);
75         hdr->file_size = cpu_to_le32(data_end - iter->data);
76
77         iter->hdr++;
78         iter->data += le32_to_cpu(hdr->file_size);
79 }
80
81 static void etnaviv_core_dump_registers(struct core_dump_iterator *iter,
82         struct etnaviv_gpu *gpu)
83 {
84         struct etnaviv_dump_registers *reg = iter->data;
85         unsigned int i;
86         u32 read_addr;
87
88         for (i = 0; i < ARRAY_SIZE(etnaviv_dump_registers); i++, reg++) {
89                 read_addr = etnaviv_dump_registers[i];
90                 if (read_addr >= VIVS_PM_POWER_CONTROLS &&
91                     read_addr <= VIVS_PM_PULSE_EATER)
92                         read_addr = gpu_fix_power_address(gpu, read_addr);
93                 reg->reg = cpu_to_le32(etnaviv_dump_registers[i]);
94                 reg->value = cpu_to_le32(gpu_read(gpu, read_addr));
95         }
96
97         etnaviv_core_dump_header(iter, ETDUMP_BUF_REG, reg);
98 }
99
100 static void etnaviv_core_dump_mmu(struct core_dump_iterator *iter,
101         struct etnaviv_iommu_context *mmu, size_t mmu_size)
102 {
103         etnaviv_iommu_dump(mmu, iter->data);
104
105         etnaviv_core_dump_header(iter, ETDUMP_BUF_MMU, iter->data + mmu_size);
106 }
107
108 static void etnaviv_core_dump_mem(struct core_dump_iterator *iter, u32 type,
109         void *ptr, size_t size, u64 iova)
110 {
111         memcpy(iter->data, ptr, size);
112
113         iter->hdr->iova = cpu_to_le64(iova);
114
115         etnaviv_core_dump_header(iter, type, iter->data + size);
116 }
117
/*
 * Produce a devcoredump file describing the GPU state at the time a
 * submit hung. The dump is built in two passes: first every section is
 * sized to compute the total file size, then the sections are serialized
 * in the same order into a single vmalloc'ed buffer which is handed off
 * to dev_coredumpv() (which takes ownership and frees it).
 *
 * Fires at most once per arming of the dump_core module parameter.
 */
void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
{
        struct etnaviv_gpu *gpu = submit->gpu;
        struct core_dump_iterator iter;
        struct etnaviv_gem_object *obj;
        unsigned int n_obj, n_bomap_pages;
        size_t file_size, mmu_size;
        __le64 *bomap, *bomap_start;
        int i;

        /* Only catch the first event, or when manually re-armed */
        if (!etnaviv_dump_core)
                return;
        etnaviv_dump_core = false;

        /*
         * Hold the MMU context lock across sizing and serialization so the
         * page tables and cmdbuf mappings cannot change between the two
         * passes.
         */
        mutex_lock(&gpu->mmu_context->lock);

        mmu_size = etnaviv_iommu_dump_size(gpu->mmu_context);

        /* We always dump registers, mmu, ring, hanging cmdbuf and end marker */
        n_obj = 5;
        n_bomap_pages = 0;
        file_size = ARRAY_SIZE(etnaviv_dump_registers) *
                        sizeof(struct etnaviv_dump_registers) +
                    mmu_size + gpu->buffer.size + submit->cmdbuf.size;

        /* Add in the active buffer objects */
        for (i = 0; i < submit->nr_bos; i++) {
                obj = submit->bos[i].obj;
                file_size += obj->base.size;
                n_bomap_pages += obj->base.size >> PAGE_SHIFT;
                n_obj++;
        }

        /* If we have any buffer objects, add a bomap object */
        if (n_bomap_pages) {
                file_size += n_bomap_pages * sizeof(__le64);
                n_obj++;
        }

        /* Add the size of the headers */
        file_size += sizeof(*iter.hdr) * n_obj;

        /* Allocate the file in vmalloc memory, it's likely to be big */
        iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN |
                        __GFP_NORETRY);
        if (!iter.start) {
                mutex_unlock(&gpu->mmu_context->lock);
                dev_warn(gpu->dev, "failed to allocate devcoredump file\n");
                return;
        }

        /* Point the data member after the headers */
        iter.hdr = iter.start;
        iter.data = &iter.hdr[n_obj];

        /* Zero only the header array; payloads are fully overwritten below. */
        memset(iter.hdr, 0, iter.data - iter.start);

        etnaviv_core_dump_registers(&iter, gpu);
        etnaviv_core_dump_mmu(&iter, gpu->mmu_context, mmu_size);
        etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer.vaddr,
                              gpu->buffer.size,
                              etnaviv_cmdbuf_get_va(&gpu->buffer,
                                        &gpu->mmu_context->cmdbuf_mapping));

        etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD,
                              submit->cmdbuf.vaddr, submit->cmdbuf.size,
                              etnaviv_cmdbuf_get_va(&submit->cmdbuf,
                                        &gpu->mmu_context->cmdbuf_mapping));

        mutex_unlock(&gpu->mmu_context->lock);

        /* Reserve space for the bomap */
        if (n_bomap_pages) {
                bomap_start = bomap = iter.data;
                memset(bomap, 0, sizeof(*bomap) * n_bomap_pages);
                etnaviv_core_dump_header(&iter, ETDUMP_BUF_BOMAP,
                                         bomap + n_bomap_pages);
        } else {
                /* Silence warning */
                bomap_start = bomap = NULL;
        }

        /*
         * One ETDUMP_BUF_BO section per buffer object. Each BO header's
         * data[0] records that BO's starting index into the shared bomap
         * (physical page address table) emitted above.
         */
        for (i = 0; i < submit->nr_bos; i++) {
                struct etnaviv_vram_mapping *vram;
                struct page **pages;
                void *vaddr;

                obj = submit->bos[i].obj;
                vram = submit->bos[i].mapping;

                mutex_lock(&obj->lock);
                pages = etnaviv_gem_get_pages(obj);
                mutex_unlock(&obj->lock);
                if (!IS_ERR(pages)) {
                        int j;

                        /* Offset (in entries) of this BO's pages in the bomap. */
                        iter.hdr->data[0] = cpu_to_le32((bomap - bomap_start));

                        for (j = 0; j < obj->base.size >> PAGE_SHIFT; j++)
                                *bomap++ = cpu_to_le64(page_to_phys(*pages++));
                }

                iter.hdr->iova = cpu_to_le64(vram->iova);

                /* Best effort: if the BO can't be vmapped its payload stays zero. */
                vaddr = etnaviv_gem_vmap(&obj->base);
                if (vaddr)
                        memcpy(iter.data, vaddr, obj->base.size);

                etnaviv_core_dump_header(&iter, ETDUMP_BUF_BO, iter.data +
                                         obj->base.size);
        }

        /* Zero-length end marker terminates the dump file. */
        etnaviv_core_dump_header(&iter, ETDUMP_BUF_END, iter.data);

        /* dev_coredumpv() takes ownership of iter.start and frees it. */
        dev_coredumpv(gpu->dev, iter.start, iter.data - iter.start, GFP_KERNEL);
}