 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
27 #include <core/gpuobj.h>
 * r367 ACR: the new LS signature format requires a rewrite of the LS firmware
 * and blob creation functions. The hsflcn_desc layout has also changed
 * slightly compared to r352.
34 #define LSF_LSB_DEPMAP_SIZE 11
/*
 * NOTE(review): this extract is non-contiguous (embedded original line
 * numbers jump); several members of this struct are not visible here.
 * Documentation below covers only what is shown.
 */
37 * struct acr_r367_lsf_lsb_header - LS firmware header
39 * See also struct acr_r352_lsf_lsb_header for documentation.
41 struct acr_r367_lsf_lsb_header {
43 * LS falcon signatures
44 * @prd_keys: signature to use in production mode
45 * @dgb_keys: signature to use in debug mode
46 * @b_prd_present: whether the production key is present
47 * @b_dgb_present: whether the debug key is present
48 * @falcon_id: ID of the falcon the ucode applies to
/* non-zero when this LS firmware supports signature versioning (r367+) */
56 u32 supports_versioning;
/*
 * Dependency map blob: LSF_LSB_DEPMAP_SIZE entries, 2 * 4 bytes each.
 * NOTE(review): exact entry layout (presumably an id/version pair) is not
 * visible in this extract — confirm against the firmware interface docs.
 */
59 u8 depmap[LSF_LSB_DEPMAP_SIZE * 2 * 4];
77 * struct acr_r367_lsf_wpr_header - LS blob WPR Header
79 * See also struct acr_r352_lsf_wpr_header for documentation.
/* NOTE(review): struct members are not visible in this extract. */
81 struct acr_r367_lsf_wpr_header {
/*
 * Values for the WPR header 'status' field. The blob builder sets
 * LSF_IMAGE_STATUS_COPY (see acr_r367_ls_img_fill_headers below); the other
 * states are presumably written back by the secure falcon as it validates
 * and bootstraps each LS image — confirm against the HS firmware contract.
 */
88 #define LSF_IMAGE_STATUS_NONE 0
89 #define LSF_IMAGE_STATUS_COPY 1
90 #define LSF_IMAGE_STATUS_VALIDATION_CODE_FAILED 2
91 #define LSF_IMAGE_STATUS_VALIDATION_DATA_FAILED 3
92 #define LSF_IMAGE_STATUS_VALIDATION_DONE 4
93 #define LSF_IMAGE_STATUS_VALIDATION_SKIPPED 5
94 #define LSF_IMAGE_STATUS_BOOTSTRAP_READY 6
95 #define LSF_IMAGE_STATUS_REVOCATION_CHECK_FAILED 7
99 * struct ls_ucode_img_r367 - ucode image augmented with r367 headers
/*
 * Wraps the generic LS ucode image with the r367-format WPR and LSB headers
 * plus the per-version function table selected at load time.
 */
101 struct ls_ucode_img_r367 {
102 struct ls_ucode_img base;
/* per-signature-version ops, picked from func->version[] in img_load() */
104 const struct acr_r352_lsf_func *func;
106 struct acr_r367_lsf_wpr_header wpr_header;
107 struct acr_r367_lsf_lsb_header lsb_header;
/* upcast helper: generic ls_ucode_img -> its r367 container */
109 #define ls_ucode_img_r367(i) container_of(i, struct ls_ucode_img_r367, base)
/*
 * acr_r367_ls_ucode_img_load() - load and prepare one LS firmware image
 *
 * Allocates an r367 image wrapper, loads the firmware for @falcon_id via the
 * per-falcon ls_func load hook, selects the per-version function table from
 * the hook's return value, and copies the firmware signature into the LSB
 * header.
 *
 * Returns the generic &ls_ucode_img embedded in the wrapper, or an ERR_PTR
 * on failure.
 *
 * NOTE(review): this extract is missing lines (allocation NULL-check body,
 * the error path after func->load(), and the final return) — the comments
 * below describe only what is visible.
 */
111 struct ls_ucode_img *
112 acr_r367_ls_ucode_img_load(const struct acr_r352 *acr,
113 const struct nvkm_secboot *sb,
114 enum nvkm_secboot_falcon falcon_id)
116 const struct nvkm_subdev *subdev = acr->base.subdev;
117 const struct acr_r352_ls_func *func = acr->func->ls_func[falcon_id];
118 struct ls_ucode_img_r367 *img;
121 img = kzalloc(sizeof(*img), GFP_KERNEL);
123 return ERR_PTR(-ENOMEM);
125 img->base.falcon_id = falcon_id;
/* load hook returns the signature version actually loaded (used below) */
127 ret = func->load(sb, func->version_max, &img->base);
/* error path: release whatever the load hook allocated */
129 kfree(img->base.ucode_data);
130 kfree(img->base.sig);
/* pick the per-version ops matching the loaded signature version */
135 img->func = func->version[ret];
137 /* Check that the signature size matches our expectations... */
138 if (img->base.sig_size != sizeof(img->lsb_header.signature)) {
139 nvkm_error(subdev, "invalid signature size for %s falcon!\n",
140 nvkm_secboot_falcon_name[falcon_id]);
141 return ERR_PTR(-EINVAL);
144 /* Copy signature to the right place */
145 memcpy(&img->lsb_header.signature, img->base.sig, img->base.sig_size);
147 /* not needed? the signature should already have the right value */
148 img->lsb_header.signature.falcon_id = falcon_id;
/* Alignment constraints used when laying out the LS blob inside the WPR. */
153 #define LSF_LSB_HEADER_ALIGN 256
154 #define LSF_BL_DATA_ALIGN 256
155 #define LSF_BL_DATA_SIZE_ALIGN 256
156 #define LSF_BL_CODE_SIZE_ALIGN 256
157 #define LSF_UCODE_DATA_ALIGN 4096
/*
 * acr_r367_ls_img_fill_headers() - fill the WPR and LSB headers of an image
 *
 * Lays out one LS image inside the WPR region starting at @offset: WPR
 * header fields first, then the aligned LSB header, the ucode data, and the
 * boot-loader descriptor space. Offsets are recorded in the headers as they
 * are assigned.
 *
 * NOTE(review): this extract is missing lines (function return type/brace
 * and the final return of the updated offset are not visible); presumably
 * the advanced @offset is returned to the caller — confirm in full source.
 */
160 acr_r367_ls_img_fill_headers(struct acr_r352 *acr,
161 struct ls_ucode_img_r367 *img, u32 offset)
163 struct ls_ucode_img *_img = &img->base;
164 struct acr_r367_lsf_wpr_header *whdr = &img->wpr_header;
165 struct acr_r367_lsf_lsb_header *lhdr = &img->lsb_header;
166 struct ls_ucode_img_desc *desc = &_img->ucode_desc;
167 const struct acr_r352_lsf_func *func = img->func;
169 /* Fill WPR header */
170 whdr->falcon_id = _img->falcon_id;
171 whdr->bootstrap_owner = acr->base.boot_falcon;
172 whdr->bin_version = lhdr->signature.version;
173 whdr->status = LSF_IMAGE_STATUS_COPY;
175 /* Skip bootstrapping falcons started by someone else than ACR */
176 if (acr->lazy_bootstrap & BIT(_img->falcon_id))
177 whdr->lazy_bootstrap = 1;
179 /* Align, save off, and include an LSB header size */
180 offset = ALIGN(offset, LSF_LSB_HEADER_ALIGN)
181 whdr->lsb_offset = offset;
182 offset += sizeof(*lhdr);
185 * Align, save off, and include the original (static) ucode
188 offset = ALIGN(offset, LSF_UCODE_DATA_ALIGN);
189 _img->ucode_off = lhdr->ucode_off = offset;
190 offset += _img->ucode_size;
193 * For falcons that use a boot loader (BL), we append a loader
194 * desc structure on the end of the ucode image and consider
195 * this the boot loader data. The host will then copy the loader
196 * desc args to this space within the WPR region (before locking
197 * down) and the HS bin will then copy them to DMEM 0 for the
/* BL code/data sizes derived from the ucode descriptor, BL-aligned */
200 lhdr->bl_code_size = ALIGN(desc->bootloader_size,
201 LSF_BL_CODE_SIZE_ALIGN);
202 lhdr->ucode_size = ALIGN(desc->app_resident_data_offset,
203 LSF_BL_CODE_SIZE_ALIGN) + lhdr->bl_code_size;
204 lhdr->data_size = ALIGN(desc->app_size, LSF_BL_CODE_SIZE_ALIGN) +
205 lhdr->bl_code_size - lhdr->ucode_size;
207 * Though the BL is located at 0th offset of the image, the VA
208 * is different to make sure that it doesn't collide the actual
211 lhdr->bl_imem_off = desc->bootloader_imem_offset;
212 lhdr->app_code_off = desc->app_start_offset +
213 desc->app_resident_code_offset;
214 lhdr->app_code_size = desc->app_resident_code_size;
215 lhdr->app_data_off = desc->app_start_offset +
216 desc->app_resident_data_offset;
217 lhdr->app_data_size = desc->app_resident_data_size;
/* per-version flags; the boot falcon additionally needs DMACTL REQ_CTX */
219 lhdr->flags = func->lhdr_flags;
220 if (_img->falcon_id == acr->base.boot_falcon)
221 lhdr->flags |= LSF_FLAG_DMACTL_REQ_CTX;
223 /* Align and save off BL descriptor size */
224 lhdr->bl_data_size = ALIGN(func->bl_desc_size, LSF_BL_DATA_SIZE_ALIGN);
227 * Align, save off, and include the additional BL data
229 offset = ALIGN(offset, LSF_BL_DATA_ALIGN);
230 lhdr->bl_data_off = offset;
231 offset += lhdr->bl_data_size;
/*
 * acr_r367_ls_fill_headers() - compute the layout of all managed LS images
 *
 * Reserves the WPR-header array (one entry per image plus a terminator) at
 * the base of the WPR, then walks every managed image and fills its headers
 * while advancing the running offset.
 *
 * NOTE(review): lines are missing from this extract (the count accumulation
 * body and the final return); presumably the total WPR size is returned —
 * confirm against the full source.
 */
237 acr_r367_ls_fill_headers(struct acr_r352 *acr, struct list_head *imgs)
239 struct ls_ucode_img_r367 *img;
244 /* Count the number of images to manage */
245 list_for_each(l, imgs)
249 * Start with an array of WPR headers at the base of the WPR.
250 * The expectation here is that the secure falcon will do a single DMA
251 * read of this array and cache it internally so it's ok to pack these.
252 * Also, we add 1 to the falcon count to indicate the end of the array.
254 offset = sizeof(img->wpr_header) * (count + 1);
257 * Walk the managed falcons, accounting for the LSB structs
258 * as well as the ucode images.
260 list_for_each_entry(img, imgs, base.node) {
261 offset = acr_r367_ls_img_fill_headers(acr, img, offset);
/*
 * acr_r367_ls_write_wpr() - write all managed LS images into the WPR blob
 *
 * First pass computes the largest boot-loader descriptor so one scratch
 * buffer can serve every falcon; second pass writes, for each image, its
 * WPR header (packed at the blob base), its LSB header, the generated BL
 * descriptor, and the ucode data, at the offsets assigned by
 * acr_r367_ls_fill_headers(). The WPR-header array is terminated with
 * NVKM_SECBOOT_FALCON_INVALID.
 *
 * NOTE(review): lines missing from this extract include the kmalloc
 * NULL-check, loop closings, the scratch-buffer free and the return value.
 */
268 acr_r367_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
269 struct nvkm_gpuobj *wpr_blob, u64 wpr_addr)
271 struct ls_ucode_img *_img;
273 u32 max_desc_size = 0;
/* pass 1: size the shared BL-descriptor scratch buffer */
276 list_for_each_entry(_img, imgs, node) {
277 struct ls_ucode_img_r367 *img = ls_ucode_img_r367(_img);
278 const struct acr_r352_lsf_func *ls_func = img->func;
280 max_desc_size = max(max_desc_size, ls_func->bl_desc_size);
283 gdesc = kmalloc(max_desc_size, GFP_KERNEL);
/* pass 2: emit headers, BL descriptor and ucode for each image */
289 list_for_each_entry(_img, imgs, node) {
290 struct ls_ucode_img_r367 *img = ls_ucode_img_r367(_img);
291 const struct acr_r352_lsf_func *ls_func = img->func;
293 nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header,
294 sizeof(img->wpr_header));
296 nvkm_gpuobj_memcpy_to(wpr_blob, img->wpr_header.lsb_offset,
297 &img->lsb_header, sizeof(img->lsb_header));
299 /* Generate and write BL descriptor */
300 memset(gdesc, 0, ls_func->bl_desc_size);
301 ls_func->generate_bl_desc(&acr->base, _img, wpr_addr, gdesc);
303 nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.bl_data_off,
304 gdesc, ls_func->bl_desc_size);
307 nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.ucode_off,
308 _img->ucode_data, _img->ucode_size);
/* advance past this image's packed WPR header */
310 pos += sizeof(img->wpr_header);
/* terminate the WPR-header array */
313 nvkm_wo32(wpr_blob, pos, NVKM_SECBOOT_FALCON_INVALID);
/*
 * struct acr_r367_hsflcn_desc - high-secure falcon descriptor, r367 layout
 *
 * Consumed by the HS firmware; fixed up by acr_r367_fixup_hs_desc() before
 * the blob is handed off.
 *
 * NOTE(review): many members are not visible in this extract (the regions
 * container, region ids/masks, ucode_blob_size, ...) — see the full source.
 */
322 struct acr_r367_hsflcn_desc {
323 u8 reserved_dmem[0x200];
327 u32 mmu_memory_range;
328 #define FLCN_ACR_MAX_REGIONS 2
/* per-region properties; shadow_mem_start_addr is new in the r367 layout */
338 u32 shadow_mem_start_addr;
339 } region_props[FLCN_ACR_MAX_REGIONS];
340 u64 ucode_blob_base __aligned(8);
/*
 * acr_r367_fixup_hs_desc() - patch WPR/blob information into the HS descriptor
 *
 * When the chip has no fixed WPR region (sb->wpr_size == 0), describes the
 * LS blob's own placement as WPR region 1 (addresses stored >> 8, i.e. in
 * 256-byte units) and records the ucode blob base/size for the HS firmware.
 * When a shadow blob is used, the real WPR starts in the second half of the
 * LS blob and the first half becomes the shadow copy.
 *
 * NOTE(review): lines are missing from this extract (the _desc parameter
 * declaration, the shadow_mem_start_addr value in the shadow branch, and
 * the else keyword around line content 378) — comments limited to what is
 * visible.
 */
352 acr_r367_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb,
355 struct acr_r367_hsflcn_desc *desc = _desc;
356 struct nvkm_gpuobj *ls_blob = acr->ls_blob;
358 /* WPR region information if WPR is not fixed */
359 if (sb->wpr_size == 0) {
360 u64 wpr_start = ls_blob->addr;
361 u64 wpr_end = ls_blob->addr + ls_blob->size;
/* with a shadow blob, the WPR proper is the second half of the LS blob */
363 if (acr->func->shadow_blob)
364 wpr_start += ls_blob->size / 2;
366 desc->wpr_region_id = 1;
367 desc->regions.no_regions = 2;
/* region addresses are stored in 256-byte units (>> 8) */
368 desc->regions.region_props[0].start_addr = wpr_start >> 8;
369 desc->regions.region_props[0].end_addr = wpr_end >> 8;
370 desc->regions.region_props[0].region_id = 1;
371 desc->regions.region_props[0].read_mask = 0xf;
372 desc->regions.region_props[0].write_mask = 0xc;
373 desc->regions.region_props[0].client_mask = 0x2;
374 if (acr->func->shadow_blob)
375 desc->regions.region_props[0].shadow_mem_start_addr =
378 desc->regions.region_props[0].shadow_mem_start_addr = 0;
380 desc->ucode_blob_base = ls_blob->addr;
381 desc->ucode_blob_size = ls_blob->size;
/*
 * LS function table for the SEC2 falcon under r367 ACR: generic load and
 * post-run hooks plus the per-signature-version function tables (r361 and
 * r370 variants).
 * NOTE(review): the version[] array designator and version_max field are
 * not visible in this extract.
 */
385 static const struct acr_r352_ls_func
386 acr_r367_ls_sec2_func = {
387 .load = acr_ls_ucode_load_sec2,
388 .post_run = acr_ls_sec2_post_run,
391 &acr_r361_ls_sec2_func_0,
392 &acr_r370_ls_sec2_func_0,
/*
 * r367 ACR implementation: reuses the r361 HS boot-loader descriptor and
 * most per-falcon LS functions, overriding the image load / header fill /
 * WPR write paths and the HS descriptor fixup with the r367 variants above.
 * NOTE(review): the table's name line and some fields are not visible in
 * this extract.
 */
396 const struct acr_r352_func
398 .fixup_hs_desc = acr_r367_fixup_hs_desc,
399 .generate_hs_bl_desc = acr_r361_generate_hs_bl_desc,
400 .hs_bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
402 .ls_ucode_img_load = acr_r367_ls_ucode_img_load,
403 .ls_fill_headers = acr_r367_ls_fill_headers,
404 .ls_write_wpr = acr_r367_ls_write_wpr,
406 [NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func,
407 [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func,
408 [NVKM_SECBOOT_FALCON_PMU] = &acr_r361_ls_pmu_func,
409 [NVKM_SECBOOT_FALCON_SEC2] = &acr_r367_ls_sec2_func,
/*
 * acr_r367_new() - create an r367 ACR instance
 *
 * Thin constructor: delegates to the r352 base constructor with the r367
 * function table, @boot_falcon as the HS bootstrap owner and the bitmask of
 * @managed_falcons.
 */
414 acr_r367_new(enum nvkm_secboot_falcon boot_falcon,
415 unsigned long managed_falcons)
417 return acr_r352_new_(&acr_r367_func, boot_falcon, managed_falcons);