1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (C) 2020, Intel Corporation
6 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
10 #include <linux/slab.h>
11 #include <linux/vmalloc.h>
15 /* Intel specific NVM offsets */
/* Offset of the u16 device ID inside the image's digital section */
16 #define INTEL_NVM_DEVID 0x05
/* Version word: major in bits 23:16, minor in bits 15:8 (see readers below) */
17 #define INTEL_NVM_VERSION 0x08
/* Offset of the CSS headers inside the image buffer */
18 #define INTEL_NVM_CSS 0x10
/* Flash size field: low 3 bits encode total size as (SZ_1M << (val & 7)) / 8 */
19 #define INTEL_NVM_FLASH_SIZE 0x45
21 /* ASMedia specific NVM offsets */
/* Build date word, folded into nvm->minor by asmedia_switch_nvm_version() */
22 #define ASMEDIA_NVM_DATE 0x1c
/* Version word, folded into nvm->major by asmedia_switch_nvm_version() */
23 #define ASMEDIA_NVM_VERSION 0x28
/* Allocates unique ids for tb_nvm instances; released in tb_nvm_free() */
25 static DEFINE_IDA(nvm_ida);
28 * struct tb_nvm_vendor_ops - Vendor specific NVM operations
29 * @read_version: Reads out NVM version from the flash
30 * @validate: Validates the NVM image before update (optional)
31 * @write_headers: Writes headers before the rest of the image (optional)
/*
 * Each hook takes the &struct tb_nvm being operated on and returns %0 on
 * success or a negative errno. NOTE(review): the closing brace of this
 * struct is elided from this listing.
 */
33 struct tb_nvm_vendor_ops {
34 int (*read_version)(struct tb_nvm *nvm);
35 int (*validate)(struct tb_nvm *nvm);
36 int (*write_headers)(struct tb_nvm *nvm);
40 * struct tb_nvm_vendor - Vendor to &struct tb_nvm_vendor_ops mapping
42 * @vops: Vendor specific NVM operations
44 * Maps vendor ID to NVM vendor operations. If there is no mapping then
45 * NVM firmware upgrade is disabled for the device.
47 struct tb_nvm_vendor {
/* NOTE(review): the vendor ID member is elided from this listing; the
 * lookup loops in tb_nvm_alloc() match against v->vendor. */
49 const struct tb_nvm_vendor_ops *vops;
/*
 * Reads the active NVM version and computes the active region size for
 * an Intel router, filling nvm->major, nvm->minor and nvm->active_size.
 * NOTE(review): local declarations and the error checks after each
 * tb_switch_nvm_read() call are elided from this listing.
 */
52 static int intel_switch_nvm_version(struct tb_nvm *nvm)
54 struct tb_switch *sw = tb_to_switch(nvm->dev);
55 u32 val, nvm_size, hdr_size;
59 * If the switch is in safe-mode the only accessible portion of
60 * the NVM is the non-active one where userspace is expected to
61 * write new functional NVM.
66 ret = tb_switch_nvm_read(sw, INTEL_NVM_FLASH_SIZE, &val, sizeof(val));
/* Pre-gen3 hardware uses a smaller (8k) header before the image data */
70 hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
/* Low 3 bits of the flash size field encode the total flash size */
71 nvm_size = (SZ_1M << (val & 7)) / 8;
/* Remaining space halved -- presumably active/non-active regions; TODO confirm */
72 nvm_size = (nvm_size - hdr_size) / 2;
74 ret = tb_switch_nvm_read(sw, INTEL_NVM_VERSION, &val, sizeof(val));
/* Version word layout: major in bits 23:16, minor in bits 15:8 */
78 nvm->major = (val >> 16) & 0xff;
79 nvm->minor = (val >> 8) & 0xff;
80 nvm->active_size = nvm_size;
/*
 * Validates a new Intel router NVM image cached in nvm->buf: checks the
 * FARB pointer, the digital section alignment and size, and that the
 * image's device ID matches the router. On success points
 * nvm->buf_data_start past the headers so only payload gets flashed.
 * NOTE(review): local declarations (including the buf assignment) and
 * the error-return lines after each check are elided from this listing.
 */
85 static int intel_switch_nvm_validate(struct tb_nvm *nvm)
87 struct tb_switch *sw = tb_to_switch(nvm->dev);
88 unsigned int image_size, hdr_size;
89 u16 ds_size, device_id;
92 image_size = nvm->buf_data_size;
95 * FARB pointer must point inside the image and must at least
96 * contain parts of the digital section we will be reading here.
/* Low 24 bits of the first dword give the digital section offset */
98 hdr_size = (*(u32 *)buf) & 0xffffff;
/* +2 so the u16 device ID read below stays inside the buffer */
99 if (hdr_size + INTEL_NVM_DEVID + 2 >= image_size)
102 /* Digital section start should be aligned to 4k page */
103 if (!IS_ALIGNED(hdr_size, SZ_4K))
107 * Read digital section size and check that it also fits inside
110 ds_size = *(u16 *)(buf + hdr_size);
111 if (ds_size >= image_size)
118 * Make sure the device ID in the image matches the one
119 * we read from the switch config space.
121 device_id = *(u16 *)(buf + hdr_size + INTEL_NVM_DEVID);
122 if (device_id != sw->config.device_id)
125 /* Skip headers in the image */
126 nvm->buf_data_start = buf + hdr_size;
127 nvm->buf_data_size = image_size - hdr_size;
/*
 * Pre-gen3 Intel routers need the CSS headers written to a dedicated
 * DMA port flash address before the rest of the image is flashed.
 * NOTE(review): the generation >= 3 path, the error handling and the
 * final return are elided from this listing.
 */
132 static int intel_switch_nvm_write_headers(struct tb_nvm *nvm)
134 struct tb_switch *sw = tb_to_switch(nvm->dev);
136 if (sw->generation < 3) {
139 /* Write CSS headers first */
140 ret = dma_port_flash_write(sw->dma_port,
141 DMA_PORT_CSS_ADDRESS, nvm->buf + INTEL_NVM_CSS,
142 DMA_PORT_CSS_MAX_SIZE);
/* Intel router NVM operations: version read, image validation, header write */
150 static const struct tb_nvm_vendor_ops intel_switch_nvm_ops = {
151 .read_version = intel_switch_nvm_version,
152 .validate = intel_switch_nvm_validate,
153 .write_headers = intel_switch_nvm_write_headers,
/*
 * Reads the active NVM version of an ASMedia router. The three OR
 * lines after each read implement a 3-byte swap of the low 24 bits,
 * presumably because the flash stores these words in reverse byte
 * order -- TODO confirm against ASMedia documentation.
 * NOTE(review): local declarations and error checks are elided here.
 */
156 static int asmedia_switch_nvm_version(struct tb_nvm *nvm)
158 struct tb_switch *sw = tb_to_switch(nvm->dev);
162 ret = tb_switch_nvm_read(sw, ASMEDIA_NVM_VERSION, &val, sizeof(val));
/* major = byte-reversed low 24 bits of the version word */
166 nvm->major = (val << 16) & 0xff0000;
167 nvm->major |= val & 0x00ff00;
168 nvm->major |= (val >> 16) & 0x0000ff;
170 ret = tb_switch_nvm_read(sw, ASMEDIA_NVM_DATE, &val, sizeof(val));
/* minor = byte-reversed low 24 bits of the date word */
174 nvm->minor = (val << 16) & 0xff0000;
175 nvm->minor |= val & 0x00ff00;
176 nvm->minor |= (val >> 16) & 0x0000ff;
178 /* ASMedia NVM size is fixed to 512k */
179 nvm->active_size = SZ_512K;
/* ASMedia routers only implement the version read hook here */
184 static const struct tb_nvm_vendor_ops asmedia_switch_nvm_ops = {
185 .read_version = asmedia_switch_nvm_version,
188 /* Router vendor NVM support table */
189 static const struct tb_nvm_vendor switch_nvm_vendors[] = {
/* 0x174c: ASMedia PCI vendor ID. 0x8087 is presumably a second Intel
 * vendor ID alongside PCI_VENDOR_ID_INTEL -- TODO confirm. */
190 { 0x174c, &asmedia_switch_nvm_ops },
191 { PCI_VENDOR_ID_INTEL, &intel_switch_nvm_ops },
192 { 0x8087, &intel_switch_nvm_ops },
/*
 * Reads the active NVM version and size of an Intel retimer. Uses the
 * same version word layout as intel_switch_nvm_version(); the header
 * size is fixed at 16k here. NOTE(review): local declarations and the
 * error checks after each read are elided from this listing.
 */
195 static int intel_retimer_nvm_version(struct tb_nvm *nvm)
197 struct tb_retimer *rt = tb_to_retimer(nvm->dev);
201 ret = tb_retimer_nvm_read(rt, INTEL_NVM_VERSION, &val, sizeof(val));
/* Version word layout: major in bits 23:16, minor in bits 15:8 */
205 nvm->major = (val >> 16) & 0xff;
206 nvm->minor = (val >> 8) & 0xff;
208 ret = tb_retimer_nvm_read(rt, INTEL_NVM_FLASH_SIZE, &val, sizeof(val));
/* Same size encoding as the router variant, minus a fixed 16k header */
212 nvm_size = (SZ_1M << (val & 7)) / 8;
213 nvm_size = (nvm_size - SZ_16K) / 2;
214 nvm->active_size = nvm_size;
/*
 * Validates a new Intel retimer NVM image. Mirrors
 * intel_switch_nvm_validate() but compares the image's device ID with
 * the retimer's rt->device. NOTE(review): local declarations
 * (including the buf assignment) and the error-return lines after each
 * check are elided from this listing.
 */
219 static int intel_retimer_nvm_validate(struct tb_nvm *nvm)
221 struct tb_retimer *rt = tb_to_retimer(nvm->dev);
222 unsigned int image_size, hdr_size;
226 image_size = nvm->buf_data_size;
229 * FARB pointer must point inside the image and must at least
230 * contain parts of the digital section we will be reading here.
/* Low 24 bits of the first dword give the digital section offset */
232 hdr_size = (*(u32 *)buf) & 0xffffff;
233 if (hdr_size + INTEL_NVM_DEVID + 2 >= image_size)
236 /* Digital section start should be aligned to 4k page */
237 if (!IS_ALIGNED(hdr_size, SZ_4K))
241 * Read digital section size and check that it also fits inside
244 ds_size = *(u16 *)(buf + hdr_size);
245 if (ds_size >= image_size)
249 * Make sure the device ID in the image matches the retimer
252 device = *(u16 *)(buf + hdr_size + INTEL_NVM_DEVID);
253 if (device != rt->device)
256 /* Skip headers in the image */
257 nvm->buf_data_start = buf + hdr_size;
258 nvm->buf_data_size = image_size - hdr_size;
/* Intel retimer NVM operations; no separate header-write step is listed */
263 static const struct tb_nvm_vendor_ops intel_retimer_nvm_ops = {
264 .read_version = intel_retimer_nvm_version,
265 .validate = intel_retimer_nvm_validate,
268 /* Retimer vendor NVM support table */
269 static const struct tb_nvm_vendor retimer_nvm_vendors[] = {
270 { 0x8087, &intel_retimer_nvm_ops },
274 * tb_nvm_alloc() - Allocate new NVM structure
275 * @dev: Device owning the NVM
277 * Allocates new NVM structure with unique @id and returns it. In case
278 * of error returns ERR_PTR(). Specifically returns %-EOPNOTSUPP if the
279 * NVM format of the @dev is not known by the kernel.
281 struct tb_nvm *tb_nvm_alloc(struct device *dev)
283 const struct tb_nvm_vendor_ops *vops = NULL;
287 if (tb_is_switch(dev)) {
288 const struct tb_switch *sw = tb_to_switch(dev);
/* Look up the vendor ops by the router's config space vendor ID */
290 for (i = 0; i < ARRAY_SIZE(switch_nvm_vendors); i++) {
291 const struct tb_nvm_vendor *v = &switch_nvm_vendors[i];
293 if (v->vendor == sw->config.vendor_id) {
/* NOTE(review): the vops assignment/break and the !vops test are elided here */
300 tb_sw_dbg(sw, "router NVM format of vendor %#x unknown\n",
301 sw->config.vendor_id);
302 return ERR_PTR(-EOPNOTSUPP);
304 } else if (tb_is_retimer(dev)) {
305 const struct tb_retimer *rt = tb_to_retimer(dev);
/* Same lookup for retimers, keyed on rt->vendor */
307 for (i = 0; i < ARRAY_SIZE(retimer_nvm_vendors); i++) {
308 const struct tb_nvm_vendor *v = &retimer_nvm_vendors[i];
310 if (v->vendor == rt->vendor) {
317 dev_dbg(dev, "retimer NVM format of vendor %#x unknown\n",
319 return ERR_PTR(-EOPNOTSUPP);
/* Neither a router nor a retimer: no NVM support for this device */
322 return ERR_PTR(-EOPNOTSUPP);
325 nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
327 return ERR_PTR(-ENOMEM);
/* Reserve a unique id; released via ida_simple_remove() in tb_nvm_free().
 * NOTE(review): the assignments of id/dev/vops into nvm and the final
 * return are elided from this listing. */
329 ret = ida_simple_get(&nvm_ida, 0, 0, GFP_KERNEL);
343 * tb_nvm_read_version() - Read and populate NVM version
344 * @nvm: NVM structure
346 * Uses vendor specific means to read out and fill in the existing
347 * active NVM version. Returns %0 in case of success and negative errno
350 int tb_nvm_read_version(struct tb_nvm *nvm)
352 const struct tb_nvm_vendor_ops *vops = nvm->vops;
/* Delegate to the vendor hook when one is provided.
 * NOTE(review): the fallthrough return for a missing hook is elided. */
354 if (vops && vops->read_version)
355 return vops->read_version(nvm);
361 * tb_nvm_validate() - Validate new NVM image
362 * @nvm: NVM structure
364 * Runs vendor specific validation over the new NVM image and if all
365 * checks pass returns %0. As side effect updates @nvm->buf_data_start
366 * and @nvm->buf_data_size fields to match the actual data to be written
369 * If the validation does not pass then returns negative errno.
371 int tb_nvm_validate(struct tb_nvm *nvm)
373 const struct tb_nvm_vendor_ops *vops = nvm->vops;
374 unsigned int image_size;
/* NOTE(review): the buf assignment and its guard checks are elided here */
382 /* Just do basic image size checks */
383 image_size = nvm->buf_data_size;
384 if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
388 * Set the default data start in the buffer. The validate method
389 * below can change this if needed.
391 nvm->buf_data_start = buf;
/* Vendor validation is optional; without it any size-valid image passes */
393 return vops->validate ? vops->validate(nvm) : 0;
397 * tb_nvm_write_headers() - Write headers before the rest of the image
398 * @nvm: NVM structure
400 * If the vendor NVM format requires writing headers before the rest of
401 * the image, this function does that. Can be called even if the device
402 * does not need this.
404 * Returns %0 in case of success and negative errno otherwise.
406 int tb_nvm_write_headers(struct tb_nvm *nvm)
408 const struct tb_nvm_vendor_ops *vops = nvm->vops;
/* vops is dereferenced without a NULL check -- presumably guaranteed
 * non-NULL because tb_nvm_alloc() fails when no vendor ops match;
 * TODO confirm. The write_headers hook itself is optional. */
410 return vops->write_headers ? vops->write_headers(nvm) : 0;
414 * tb_nvm_add_active() - Adds active NVMem device to NVM
415 * @nvm: NVM structure
416 * @reg_read: Pointer to the function to read the NVM (passed directly to the
419 * Registers new active NVMem device for @nvm. The @reg_read is called
420 * directly from NVMem so it must handle possible concurrent access if
421 * needed. The first parameter passed to @reg_read is @nvm structure.
422 * Returns %0 in success and negative errno otherwise.
424 int tb_nvm_add_active(struct tb_nvm *nvm, nvmem_reg_read_t reg_read)
426 struct nvmem_config config;
427 struct nvmem_device *nvmem;
429 memset(&config, 0, sizeof(config));
431 config.name = "nvm_active";
432 config.reg_read = reg_read;
/* The active NVM side is exposed read-only */
433 config.read_only = true;
436 config.word_size = 4;
/* Size was filled in by the vendor read_version hook */
437 config.size = nvm->active_size;
438 config.dev = nvm->dev;
439 config.owner = THIS_MODULE;
442 nvmem = nvmem_register(&config);
444 return PTR_ERR(nvmem);
/* NOTE(review): storing nvmem in nvm->active and the success return are
 * elided from this listing. */
451 * tb_nvm_write_buf() - Write data to @nvm buffer
452 * @nvm: NVM structure
453 * @offset: Offset where to write the data
454 * @val: Data buffer to write
455 * @bytes: Number of bytes to write
457 * Helper function to cache the new NVM image before it is actually
458 * written to the flash. Copies @bytes from @val to @nvm->buf starting
461 int tb_nvm_write_buf(struct tb_nvm *nvm, unsigned int offset, void *val,
/* NOTE(review): the allocate-once guard around this vmalloc() and its
 * -ENOMEM check are elided from this listing. */
465 nvm->buf = vmalloc(NVM_MAX_SIZE);
/* A new write invalidates any previous flush state */
470 nvm->flushed = false;
/* Tracks end of the latest write -- presumably callers write the image
 * sequentially so this ends up as the total image size; TODO confirm. */
471 nvm->buf_data_size = offset + bytes;
472 memcpy(nvm->buf + offset, val, bytes);
477 * tb_nvm_add_non_active() - Adds non-active NVMem device to NVM
478 * @nvm: NVM structure
479 * @reg_write: Pointer to the function to write the NVM (passed directly
480 * to the NVMem device)
482 * Registers new non-active NVMem device for @nvm. The @reg_write is called
483 * directly from NVMem so it must handle possible concurrent access if
484 * needed. The first parameter passed to @reg_write is @nvm structure.
485 * The size of the NVMem device is set to %NVM_MAX_SIZE.
487 * Returns %0 in success and negative errno otherwise.
489 int tb_nvm_add_non_active(struct tb_nvm *nvm, nvmem_reg_write_t reg_write)
491 struct nvmem_config config;
492 struct nvmem_device *nvmem;
494 memset(&config, 0, sizeof(config));
496 config.name = "nvm_non_active";
497 config.reg_write = reg_write;
/* Only root may push new firmware images */
498 config.root_only = true;
501 config.word_size = 4;
502 config.size = NVM_MAX_SIZE;
503 config.dev = nvm->dev;
504 config.owner = THIS_MODULE;
507 nvmem = nvmem_register(&config);
509 return PTR_ERR(nvmem);
511 nvm->non_active = nvmem;
516 * tb_nvm_free() - Release NVM and its resources
517 * @nvm: NVM structure to release
519 * Releases NVM and the NVMem devices if they were registered.
521 void tb_nvm_free(struct tb_nvm *nvm)
/* NOTE(review): lines are elided here -- likely a NULL guard on @nvm
 * and freeing of nvm->buf; confirm against the full source. */
524 nvmem_unregister(nvm->non_active);
525 nvmem_unregister(nvm->active);
/* Give the id back to the allocator defined at the top of the file */
527 ida_simple_remove(&nvm_ida, nvm->id);
533 * tb_nvm_read_data() - Read data from NVM
534 * @address: Start address on the flash
535 * @buf: Buffer where the read data is copied
536 * @size: Size of the buffer in bytes
537 * @retries: Number of retries if block read fails
538 * @read_block: Function that reads block from the flash
539 * @read_block_data: Data passed to @read_block
541 * This is a generic function that reads data from NVM or NVM like
544 * Returns %0 on success and negative errno otherwise.
546 int tb_nvm_read_data(unsigned int address, void *buf, size_t size,
547 unsigned int retries, read_block_fn read_block,
548 void *read_block_data)
551 unsigned int dwaddress, dwords, offset;
552 u8 data[NVM_DATA_DWORDS * 4];
/* NOTE(review): the surrounding chunk loop is elided from this listing */
/* Reads are dword based: split @address into dword address + byte offset */
556 offset = address & 3;
/* Clamp each chunk to the NVM_DATA_DWORDS scratch buffer */
557 nbytes = min_t(size_t, size + offset, NVM_DATA_DWORDS * 4);
559 dwaddress = address / 4;
560 dwords = ALIGN(nbytes, 4) / 4;
562 ret = read_block(read_block_data, dwaddress, data, dwords);
/* Retry transient failures; -ENODEV means the device is gone, give up */
564 if (ret != -ENODEV && retries--)
/* Copy out, skipping the leading alignment bytes. NOTE(review): nbytes
 * still includes @offset at this point -- the elided lines presumably
 * subtract it before this copy; confirm against the full source. */
570 memcpy(buf, data + offset, nbytes);
581 * tb_nvm_write_data() - Write data to NVM
582 * @address: Start address on the flash
583 * @buf: Buffer where the data is copied from
584 * @size: Size of the buffer in bytes
585 * @retries: Number of retries if the block write fails
586 * @write_block: Function that writes block to the flash
587 * @write_block_data: Data passed to @write_block
589 * This is generic function that writes data to NVM or NVM like device.
591 * Returns %0 on success and negative errno otherwise.
593 int tb_nvm_write_data(unsigned int address, const void *buf, size_t size,
594 unsigned int retries, write_block_fn write_block,
595 void *write_block_data)
598 unsigned int offset, dwaddress;
599 u8 data[NVM_DATA_DWORDS * 4];
/* NOTE(review): the surrounding chunk loop is elided from this listing */
/* Writes are dword based: split @address into dword address + byte offset */
603 offset = address & 3;
604 nbytes = min_t(u32, size + offset, NVM_DATA_DWORDS * 4);
/* Stage the chunk in the dword-aligned scratch buffer.
 * NOTE(review): nbytes includes @offset here -- verify whether the
 * elided lines adjust the copy length before this memcpy. */
606 memcpy(data + offset, buf, nbytes);
608 dwaddress = address / 4;
609 ret = write_block(write_block_data, dwaddress, data, nbytes / 4);
/* Timeouts are retried; other handling is elided from this listing */
611 if (ret == -ETIMEDOUT) {
/* Tears down the id allocator -- presumably called at driver unload;
 * TODO confirm caller. NOTE(review): braces elided in this listing. */
627 void tb_nvm_exit(void)
629 ida_destroy(&nvm_ida);