// SPDX-License-Identifier: GPL-2.0
/*
 * Intel MAX10 Board Management Controller Secure Update Driver
 *
 * Copyright (C) 2019-2022 Intel Corporation. All rights reserved.
 *
 */
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/mfd/intel-m10-bmc.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
struct m10bmc_sec;

struct m10bmc_sec_ops {
	int (*rsu_status)(struct m10bmc_sec *sec);
};

struct m10bmc_sec {
	struct device *dev;
	struct intel_m10bmc *m10bmc;
	struct fw_upload *fwl;
	char *fw_name;
	u32 fw_name_id;
	bool cancel_request;
	const struct m10bmc_sec_ops *ops;
};

static DEFINE_XARRAY_ALLOC(fw_upload_xa);
/* Root Entry Hash (REH) support */
#define REH_SHA256_SIZE		32
#define REH_SHA384_SIZE		48
#define REH_MAGIC		GENMASK(15, 0)
#define REH_SHA_NUM_BYTES	GENMASK(31, 16)
static int m10bmc_sec_write(struct m10bmc_sec *sec, const u8 *buf, u32 offset, u32 size)
{
	struct intel_m10bmc *m10bmc = sec->m10bmc;
	unsigned int stride = regmap_get_reg_stride(m10bmc->regmap);
	u32 write_count = size / stride;
	u32 leftover_offset = write_count * stride;
	u32 leftover_size = size - leftover_offset;
	u32 leftover_tmp = 0;
	int ret;

	if (sec->m10bmc->flash_bulk_ops)
		return sec->m10bmc->flash_bulk_ops->write(m10bmc, buf, offset, size);

	if (WARN_ON_ONCE(stride > sizeof(leftover_tmp)))
		return -EINVAL;

	ret = regmap_bulk_write(m10bmc->regmap, M10BMC_STAGING_BASE + offset,
				buf + offset, write_count);
	if (ret)
		return ret;

	/* If size is not aligned to stride, handle the remainder bytes with regmap_write() */
	if (leftover_size) {
		memcpy(&leftover_tmp, buf + leftover_offset, leftover_size);
		ret = regmap_write(m10bmc->regmap, M10BMC_STAGING_BASE + offset + leftover_offset,
				   leftover_tmp);
		if (ret)
			return ret;
	}

	return 0;
}
static int m10bmc_sec_read(struct m10bmc_sec *sec, u8 *buf, u32 addr, u32 size)
{
	struct intel_m10bmc *m10bmc = sec->m10bmc;
	unsigned int stride = regmap_get_reg_stride(m10bmc->regmap);
	u32 read_count = size / stride;
	u32 leftover_offset = read_count * stride;
	u32 leftover_size = size - leftover_offset;
	u32 leftover_tmp;
	int ret;

	if (sec->m10bmc->flash_bulk_ops)
		return sec->m10bmc->flash_bulk_ops->read(m10bmc, buf, addr, size);

	if (WARN_ON_ONCE(stride > sizeof(leftover_tmp)))
		return -EINVAL;

	ret = regmap_bulk_read(m10bmc->regmap, addr, buf, read_count);
	if (ret)
		return ret;

	/* If size is not aligned to stride, handle the remainder bytes with regmap_read() */
	if (leftover_size) {
		ret = regmap_read(m10bmc->regmap, addr + leftover_offset, &leftover_tmp);
		if (ret)
			return ret;
		memcpy(buf + leftover_offset, &leftover_tmp, leftover_size);
	}

	return 0;
}
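/*
 * The REH "program" register holds a magic number in its low 16 bits and
 * the hash length in its high 16 bits; the hash itself is read from a
 * separate address. The length field appears to be encoded in bits
 * (hence the divide-by-8 below), so a programmed SHA-256 hash reports
 * 256 in REH_SHA_NUM_BYTES despite the field name.
 */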
static ssize_t
show_root_entry_hash(struct device *dev, u32 exp_magic,
		     u32 prog_addr, u32 reh_addr, char *buf)
{
	struct m10bmc_sec *sec = dev_get_drvdata(dev);
	int sha_num_bytes, i, ret, cnt = 0;
	u8 hash[REH_SHA384_SIZE];
	u32 magic;

	ret = m10bmc_sec_read(sec, (u8 *)&magic, prog_addr, sizeof(magic));
	if (ret)
		return ret;

	if (FIELD_GET(REH_MAGIC, magic) != exp_magic)
		return sysfs_emit(buf, "hash not programmed\n");

	sha_num_bytes = FIELD_GET(REH_SHA_NUM_BYTES, magic) / 8;
	if (sha_num_bytes != REH_SHA256_SIZE &&
	    sha_num_bytes != REH_SHA384_SIZE) {
		dev_err(sec->dev, "%s bad sha num bytes %d\n", __func__,
			sha_num_bytes);
		return -EINVAL;
	}

	ret = m10bmc_sec_read(sec, hash, reh_addr, sha_num_bytes);
	if (ret) {
		dev_err(dev, "failed to read root entry hash\n");
		return ret;
	}

	for (i = 0; i < sha_num_bytes; i++)
		cnt += sprintf(buf + cnt, "%02x", hash[i]);
	cnt += sprintf(buf + cnt, "\n");

	return cnt;
}
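/*
 * DEVICE_ATTR_SEC_REH_RO() stamps out one read-only sysfs attribute per
 * security domain. The instantiations below yield bmc_root_entry_hash,
 * sr_root_entry_hash and pr_root_entry_hash, each pulling its magic
 * value and CSR addresses from the per-board csr_map.
 */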
#define DEVICE_ATTR_SEC_REH_RO(_name) \
static ssize_t _name##_root_entry_hash_show(struct device *dev, \
					    struct device_attribute *attr, \
					    char *buf) \
{ \
	struct m10bmc_sec *sec = dev_get_drvdata(dev); \
	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map; \
\
	return show_root_entry_hash(dev, csr_map->_name##_magic, \
				    csr_map->_name##_prog_addr, \
				    csr_map->_name##_reh_addr, \
				    buf); \
} \
static DEVICE_ATTR_RO(_name##_root_entry_hash)

DEVICE_ATTR_SEC_REH_RO(bmc);
DEVICE_ATTR_SEC_REH_RO(sr);
DEVICE_ATTR_SEC_REH_RO(pr);
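/*
 * Cancellation of Code Signing Keys (CSKs): the hardware stores the
 * cancellation vector with inverted polarity (a cleared bit marks a
 * canceled key), so the bitmap is complemented before printing and the
 * sysfs output lists the IDs of canceled keys.
 */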
#define CSK_BIT_LEN		128U
#define CSK_32ARRAY_SIZE	DIV_ROUND_UP(CSK_BIT_LEN, 32)

static ssize_t
show_canceled_csk(struct device *dev, u32 addr, char *buf)
{
	unsigned int i, size = CSK_32ARRAY_SIZE * sizeof(u32);
	struct m10bmc_sec *sec = dev_get_drvdata(dev);
	DECLARE_BITMAP(csk_map, CSK_BIT_LEN);
	__le32 csk_le32[CSK_32ARRAY_SIZE];
	u32 csk32[CSK_32ARRAY_SIZE];
	int ret;

	ret = m10bmc_sec_read(sec, (u8 *)&csk_le32, addr, size);
	if (ret) {
		dev_err(sec->dev, "failed to read CSK vector\n");
		return ret;
	}

	for (i = 0; i < CSK_32ARRAY_SIZE; i++)
		csk32[i] = le32_to_cpu(csk_le32[i]);

	bitmap_from_arr32(csk_map, csk32, CSK_BIT_LEN);
	bitmap_complement(csk_map, csk_map, CSK_BIT_LEN);
	return bitmap_print_to_pagebuf(1, buf, csk_map, CSK_BIT_LEN);
}
#define DEVICE_ATTR_SEC_CSK_RO(_name) \
static ssize_t _name##_canceled_csks_show(struct device *dev, \
					  struct device_attribute *attr, \
					  char *buf) \
{ \
	struct m10bmc_sec *sec = dev_get_drvdata(dev); \
	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map; \
\
	return show_canceled_csk(dev, \
				 csr_map->_name##_prog_addr + CSK_VEC_OFFSET, \
				 buf); \
} \
static DEVICE_ATTR_RO(_name##_canceled_csks)

#define CSK_VEC_OFFSET 0x34

DEVICE_ATTR_SEC_CSK_RO(bmc);
DEVICE_ATTR_SEC_CSK_RO(sr);
DEVICE_ATTR_SEC_CSK_RO(pr);
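/*
 * The flash update count is also stored as an inverted bit vector: the
 * counter region starts as all ones and each update is assumed to clear
 * one more bit, so the count is the number of zero bits. E.g. a region
 * ending in ...11111000 corresponds to a count of 3.
 */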
#define FLASH_COUNT_SIZE 4096	/* count stored as inverted bit vector */

static ssize_t flash_count_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct m10bmc_sec *sec = dev_get_drvdata(dev);
	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
	unsigned int num_bits;
	u8 *flash_buf;
	int cnt, ret;

	num_bits = FLASH_COUNT_SIZE * 8;

	flash_buf = kmalloc(FLASH_COUNT_SIZE, GFP_KERNEL);
	if (!flash_buf)
		return -ENOMEM;

	ret = m10bmc_sec_read(sec, flash_buf, csr_map->rsu_update_counter,
			      FLASH_COUNT_SIZE);
	if (ret) {
		dev_err(sec->dev, "failed to read flash count\n");
		goto exit_free;
	}
	cnt = num_bits - bitmap_weight((unsigned long *)flash_buf, num_bits);

exit_free:
	kfree(flash_buf);

	return ret ? : sysfs_emit(buf, "%u\n", cnt);
}
static DEVICE_ATTR_RO(flash_count);
static struct attribute *m10bmc_security_attrs[] = {
	&dev_attr_flash_count.attr,
	&dev_attr_bmc_root_entry_hash.attr,
	&dev_attr_sr_root_entry_hash.attr,
	&dev_attr_pr_root_entry_hash.attr,
	&dev_attr_sr_canceled_csks.attr,
	&dev_attr_pr_canceled_csks.attr,
	&dev_attr_bmc_canceled_csks.attr,
	NULL,
};

static struct attribute_group m10bmc_security_attr_group = {
	.name = "security",
	.attrs = m10bmc_security_attrs,
};

static const struct attribute_group *m10bmc_sec_attr_groups[] = {
	&m10bmc_security_attr_group,
	NULL,
};
static void log_error_regs(struct m10bmc_sec *sec, u32 doorbell)
{
	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
	u32 auth_result;

	dev_err(sec->dev, "Doorbell: 0x%08x\n", doorbell);

	if (!m10bmc_sys_read(sec->m10bmc, csr_map->auth_result, &auth_result))
		dev_err(sec->dev, "RSU auth result: 0x%08x\n", auth_result);
}
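/*
 * Where the RSU status lives differs by board: the N3000 family reports
 * it in a field of the doorbell register, while the N6000 reports it in
 * the authentication result register. The m10bmc_sec_ops indirection
 * hides that difference from the common handshake code below.
 */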
static int m10bmc_sec_n3000_rsu_status(struct m10bmc_sec *sec)
{
	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
	u32 doorbell;
	int ret;

	ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell);
	if (ret)
		return ret;

	return FIELD_GET(DRBL_RSU_STATUS, doorbell);
}

static int m10bmc_sec_n6000_rsu_status(struct m10bmc_sec *sec)
{
	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
	u32 auth_result;
	int ret;

	ret = m10bmc_sys_read(sec->m10bmc, csr_map->auth_result, &auth_result);
	if (ret)
		return ret;

	return FIELD_GET(AUTH_RESULT_RSU_STATUS, auth_result);
}
static bool rsu_status_ok(u32 status)
{
	return (status == RSU_STAT_NORMAL ||
		status == RSU_STAT_NIOS_OK ||
		status == RSU_STAT_USER_OK ||
		status == RSU_STAT_FACTORY_OK);
}

static bool rsu_progress_done(u32 progress)
{
	return (progress == RSU_PROG_IDLE ||
		progress == RSU_PROG_RSU_DONE);
}

static bool rsu_progress_busy(u32 progress)
{
	return (progress == RSU_PROG_AUTHENTICATING ||
		progress == RSU_PROG_COPYING ||
		progress == RSU_PROG_UPDATE_CANCEL ||
		progress == RSU_PROG_PROGRAM_KEY_HASH);
}
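/*
 * An update walks the BMC's RSU state machine through a doorbell
 * handshake: the host requests an update, waits for the firmware to
 * reach READY, streams the image into staging RAM, signals WRITE_DONE,
 * and then polls progress until it returns to IDLE/RSU_DONE. The helper
 * below samples doorbell, progress and status in one shot for pollers.
 */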
static int m10bmc_sec_progress_status(struct m10bmc_sec *sec, u32 *doorbell_reg,
				      u32 *progress, u32 *status)
{
	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
	int ret;

	ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, doorbell_reg);
	if (ret)
		return ret;

	ret = sec->ops->rsu_status(sec);
	if (ret < 0)
		return ret;

	*status = ret;
	*progress = rsu_prog(*doorbell_reg);

	return 0;
}
static enum fw_upload_err rsu_check_idle(struct m10bmc_sec *sec)
{
	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
	u32 doorbell;
	int ret;

	ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell);
	if (ret)
		return FW_UPLOAD_ERR_RW_ERROR;

	if (!rsu_progress_done(rsu_prog(doorbell))) {
		log_error_regs(sec, doorbell);
		return FW_UPLOAD_ERR_BUSY;
	}

	return FW_UPLOAD_ERR_NONE;
}
static inline bool rsu_start_done(u32 doorbell_reg, u32 progress, u32 status)
{
	if (doorbell_reg & DRBL_RSU_REQUEST)
		return false;

	if (status == RSU_STAT_ERASE_FAIL || status == RSU_STAT_WEAROUT)
		return true;

	if (!rsu_progress_done(progress))
		return true;

	return false;
}
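/*
 * rsu_update_init() kicks off the handshake: it sets DRBL_RSU_REQUEST
 * together with HOST_STATUS_IDLE and then polls until the firmware
 * acknowledges by clearing the request bit and leaving the done state,
 * or flags an erase failure or flash wearout.
 */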
static enum fw_upload_err rsu_update_init(struct m10bmc_sec *sec)
{
	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
	u32 doorbell_reg, progress, status;
	int ret, err;

	ret = regmap_update_bits(sec->m10bmc->regmap,
				 csr_map->base + csr_map->doorbell,
				 DRBL_RSU_REQUEST | DRBL_HOST_STATUS,
				 DRBL_RSU_REQUEST |
				 FIELD_PREP(DRBL_HOST_STATUS,
					    HOST_STATUS_IDLE));
	if (ret)
		return FW_UPLOAD_ERR_RW_ERROR;

	ret = read_poll_timeout(m10bmc_sec_progress_status, err,
				err < 0 || rsu_start_done(doorbell_reg, progress, status),
				NIOS_HANDSHAKE_INTERVAL_US,
				NIOS_HANDSHAKE_TIMEOUT_US,
				false,
				sec, &doorbell_reg, &progress, &status);

	if (ret == -ETIMEDOUT) {
		log_error_regs(sec, doorbell_reg);
		return FW_UPLOAD_ERR_TIMEOUT;
	} else if (err) {
		return FW_UPLOAD_ERR_RW_ERROR;
	}

	if (status == RSU_STAT_WEAROUT) {
		dev_warn(sec->dev, "Excessive flash update count detected\n");
		return FW_UPLOAD_ERR_WEAROUT;
	} else if (status == RSU_STAT_ERASE_FAIL) {
		log_error_regs(sec, doorbell_reg);
		return FW_UPLOAD_ERR_HW_ERROR;
	}

	return FW_UPLOAD_ERR_NONE;
}
static enum fw_upload_err rsu_prog_ready(struct m10bmc_sec *sec)
{
	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
	unsigned long poll_timeout;
	u32 doorbell, progress;
	int ret;

	ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell);
	if (ret)
		return FW_UPLOAD_ERR_RW_ERROR;

	poll_timeout = jiffies + msecs_to_jiffies(RSU_PREP_TIMEOUT_MS);
	while (rsu_prog(doorbell) == RSU_PROG_PREPARE) {
		msleep(RSU_PREP_INTERVAL_MS);
		if (time_after(jiffies, poll_timeout))
			break;

		ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell);
		if (ret)
			return FW_UPLOAD_ERR_RW_ERROR;
	}

	progress = rsu_prog(doorbell);
	if (progress == RSU_PROG_PREPARE) {
		log_error_regs(sec, doorbell);
		return FW_UPLOAD_ERR_TIMEOUT;
	} else if (progress != RSU_PROG_READY) {
		log_error_regs(sec, doorbell);
		return FW_UPLOAD_ERR_HW_ERROR;
	}

	return FW_UPLOAD_ERR_NONE;
}
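/*
 * rsu_send_data() tells the firmware that the staged image is complete
 * (HOST_STATUS_WRITE_DONE), waits for it to move past the READY state,
 * and then verifies that authentication of the image succeeded.
 */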
static enum fw_upload_err rsu_send_data(struct m10bmc_sec *sec)
{
	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
	u32 doorbell_reg, status;
	int ret;

	ret = regmap_update_bits(sec->m10bmc->regmap,
				 csr_map->base + csr_map->doorbell,
				 DRBL_HOST_STATUS,
				 FIELD_PREP(DRBL_HOST_STATUS,
					    HOST_STATUS_WRITE_DONE));
	if (ret)
		return FW_UPLOAD_ERR_RW_ERROR;

	ret = regmap_read_poll_timeout(sec->m10bmc->regmap,
				       csr_map->base + csr_map->doorbell,
				       doorbell_reg,
				       rsu_prog(doorbell_reg) != RSU_PROG_READY,
				       NIOS_HANDSHAKE_INTERVAL_US,
				       NIOS_HANDSHAKE_TIMEOUT_US);

	if (ret == -ETIMEDOUT) {
		log_error_regs(sec, doorbell_reg);
		return FW_UPLOAD_ERR_TIMEOUT;
	} else if (ret) {
		return FW_UPLOAD_ERR_RW_ERROR;
	}

	ret = sec->ops->rsu_status(sec);
	if (ret < 0)
		return FW_UPLOAD_ERR_HW_ERROR;
	status = ret;

	if (!rsu_status_ok(status)) {
		log_error_regs(sec, doorbell_reg);
		return FW_UPLOAD_ERR_HW_ERROR;
	}

	return FW_UPLOAD_ERR_NONE;
}
static int rsu_check_complete(struct m10bmc_sec *sec, u32 *doorbell_reg)
{
	u32 progress, status;

	if (m10bmc_sec_progress_status(sec, doorbell_reg, &progress, &status))
		return -EIO;

	if (!rsu_status_ok(status))
		return -EINVAL;

	if (rsu_progress_done(progress))
		return 0;

	if (rsu_progress_busy(progress))
		return -EAGAIN;

	return -EINVAL;
}
static enum fw_upload_err rsu_cancel(struct m10bmc_sec *sec)
{
	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
	u32 doorbell;
	int ret;

	ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell);
	if (ret)
		return FW_UPLOAD_ERR_RW_ERROR;

	if (rsu_prog(doorbell) != RSU_PROG_READY)
		return FW_UPLOAD_ERR_BUSY;

	ret = regmap_update_bits(sec->m10bmc->regmap,
				 csr_map->base + csr_map->doorbell,
				 DRBL_HOST_STATUS,
				 FIELD_PREP(DRBL_HOST_STATUS,
					    HOST_STATUS_ABORT_RSU));
	if (ret)
		return FW_UPLOAD_ERR_RW_ERROR;

	return FW_UPLOAD_ERR_CANCELED;
}
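/*
 * The callbacks below implement the firmware upload API: .prepare runs
 * the idle check and handshake and may claim the flash write lock,
 * .write stages one block per call, .poll_complete waits for the
 * firmware to apply the image, and .cleanup releases resources whether
 * the update succeeded, failed, or was canceled.
 */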
static enum fw_upload_err m10bmc_sec_prepare(struct fw_upload *fwl,
					     const u8 *data, u32 size)
{
	struct m10bmc_sec *sec = fwl->dd_handle;
	u32 ret;

	sec->cancel_request = false;

	if (!size || size > M10BMC_STAGING_SIZE)
		return FW_UPLOAD_ERR_INVALID_SIZE;

	if (sec->m10bmc->flash_bulk_ops)
		if (sec->m10bmc->flash_bulk_ops->lock_write(sec->m10bmc))
			return FW_UPLOAD_ERR_BUSY;

	ret = rsu_check_idle(sec);
	if (ret != FW_UPLOAD_ERR_NONE)
		goto unlock_flash;

	ret = rsu_update_init(sec);
	if (ret != FW_UPLOAD_ERR_NONE)
		goto unlock_flash;

	ret = rsu_prog_ready(sec);
	if (ret != FW_UPLOAD_ERR_NONE)
		goto unlock_flash;

	if (sec->cancel_request) {
		ret = rsu_cancel(sec);
		goto unlock_flash;
	}

	return FW_UPLOAD_ERR_NONE;

unlock_flash:
	if (sec->m10bmc->flash_bulk_ops)
		sec->m10bmc->flash_bulk_ops->unlock_write(sec->m10bmc);
	return ret;
}
#define WRITE_BLOCK_SIZE 0x4000	/* Default write-block size is 0x4000 bytes */

static enum fw_upload_err m10bmc_sec_fw_write(struct fw_upload *fwl, const u8 *data,
					      u32 offset, u32 size, u32 *written)
{
	struct m10bmc_sec *sec = fwl->dd_handle;
	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
	struct intel_m10bmc *m10bmc = sec->m10bmc;
	u32 blk_size, doorbell;
	int ret;

	if (sec->cancel_request)
		return rsu_cancel(sec);

	ret = m10bmc_sys_read(m10bmc, csr_map->doorbell, &doorbell);
	if (ret) {
		return FW_UPLOAD_ERR_RW_ERROR;
	} else if (rsu_prog(doorbell) != RSU_PROG_READY) {
		log_error_regs(sec, doorbell);
		return FW_UPLOAD_ERR_HW_ERROR;
	}

	WARN_ON_ONCE(WRITE_BLOCK_SIZE % regmap_get_reg_stride(m10bmc->regmap));
	blk_size = min_t(u32, WRITE_BLOCK_SIZE, size);
	ret = m10bmc_sec_write(sec, data, offset, blk_size);
	if (ret)
		return FW_UPLOAD_ERR_RW_ERROR;

	*written = blk_size;
	return FW_UPLOAD_ERR_NONE;
}
static enum fw_upload_err m10bmc_sec_poll_complete(struct fw_upload *fwl)
{
	struct m10bmc_sec *sec = fwl->dd_handle;
	unsigned long poll_timeout;
	u32 doorbell, result;
	int ret;

	if (sec->cancel_request)
		return rsu_cancel(sec);

	result = rsu_send_data(sec);
	if (result != FW_UPLOAD_ERR_NONE)
		return result;

	poll_timeout = jiffies + msecs_to_jiffies(RSU_COMPLETE_TIMEOUT_MS);
	do {
		msleep(RSU_COMPLETE_INTERVAL_MS);
		ret = rsu_check_complete(sec, &doorbell);
	} while (ret == -EAGAIN && !time_after(jiffies, poll_timeout));

	if (ret == -EAGAIN) {
		log_error_regs(sec, doorbell);
		return FW_UPLOAD_ERR_TIMEOUT;
	} else if (ret == -EIO) {
		return FW_UPLOAD_ERR_RW_ERROR;
	} else if (ret) {
		log_error_regs(sec, doorbell);
		return FW_UPLOAD_ERR_HW_ERROR;
	}

	return FW_UPLOAD_ERR_NONE;
}
/*
 * m10bmc_sec_cancel() may be called asynchronously with an on-going update.
 * All other functions are called sequentially in a single thread. To avoid
 * contention on register accesses, m10bmc_sec_cancel() must only update
 * the cancel_request flag. Other functions will check this flag and handle
 * the cancel request synchronously.
 */
static void m10bmc_sec_cancel(struct fw_upload *fwl)
{
	struct m10bmc_sec *sec = fwl->dd_handle;

	sec->cancel_request = true;
}
static void m10bmc_sec_cleanup(struct fw_upload *fwl)
{
	struct m10bmc_sec *sec = fwl->dd_handle;

	(void)rsu_cancel(sec);

	if (sec->m10bmc->flash_bulk_ops)
		sec->m10bmc->flash_bulk_ops->unlock_write(sec->m10bmc);
}
static const struct fw_upload_ops m10bmc_ops = {
	.prepare = m10bmc_sec_prepare,
	.write = m10bmc_sec_fw_write,
	.poll_complete = m10bmc_sec_poll_complete,
	.cancel = m10bmc_sec_cancel,
	.cleanup = m10bmc_sec_cleanup,
};
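/*
 * Illustrative userspace flow (a sketch, assuming CONFIG_FW_UPLOAD and
 * the generated device name "secure-update0"; attribute names per the
 * firmware upload class sysfs ABI):
 *
 *   echo 1 > /sys/class/firmware/secure-update0/loading
 *   cat image.bin > /sys/class/firmware/secure-update0/data
 *   echo 0 > /sys/class/firmware/secure-update0/loading
 *   cat /sys/class/firmware/secure-update0/status
 */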
static const struct m10bmc_sec_ops m10sec_n3000_ops = {
	.rsu_status = m10bmc_sec_n3000_rsu_status,
};

static const struct m10bmc_sec_ops m10sec_n6000_ops = {
	.rsu_status = m10bmc_sec_n6000_rsu_status,
};
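/*
 * Probe allocates a unique ID from fw_upload_xa so that each BMC
 * instance registers a distinctly named firmware upload device
 * ("secure-updateN"); remove() releases the ID again.
 */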
#define SEC_UPDATE_LEN_MAX 32
static int m10bmc_sec_probe(struct platform_device *pdev)
{
	char buf[SEC_UPDATE_LEN_MAX];
	struct m10bmc_sec *sec;
	struct fw_upload *fwl;
	unsigned int len;
	int ret;

	sec = devm_kzalloc(&pdev->dev, sizeof(*sec), GFP_KERNEL);
	if (!sec)
		return -ENOMEM;

	sec->dev = &pdev->dev;
	sec->m10bmc = dev_get_drvdata(pdev->dev.parent);
	sec->ops = (struct m10bmc_sec_ops *)platform_get_device_id(pdev)->driver_data;
	dev_set_drvdata(&pdev->dev, sec);

	ret = xa_alloc(&fw_upload_xa, &sec->fw_name_id, sec,
		       xa_limit_32b, GFP_KERNEL);
	if (ret)
		return ret;

	len = scnprintf(buf, SEC_UPDATE_LEN_MAX, "secure-update%d",
			sec->fw_name_id);
	sec->fw_name = kmemdup_nul(buf, len, GFP_KERNEL);
	if (!sec->fw_name) {
		ret = -ENOMEM;
		goto fw_name_fail;
	}

	fwl = firmware_upload_register(THIS_MODULE, sec->dev, sec->fw_name,
				       &m10bmc_ops, sec);
	if (IS_ERR(fwl)) {
		dev_err(sec->dev, "Firmware Upload driver failed to start\n");
		ret = PTR_ERR(fwl);
		goto fw_uploader_fail;
	}

	sec->fwl = fwl;
	return 0;

fw_uploader_fail:
	kfree(sec->fw_name);
fw_name_fail:
	xa_erase(&fw_upload_xa, sec->fw_name_id);
	return ret;
}
static int m10bmc_sec_remove(struct platform_device *pdev)
{
	struct m10bmc_sec *sec = dev_get_drvdata(&pdev->dev);

	firmware_upload_unregister(sec->fwl);

	kfree(sec->fw_name);
	xa_erase(&fw_upload_xa, sec->fw_name_id);

	return 0;
}
static const struct platform_device_id intel_m10bmc_sec_ids[] = {
	{
		.name = "n3000bmc-sec-update",
		.driver_data = (kernel_ulong_t)&m10sec_n3000_ops,
	},
	{
		.name = "d5005bmc-sec-update",
		.driver_data = (kernel_ulong_t)&m10sec_n3000_ops,
	},
	{
		.name = "n6000bmc-sec-update",
		.driver_data = (kernel_ulong_t)&m10sec_n6000_ops,
	},
	{ }
};
MODULE_DEVICE_TABLE(platform, intel_m10bmc_sec_ids);
static struct platform_driver intel_m10bmc_sec_driver = {
	.probe = m10bmc_sec_probe,
	.remove = m10bmc_sec_remove,
	.driver = {
		.name = "intel-m10bmc-sec-update",
		.dev_groups = m10bmc_sec_attr_groups,
	},
	.id_table = intel_m10bmc_sec_ids,
};
module_platform_driver(intel_m10bmc_sec_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel MAX10 BMC Secure Update");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(INTEL_M10_BMC_CORE);