// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2019 ARM Limited or its affiliates. */

#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/crypto.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/of_address.h>
#include <linux/pm_runtime.h>

#include "cc_driver.h"
#include "cc_request_mgr.h"
#include "cc_buffer_mgr.h"
#include "cc_debugfs.h"
#include "cc_cipher.h"
#include "cc_aead.h"
#include "cc_hash.h"
#include "cc_sram_mgr.h"
#include "cc_pm.h"
#include "cc_fips.h"

bool cc_dump_desc;
module_param_named(dump_desc, cc_dump_desc, bool, 0600);
MODULE_PARM_DESC(cc_dump_desc, "Dump descriptors to kernel log as debugging aid");
bool cc_dump_bytes;
module_param_named(dump_bytes, cc_dump_bytes, bool, 0600);
MODULE_PARM_DESC(cc_dump_bytes, "Dump buffers to kernel log as debugging aid");

static bool cc_sec_disable;
module_param_named(sec_disable, cc_sec_disable, bool, 0600);
MODULE_PARM_DESC(cc_sec_disable, "Disable security functions");
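
/*
 * Example usage (assuming the module is built as "ccree"): the parameters can
 * be set at load time, e.g. "modprobe ccree dump_desc=1", or toggled by root
 * at runtime via /sys/module/ccree/parameters/dump_desc (mode 0600 above).
 */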

struct cc_hw_data {
	char *name;
	enum cc_hw_rev rev;
	u32 sig;
	u32 cidr_0123;
	u32 pidr_0124;
	int std_bodies;
};

#define CC_NUM_IDRS 4
#define CC_HW_RESET_LOOP_COUNT 10

/* Note: PIDR3 holds CMOD/Rev so ignored for HW identification purposes */
static const u32 pidr_0124_offsets[CC_NUM_IDRS] = {
	CC_REG(PERIPHERAL_ID_0), CC_REG(PERIPHERAL_ID_1),
	CC_REG(PERIPHERAL_ID_2), CC_REG(PERIPHERAL_ID_4)
};

static const u32 cidr_0123_offsets[CC_NUM_IDRS] = {
	CC_REG(COMPONENT_ID_0), CC_REG(COMPONENT_ID_1),
	CC_REG(COMPONENT_ID_2), CC_REG(COMPONENT_ID_3)
};

/* Hardware revision definitions. */

/* The 703 is an OSCCA-only variant of the 713 */
static const struct cc_hw_data cc703_hw = {
	.name = "703", .rev = CC_HW_REV_713, .cidr_0123 = 0xB105F00DU,
	.pidr_0124 = 0x040BB0D0U, .std_bodies = CC_STD_OSCCA
};

static const struct cc_hw_data cc713_hw = {
	.name = "713", .rev = CC_HW_REV_713, .cidr_0123 = 0xB105F00DU,
	.pidr_0124 = 0x040BB0D0U, .std_bodies = CC_STD_ALL
};

static const struct cc_hw_data cc712_hw = {
	.name = "712", .rev = CC_HW_REV_712, .sig = 0xDCC71200U,
	.std_bodies = CC_STD_ALL
};

static const struct cc_hw_data cc710_hw = {
	.name = "710", .rev = CC_HW_REV_710, .sig = 0xDCC63200U,
	.std_bodies = CC_STD_ALL
};

static const struct cc_hw_data cc630p_hw = {
	.name = "630P", .rev = CC_HW_REV_630, .sig = 0xDCC63000U,
	.std_bodies = CC_STD_ALL
};

static const struct of_device_id arm_ccree_dev_of_match[] = {
	{ .compatible = "arm,cryptocell-703-ree", .data = &cc703_hw },
	{ .compatible = "arm,cryptocell-713-ree", .data = &cc713_hw },
	{ .compatible = "arm,cryptocell-712-ree", .data = &cc712_hw },
	{ .compatible = "arm,cryptocell-710-ree", .data = &cc710_hw },
	{ .compatible = "arm,cryptocell-630p-ree", .data = &cc630p_hw },
	{ }
};
MODULE_DEVICE_TABLE(of, arm_ccree_dev_of_match);

static void init_cc_cache_params(struct cc_drvdata *drvdata)
{
	struct device *dev = drvdata_to_dev(drvdata);
	u32 cache_params, ace_const, val;
	u64 mask;

	/* compute CC_AXIM_CACHE_PARAMS */
	cache_params = cc_ioread(drvdata, CC_REG(AXIM_CACHE_PARAMS));
	dev_dbg(dev, "Cache params previous: 0x%08X\n", cache_params);

	/* non cached or write-back, write allocate */
	val = drvdata->coherent ? 0xb : 0x2;

	mask = CC_GENMASK(CC_AXIM_CACHE_PARAMS_AWCACHE);
	cache_params &= ~mask;
	cache_params |= FIELD_PREP(mask, val);

	mask = CC_GENMASK(CC_AXIM_CACHE_PARAMS_AWCACHE_LAST);
	cache_params &= ~mask;
	cache_params |= FIELD_PREP(mask, val);

	mask = CC_GENMASK(CC_AXIM_CACHE_PARAMS_ARCACHE);
	cache_params &= ~mask;
	cache_params |= FIELD_PREP(mask, val);

	drvdata->cache_params = cache_params;

	dev_dbg(dev, "Cache params current: 0x%08X\n", cache_params);

	if (drvdata->hw_rev <= CC_HW_REV_710)
		return;

	/* compute CC_AXIM_ACE_CONST */
	ace_const = cc_ioread(drvdata, CC_REG(AXIM_ACE_CONST));
	dev_dbg(dev, "ACE-const previous: 0x%08X\n", ace_const);

	/* system or outer-shareable */
	val = drvdata->coherent ? 0x2 : 0x3;

	mask = CC_GENMASK(CC_AXIM_ACE_CONST_ARDOMAIN);
	ace_const &= ~mask;
	ace_const |= FIELD_PREP(mask, val);

	mask = CC_GENMASK(CC_AXIM_ACE_CONST_AWDOMAIN);
	ace_const &= ~mask;
	ace_const |= FIELD_PREP(mask, val);

	dev_dbg(dev, "ACE-const current: 0x%08X\n", ace_const);

	drvdata->ace_const = ace_const;
}

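/*
 * Each ID register holds one significant byte; cc_read_idr() gathers the low
 * byte of each of the four registers into a single little-endian 32-bit value
 * so it can be compared against the expected PIDR/CIDR constant as a whole.
 */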
static u32 cc_read_idr(struct cc_drvdata *drvdata, const u32 *idr_offsets)
{
	int i;
	union {
		u8 regs[CC_NUM_IDRS];
		__le32 val;
	} idr;

	for (i = 0; i < CC_NUM_IDRS; ++i)
		idr.regs[i] = cc_ioread(drvdata, idr_offsets[i]);

	return le32_to_cpu(idr.val);
}

void __dump_byte_array(const char *name, const u8 *buf, size_t len)
{
	char prefix[64];

	if (!buf)
		return;

	snprintf(prefix, sizeof(prefix), "%s[%zu]: ", name, len);

	print_hex_dump(KERN_DEBUG, prefix, DUMP_PREFIX_ADDRESS, 16, 1, buf,
		       len, false);
}

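/*
 * Top-half interrupt handler: acknowledges all pending causes up front, then
 * dispatches completion, TEE FIPS and AXI-error events. The line is requested
 * with IRQF_SHARED, so invocations that find no pending cause return IRQ_NONE.
 */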
static irqreturn_t cc_isr(int irq, void *dev_id)
{
	struct cc_drvdata *drvdata = (struct cc_drvdata *)dev_id;
	struct device *dev = drvdata_to_dev(drvdata);
	u32 irr;
	u32 imr;

	/* STAT_OP_TYPE_GENERIC STAT_PHASE_0: Interrupt */
	/* if driver suspended return, probably shared interrupt */
	if (pm_runtime_suspended(dev))
		return IRQ_NONE;

	/* read the interrupt status */
	irr = cc_ioread(drvdata, CC_REG(HOST_IRR));
	dev_dbg(dev, "Got IRR=0x%08X\n", irr);

	if (irr == 0) /* Probably shared interrupt line */
		return IRQ_NONE;

	imr = cc_ioread(drvdata, CC_REG(HOST_IMR));

	/* clear interrupt - must be before processing events */
	cc_iowrite(drvdata, CC_REG(HOST_ICR), irr);

	drvdata->irq = irr;
	/* Completion interrupt - most probable */
	if (irr & drvdata->comp_mask) {
		/* Mask all completion interrupts - will be unmasked in
		 * deferred service handler
		 */
		cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | drvdata->comp_mask);
		irr &= ~drvdata->comp_mask;
		complete_request(drvdata);
	}
#ifdef CONFIG_CRYPTO_FIPS
	/* TEE FIPS interrupt */
	if (irr & CC_GPR0_IRQ_MASK) {
		/* Mask interrupt - will be unmasked in deferred service
		 * handler
		 */
		cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | CC_GPR0_IRQ_MASK);
		irr &= ~CC_GPR0_IRQ_MASK;
		fips_handler(drvdata);
	}
#endif
	/* AXI error interrupt */
	if (irr & CC_AXI_ERR_IRQ_MASK) {
		u32 axi_err;

		/* Read the AXI error ID */
		axi_err = cc_ioread(drvdata, CC_REG(AXIM_MON_ERR));
		dev_dbg(dev, "AXI completion error: axim_mon_err=0x%08X\n",
			axi_err);

		irr &= ~CC_AXI_ERR_IRQ_MASK;
	}

	if (irr) {
		dev_dbg_ratelimited(dev, "IRR includes unknown cause bits (0x%08X)\n",
				    irr);
		/* Just warning */
	}

	return IRQ_HANDLED;
}

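/*
 * Poll for reset completion. On the cc7x3 parts the NVM_IS_IDLE register
 * flags that the reset sequence has finished; older parts have no such
 * indication, so for them this is trivially true.
 */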
bool cc_wait_for_reset_completion(struct cc_drvdata *drvdata)
{
	unsigned int val;
	unsigned int i;

	/* 712/710/630 have no reset completion indication, always return true */
	if (drvdata->hw_rev <= CC_HW_REV_712)
		return true;

	for (i = 0; i < CC_HW_RESET_LOOP_COUNT; i++) {
		/* in cc7x3 NVM_IS_IDLE indicates that CC reset is
		 * completed and device is fully functional
		 */
		val = cc_ioread(drvdata, CC_REG(NVM_IS_IDLE));
		if (val & CC_NVM_IS_IDLE_MASK) {
			/* hw indicates reset completed */
			return true;
		}
		/* allow scheduling other process on the processor */
		schedule();
	}
	/* reset not completed */
	return false;
}

int init_cc_regs(struct cc_drvdata *drvdata)
{
	unsigned int val;
	struct device *dev = drvdata_to_dev(drvdata);

	/* Unmask all AXI interrupt sources AXI_CFG1 register */
	/* AXI interrupt config is obsolete starting at cc7x3 */
	if (drvdata->hw_rev <= CC_HW_REV_712) {
		val = cc_ioread(drvdata, CC_REG(AXIM_CFG));
		cc_iowrite(drvdata, CC_REG(AXIM_CFG), val & ~CC_AXI_IRQ_MASK);
		dev_dbg(dev, "AXIM_CFG=0x%08X\n",
			cc_ioread(drvdata, CC_REG(AXIM_CFG)));
	}

	/* Clear all pending interrupts */
	val = cc_ioread(drvdata, CC_REG(HOST_IRR));
	dev_dbg(dev, "IRR=0x%08X\n", val);
	cc_iowrite(drvdata, CC_REG(HOST_ICR), val);

	/* Unmask relevant interrupt cause */
	val = drvdata->comp_mask | CC_AXI_ERR_IRQ_MASK;

	if (drvdata->hw_rev >= CC_HW_REV_712)
		val |= CC_GPR0_IRQ_MASK;

	cc_iowrite(drvdata, CC_REG(HOST_IMR), ~val);

	cc_iowrite(drvdata, CC_REG(AXIM_CACHE_PARAMS), drvdata->cache_params);
	if (drvdata->hw_rev >= CC_HW_REV_712)
		cc_iowrite(drvdata, CC_REG(AXIM_ACE_CONST), drvdata->ace_const);

	return 0;
}

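/*
 * One-stop probe helper: maps registers, brings up the clock and runtime PM,
 * sanity-checks the HW identity registers, then initializes the driver
 * subsystems in dependency order (regs, debugfs, FIPS, SRAM, request and
 * buffer managers, hash, cipher, AEAD). The error labels at the bottom unwind
 * in exactly the reverse order.
 */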
static int init_cc_resources(struct platform_device *plat_dev)
{
	struct resource *req_mem_cc_regs = NULL;
	struct cc_drvdata *new_drvdata;
	struct device *dev = &plat_dev->dev;
	struct device_node *np = dev->of_node;
	u32 val, hw_rev_pidr, sig_cidr;
	u64 dma_mask;
	const struct cc_hw_data *hw_rev;
	struct clk *clk;
	int irq;
	int rc = 0;

	new_drvdata = devm_kzalloc(dev, sizeof(*new_drvdata), GFP_KERNEL);
	if (!new_drvdata)
		return -ENOMEM;

	hw_rev = of_device_get_match_data(dev);
	new_drvdata->hw_rev_name = hw_rev->name;
	new_drvdata->hw_rev = hw_rev->rev;
	new_drvdata->std_bodies = hw_rev->std_bodies;

	if (hw_rev->rev >= CC_HW_REV_712) {
		new_drvdata->axim_mon_offset = CC_REG(AXIM_MON_COMP);
		new_drvdata->sig_offset = CC_REG(HOST_SIGNATURE_712);
		new_drvdata->ver_offset = CC_REG(HOST_VERSION_712);
	} else {
		new_drvdata->axim_mon_offset = CC_REG(AXIM_MON_COMP8);
		new_drvdata->sig_offset = CC_REG(HOST_SIGNATURE_630);
		new_drvdata->ver_offset = CC_REG(HOST_VERSION_630);
	}

	new_drvdata->comp_mask = CC_COMP_IRQ_MASK;

	platform_set_drvdata(plat_dev, new_drvdata);
	new_drvdata->plat_dev = plat_dev;

	clk = devm_clk_get_optional(dev, NULL);
	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk), "Error getting clock\n");
	new_drvdata->clk = clk;

	new_drvdata->coherent = of_dma_is_coherent(np);

	/* Map the CC registers space */
	new_drvdata->cc_base = devm_platform_get_and_ioremap_resource(plat_dev,
								      0, &req_mem_cc_regs);
	if (IS_ERR(new_drvdata->cc_base))
		return PTR_ERR(new_drvdata->cc_base);

	dev_dbg(dev, "Got MEM resource (%s): %pR\n", req_mem_cc_regs->name,
		req_mem_cc_regs);
	dev_dbg(dev, "CC registers mapped from %pa to 0x%p\n",
		&req_mem_cc_regs->start, new_drvdata->cc_base);

	/* Then IRQ */
	irq = platform_get_irq(plat_dev, 0);
	if (irq < 0)
		return irq;

	init_completion(&new_drvdata->hw_queue_avail);

	if (!dev->dma_mask)
		dev->dma_mask = &dev->coherent_dma_mask;

	dma_mask = DMA_BIT_MASK(DMA_BIT_MASK_LEN);
	rc = dma_set_coherent_mask(dev, dma_mask);
	if (rc) {
		dev_err(dev, "Failed in dma_set_coherent_mask, mask=%llx\n",
			dma_mask);
		return rc;
	}

	rc = clk_prepare_enable(new_drvdata->clk);
	if (rc) {
		dev_err(dev, "Failed to enable clock\n");
		return rc;
	}

	new_drvdata->sec_disabled = cc_sec_disable;

	pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
	rc = pm_runtime_get_sync(dev);
	if (rc < 0) {
		dev_err(dev, "pm_runtime_get_sync() failed: %d\n", rc);
		goto post_pm_err;
	}

	/* Wait for Cryptocell reset completion */
	if (!cc_wait_for_reset_completion(new_drvdata)) {
		dev_err(dev, "Cryptocell reset not completed");
	}

	if (hw_rev->rev <= CC_HW_REV_712) {
		/* Verify correct mapping */
		val = cc_ioread(new_drvdata, new_drvdata->sig_offset);
		if (val != hw_rev->sig) {
			dev_err(dev, "Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n",
				val, hw_rev->sig);
			rc = -EINVAL;
			goto post_pm_err;
		}
		sig_cidr = val;
		hw_rev_pidr = cc_ioread(new_drvdata, new_drvdata->ver_offset);
	} else {
		/* Verify correct mapping */
		val = cc_read_idr(new_drvdata, pidr_0124_offsets);
		if (val != hw_rev->pidr_0124) {
			dev_err(dev, "Invalid CC PIDR: PIDR0124=0x%08X != expected=0x%08X\n",
				val, hw_rev->pidr_0124);
			rc = -EINVAL;
			goto post_pm_err;
		}
		hw_rev_pidr = val;

		val = cc_read_idr(new_drvdata, cidr_0123_offsets);
		if (val != hw_rev->cidr_0123) {
			dev_err(dev, "Invalid CC CIDR: CIDR0123=0x%08X != expected=0x%08X\n",
				val, hw_rev->cidr_0123);
			rc = -EINVAL;
			goto post_pm_err;
		}
		sig_cidr = val;

		/* Check HW engine configuration */
		val = cc_ioread(new_drvdata, CC_REG(HOST_REMOVE_INPUT_PINS));
		switch (val) {
		case CC_PINS_FULL:
			/* This is fine */
			break;
		case CC_PINS_SLIM:
			if (new_drvdata->std_bodies & CC_STD_NIST) {
				dev_warn(dev, "703 mode forced due to HW configuration.\n");
				new_drvdata->std_bodies = CC_STD_OSCCA;
			}
			break;
		default:
			dev_err(dev, "Unsupported engines configuration.\n");
			rc = -EINVAL;
			goto post_pm_err;
		}

		/* Check security disable state */
		val = cc_ioread(new_drvdata, CC_REG(SECURITY_DISABLED));
		val &= CC_SECURITY_DISABLED_MASK;
		new_drvdata->sec_disabled |= !!val;

		if (!new_drvdata->sec_disabled) {
			new_drvdata->comp_mask |= CC_CPP_SM4_ABORT_MASK;
			if (new_drvdata->std_bodies & CC_STD_NIST)
				new_drvdata->comp_mask |= CC_CPP_AES_ABORT_MASK;
		}
	}

	if (new_drvdata->sec_disabled)
		dev_info(dev, "Security Disabled mode is in effect. Security functions disabled.\n");

	/* Display HW versions */
	dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X/0x%08X, Driver version %s\n",
		 hw_rev->name, hw_rev_pidr, sig_cidr, DRV_MODULE_VERSION);
	/* register the driver isr function */
	rc = devm_request_irq(dev, irq, cc_isr, IRQF_SHARED, "ccree",
			      new_drvdata);
	if (rc) {
		dev_err(dev, "Could not register to interrupt %d\n", irq);
		goto post_pm_err;
	}
	dev_dbg(dev, "Registered to IRQ: %d\n", irq);

	init_cc_cache_params(new_drvdata);

	rc = init_cc_regs(new_drvdata);
	if (rc) {
		dev_err(dev, "init_cc_regs failed\n");
		goto post_pm_err;
	}

	rc = cc_debugfs_init(new_drvdata);
	if (rc) {
		dev_err(dev, "Failed registering debugfs interface\n");
		goto post_regs_err;
	}

	rc = cc_fips_init(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_fips_init failed 0x%x\n", rc);
		goto post_debugfs_err;
	}

	rc = cc_sram_mgr_init(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_sram_mgr_init failed\n");
		goto post_fips_init_err;
	}

	new_drvdata->mlli_sram_addr =
		cc_sram_alloc(new_drvdata, MAX_MLLI_BUFF_SIZE);
	if (new_drvdata->mlli_sram_addr == NULL_SRAM_ADDR) {
		rc = -ENOMEM;
		goto post_fips_init_err;
	}

	rc = cc_req_mgr_init(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_req_mgr_init failed\n");
		goto post_fips_init_err;
	}

	rc = cc_buffer_mgr_init(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_buffer_mgr_init failed\n");
		goto post_req_mgr_err;
	}

	/* hash must be allocated first due to use of send_request_init()
	 * and dependency of AEAD on it
	 */
	rc = cc_hash_alloc(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_hash_alloc failed\n");
		goto post_buf_mgr_err;
	}

	/* Allocate crypto algs */
	rc = cc_cipher_alloc(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_cipher_alloc failed\n");
		goto post_hash_err;
	}

	rc = cc_aead_alloc(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_aead_alloc failed\n");
		goto post_cipher_err;
	}

	/* If we got here and FIPS mode is enabled
	 * it means all FIPS tests passed, so let TEE
	 * know we're good.
	 */
	cc_set_ree_fips_status(new_drvdata, true);

	pm_runtime_put(dev);
	return 0;

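	/* Error unwind: each label releases what was set up before its goto,
	 * in reverse order of the initialization sequence above.
	 */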
post_cipher_err:
	cc_cipher_free(new_drvdata);
post_hash_err:
	cc_hash_free(new_drvdata);
post_buf_mgr_err:
	cc_buffer_mgr_fini(new_drvdata);
post_req_mgr_err:
	cc_req_mgr_fini(new_drvdata);
post_fips_init_err:
	cc_fips_fini(new_drvdata);
post_debugfs_err:
	cc_debugfs_fini(new_drvdata);
post_regs_err:
	fini_cc_regs(new_drvdata);
post_pm_err:
	pm_runtime_put_noidle(dev);
	pm_runtime_disable(dev);
	pm_runtime_set_suspended(dev);
	clk_disable_unprepare(new_drvdata->clk);
	return rc;
}

void fini_cc_regs(struct cc_drvdata *drvdata)
{
	/* Mask all interrupts */
	cc_iowrite(drvdata, CC_REG(HOST_IMR), 0xFFFFFFFF);
}

static void cleanup_cc_resources(struct platform_device *plat_dev)
{
	struct device *dev = &plat_dev->dev;
	struct cc_drvdata *drvdata =
		(struct cc_drvdata *)platform_get_drvdata(plat_dev);

	cc_aead_free(drvdata);
	cc_cipher_free(drvdata);
	cc_hash_free(drvdata);
	cc_buffer_mgr_fini(drvdata);
	cc_req_mgr_fini(drvdata);
	cc_fips_fini(drvdata);
	cc_debugfs_fini(drvdata);
	fini_cc_regs(drvdata);
	pm_runtime_put_noidle(dev);
	pm_runtime_disable(dev);
	pm_runtime_set_suspended(dev);
	clk_disable_unprepare(drvdata->clk);
}

unsigned int cc_get_default_hash_len(struct cc_drvdata *drvdata)
{
	if (drvdata->hw_rev >= CC_HW_REV_712)
		return HASH_LEN_SIZE_712;
	else
		return HASH_LEN_SIZE_630;
}

static int ccree_probe(struct platform_device *plat_dev)
{
	int rc;
	struct device *dev = &plat_dev->dev;

	/* Map registers space */
	rc = init_cc_resources(plat_dev);
	if (rc)
		return rc;

	dev_info(dev, "ARM ccree device initialized\n");

	return 0;
}

static int ccree_remove(struct platform_device *plat_dev)
{
	struct device *dev = &plat_dev->dev;

	dev_dbg(dev, "Releasing ccree resources...\n");

	cleanup_cc_resources(plat_dev);

	dev_info(dev, "ARM ccree device terminated\n");

	return 0;
}

static struct platform_driver ccree_driver = {
	.driver = {
		   .name = "ccree",
		   .of_match_table = arm_ccree_dev_of_match,
#ifdef CONFIG_PM
		   .pm = &ccree_pm,
#endif
	},
	.probe = ccree_probe,
	.remove = ccree_remove,
};

static int __init ccree_init(void)
{
	int rc;

	cc_debugfs_global_init();

	rc = platform_driver_register(&ccree_driver);
	if (rc) {
		cc_debugfs_global_fini();
		return rc;
	}

	return 0;
}
module_init(ccree_init);

static void __exit ccree_exit(void)
{
	platform_driver_unregister(&ccree_driver);
	cc_debugfs_global_fini();
}
module_exit(ccree_exit);

/* Module description */
MODULE_DESCRIPTION("ARM TrustZone CryptoCell REE Driver");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_AUTHOR("ARM");
MODULE_LICENSE("GPL v2");