};
* New DMA for GXL and beyond
+* DMA engine for crypto operations
+Required properties:
+- compatible : Should be "amlogic,aml_gxl_dma" or "amlogic,aml_txlx_dma".
+- reg: Should contain the base address and length of the DMA registers.
+- interrupts: Should contain the IRQ line for the DMA engine.
+
+Example:
+aml_dma {
+ compatible = "amlogic,aml_gxl_dma";
+ reg = <0x0 0xff63e000 0x0 0x48>;
+ interrupts = <0 180 1>;
+};
+
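+The AES/TDES/SHA engines documented below are instantiated as sub-nodes
+of the DMA node and are populated by the DMA driver. A minimal sketch of
+the combined layout, reusing the TXLX values from the example above:
+
+aml_dma {
+ compatible = "amlogic,aml_txlx_dma";
+ reg = <0x0 0xff63e000 0x0 0x48>;
+ interrupts = <0 180 1>;
+
+ aml_aes {
+ compatible = "amlogic,aes_dma";
+ dev_name = "aml_aes_dma";
+ status = "okay";
+ };
+};
+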
* Advanced Encryption Standard (AES)
Required properties:
- compatible : Should be "amlogic,aes".
- dev_name : Should be "aml_aes"
-- interrupts: Should contain the IRQ line for the AES.
-- resets: Should contain the clock to enable the module
-- reg: Should contain the base address of regs
Example:
aml_aes{
compatible = "amlogic,aes_dma";
dev_name = "aml_aes_dma";
- interrupts = <0 188 1
- 0 189 1>;
- reg = <0x0 0xc883e000 0x0 0x28>;
};
Required properties:
- compatible : Should be "amlogic,des,tdes".
- dev_name : Should be "aml_aes"
-- interrupts: Should contain the IRQ line for the TDES.
-- resets: Should contain the clock to enable the module
-- reg: Should contain the base address of regs
Example:
aml_tdes{
compatible = "amlogic,des_dma,tdes_dma";
dev_name = "aml_tdes_dma";
- interrupts = <0 188 1
- 0 189 1>;
- reg = <0x0 0xc883e000 0x0 0x28>;
};
* Secure Hash Algorithm (SHA1/SHA224/SHA256/HMAC)
Required properties:
- compatible : Should be "amlogic,sha_dma".
- dev_name : Should be "aml_sha_dma"
-- interrupts: Should contain the IRQ line for the SHA.
-- resets: Should contain the clock to enable the module
-- reg: Should contain the base address of regs
Example:
aml_sha{
compatible = "amlogic,sha_dma";
dev_name = "aml_sha_dma";
- interrupts = <0 188 1
- 0 189 1>;
- reg = <0x0 0xc883e000 0x0 0x28>;
};
AMLOGIC CRYPTO DMA BLKMV
M: Matthew Shyu <matthew.shyu@amlogic.com>
-F: drivers/amlogic/crypto/*
+F: drivers/amlogic/crypto/aml-aes-blkmv.c
+F: drivers/amlogic/crypto/aml-aes-dma.c
+F: drivers/amlogic/crypto/aml-crypto-blkmv.c
+F: drivers/amlogic/crypto/aml-crypto-blkmv.h
+F: drivers/amlogic/crypto/aml-crypto-dma.c
+F: drivers/amlogic/crypto/aml-crypto-dma.h
+F: drivers/amlogic/crypto/aml-dma.c
+F: drivers/amlogic/crypto/aml-sha-dma.c
+F: drivers/amlogic/crypto/aml-tdes-blkmv.c
+F: drivers/amlogic/crypto/aml-tdes-dma.c
AMLOGIC saradc
M: Xingyu Chen <xingyu.chen@amlogic.com>
pinctrl-0=<&i2c_slave_pin>;
};
- aml_aes {
- compatible = "amlogic,aes_dma";
- dev_name = "aml_aes_dma";
- status = "okay";
- interrupts = <0 180 1 0 181 1>;
+ aml_dma {
+ compatible = "amlogic,aml_txlx_dma";
reg = <0x0 0xff63e000 0x0 0x48>;
- };
+ interrupts = <0 180 1>;
- aml_sha {
- compatible = "amlogic,sha_dma";
- dev_name = "aml_sha_dma";
- status = "okay";
- interrupts = <0 180 1 0 181 1>;
- reg = <0x0 0xff63e000 0x0 0x48>;
+ aml_aes {
+ compatible = "amlogic,aes_dma";
+ dev_name = "aml_aes_dma";
+ status = "okay";
+ };
+
+ aml_sha {
+ compatible = "amlogic,sha_dma";
+ dev_name = "aml_sha_dma";
+ status = "okay";
+ };
};
saradc:saradc {
};
/{
- aml_aes {
- compatible = "amlogic,aes_dma";
- dev_name = "aml_aes_dma";
- status = "okay";
- interrupts = <0 188 1 0 189 1>;
+ aml_dma {
+ compatible = "amlogic,aml_gxl_dma";
reg = <0x0 0xc883e000 0x0 0x28>;
- };
+ interrupts = <0 188 1>;
- aml_tdes {
- compatible = "amlogic,des_dma,tdes_dma";
- dev_name = "aml_tdes_dma";
- status = "okay";
- interrupts = <0 188 1 0 189 1>;
- reg = <0x0 0xc883e000 0x0 0x28>;
- };
+ aml_aes {
+ compatible = "amlogic,aes_dma";
+ dev_name = "aml_aes_dma";
+ status = "okay";
+ };
- aml_sha {
- compatible = "amlogic,sha_dma";
- dev_name = "aml_sha_dma";
- status = "okay";
- interrupts = <0 188 1 0 189 1>;
- reg = <0x0 0xc883e000 0x0 0x28>;
+ aml_tdes {
+ compatible = "amlogic,des_dma,tdes_dma";
+ dev_name = "aml_tdes_dma";
+ status = "okay";
+ };
};
audio_data:audio_data {
};
/{
- aml_aes {
- compatible = "amlogic,aes_dma";
- dev_name = "aml_aes_dma";
- status = "okay";
- interrupts = <0 188 1 0 189 1>;
+ aml_dma {
+ compatible = "amlogic,aml_gxl_dma";
reg = <0x0 0xc883e000 0x0 0x28>;
- };
+ interrupts = <0 188 1>;
- aml_tdes {
- compatible = "amlogic,des_dma,tdes_dma";
- dev_name = "aml_tdes_dma";
- status = "okay";
- interrupts = <0 188 1 0 189 1>;
- reg = <0x0 0xc883e000 0x0 0x28>;
- };
+ aml_aes {
+ compatible = "amlogic,aes_dma";
+ dev_name = "aml_aes_dma";
+ status = "okay";
+ };
- aml_sha {
- compatible = "amlogic,sha_dma";
- dev_name = "aml_sha_dma";
- status = "okay";
- interrupts = <0 188 1 0 189 1>;
- reg = <0x0 0xc883e000 0x0 0x28>;
+ aml_tdes {
+ compatible = "amlogic,des_dma,tdes_dma";
+ dev_name = "aml_tdes_dma";
+ status = "okay";
+ };
};
audio_data:audio_data {
+obj-$(CONFIG_AMLOGIC_CRYPTO_DMA) += aml-cryp-dma.o
+aml-cryp-dma-objs := aml-dma.o aml-crypto-dma.o
obj-$(CONFIG_AMLOGIC_CRYPTO_DMA) += aml-aes-dma.o
obj-$(CONFIG_AMLOGIC_CRYPTO_DMA) += aml-tdes-dma.o
-obj-$(CONFIG_AMLOGIC_CRYPTO_DMA) += aml-crypto-dma.o
obj-$(CONFIG_AMLOGIC_CRYPTO_DMA) += aml-sha-dma.o
-obj-$(CONFIG_AMLOGIC_CRYPTO_BLKMV) += aml-aes-blkmv.o
-obj-$(CONFIG_AMLOGIC_CRYPTO_BLKMV) += aml-tdes-blkmv.o
-obj-$(CONFIG_AMLOGIC_CRYPTO_BLKMV) += aml-crypto-blkmv.o
+obj-$(CONFIG_AMLOGIC_CRYPTO_BLKMV) += aml-aes-blkmv.o
+obj-$(CONFIG_AMLOGIC_CRYPTO_BLKMV) += aml-tdes-blkmv.o
+obj-$(CONFIG_AMLOGIC_CRYPTO_BLKMV) += aml-crypto-blkmv.o
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <linux/amlogic/iomap.h>
-#include <linux/amlogic/cpu_version.h>
#include "aml-crypto-dma.h"
/* AES flags */
#define AML_AES_QUEUE_LENGTH 50
#define AML_AES_DMA_THRESHOLD 16
-#define DMA_THREAD_REG (get_dma_t0_offset() + AES_THREAD_INDEX)
-#define DMA_STATUS_REG (get_dma_sts0_offset() + AES_THREAD_INDEX)
-
-u8 map_in_aes_dma;
struct aml_aes_dev;
struct aml_aes_ctx {
unsigned long flags;
int err;
- spinlock_t lock;
+ struct aml_dma_dev *dma;
+ uint32_t thread;
+ uint32_t status;
struct crypto_queue queue;
struct tasklet_struct done_task;
.lock = __SPIN_LOCK_UNLOCKED(aml_aes.lock),
};
-static void set_aes_key_iv(struct aml_aes_dev *dd, u32 *key,
+static int set_aes_key_iv(struct aml_aes_dev *dd, u32 *key,
uint32_t keylen, u32 *iv, uint8_t swap)
{
struct dma_dsc *dsc = dd->descriptor;
uint32_t key_iv[12];
uint32_t *piv = key_iv + 8;
int32_t len = keylen;
- dma_addr_t dma_addr_key;
+ dma_addr_t dma_addr_key = 0;
+ uint32_t i = 0;
memset(key_iv, 0, sizeof(key_iv));
memcpy(key_iv, key, keylen);
len = 48; /* full key storage */
}
+ if (!len)
+ return -EPERM;
+
dma_addr_key = dma_map_single(dd->dev, key_iv,
sizeof(key_iv), DMA_TO_DEVICE);
- if (cpu_after_eq(MESON_CPU_MAJOR_ID_AXG)) {
- uint32_t i = 0;
- while (len > 0) {
- dsc[i].src_addr = (uint32_t)dma_addr_key + i * 16;
- dsc[i].tgt_addr = i * 16;
- dsc[i].dsc_cfg.d32 = 0;
- dsc[i].dsc_cfg.b.length = len > 16 ? 16 : len;
- dsc[i].dsc_cfg.b.mode = MODE_KEY;
- dsc[i].dsc_cfg.b.eoc = 0;
- dsc[i].dsc_cfg.b.owner = 1;
- i++;
- len -= 16;
- }
- dsc[i - 1].dsc_cfg.b.eoc = 1;
- } else {
- dsc->src_addr = (uint32_t)dma_addr_key;
- dsc->tgt_addr = 0;
- dsc->dsc_cfg.d32 = 0;
- dsc->dsc_cfg.b.length = len;
- dsc->dsc_cfg.b.mode = MODE_KEY;
- dsc->dsc_cfg.b.eoc = 1;
- dsc->dsc_cfg.b.owner = 1;
+ if (dma_mapping_error(dd->dev, dma_addr_key)) {
+ dev_err(dd->dev, "error mapping dma_addr_key\n");
+ return -EINVAL;
}
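+ /* split the 48-byte key+IV block into 16-byte KEY-mode descriptors */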
+ while (len > 0) {
+ dsc[i].src_addr = (uint32_t)dma_addr_key + i * 16;
+ dsc[i].tgt_addr = i * 16;
+ dsc[i].dsc_cfg.d32 = 0;
+ dsc[i].dsc_cfg.b.length = len > 16 ? 16 : len;
+ dsc[i].dsc_cfg.b.mode = MODE_KEY;
+ dsc[i].dsc_cfg.b.eoc = 0;
+ dsc[i].dsc_cfg.b.owner = 1;
+ i++;
+ len -= 16;
+ }
+ dsc[i - 1].dsc_cfg.b.eoc = 1;
+
dma_sync_single_for_device(dd->dev, dd->dma_descript_tab,
PAGE_SIZE, DMA_TO_DEVICE);
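+ /* write the table address with bit 1 set to start the DMA thread */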
- aml_write_crypto_reg(DMA_THREAD_REG,
+ aml_write_crypto_reg(dd->thread,
(uintptr_t) dd->dma_descript_tab | 2);
- aml_dma_debug(dsc, 1, __func__);
- while (aml_read_crypto_reg(DMA_STATUS_REG) == 0)
+ aml_dma_debug(dsc, i, __func__, dd->thread, dd->status);
+ while (aml_read_crypto_reg(dd->status) == 0)
;
- aml_write_crypto_reg(DMA_STATUS_REG, 0xf);
+ aml_write_crypto_reg(dd->status, 0xf);
dma_unmap_single(dd->dev, dma_addr_key,
sizeof(key_iv), DMA_TO_DEVICE);
+
+ return 0;
}
static size_t aml_aes_sg_copy(struct scatterlist **sg, size_t *offset,
struct ablkcipher_request *req = dd->req;
dd->flags &= ~AES_FLAGS_BUSY;
+ dd->dma->dma_busy = 0;
req->base.complete(&req->base, err);
}
dma_sync_single_for_device(dd->dev, dd->dma_descript_tab,
PAGE_SIZE, DMA_TO_DEVICE);
- aml_dma_debug(dsc, nents, __func__);
- aml_write_crypto_reg(DMA_THREAD_REG, dd->dma_descript_tab | 2);
+ aml_dma_debug(dsc, nents, __func__, dd->thread, dd->status);
+ aml_write_crypto_reg(dd->thread, dd->dma_descript_tab | 2);
return 0;
}
return err;
if (dd->flags & AES_FLAGS_CBC)
- set_aes_key_iv(dd, dd->ctx->key, dd->ctx->keylen,
+ err = set_aes_key_iv(dd, dd->ctx->key, dd->ctx->keylen,
dd->req->info, 0);
else if (dd->flags & AES_FLAGS_CTR)
- set_aes_key_iv(dd, dd->ctx->key, dd->ctx->keylen,
+ err = set_aes_key_iv(dd, dd->ctx->key, dd->ctx->keylen,
dd->req->info, 1);
else
- set_aes_key_iv(dd, dd->ctx->key, dd->ctx->keylen, NULL, 0);
+ err = set_aes_key_iv(dd, dd->ctx->key, dd->ctx->keylen,
+ NULL, 0);
return err;
}
unsigned long flags;
int32_t err, ret = 0;
- spin_lock_irqsave(&dd->lock, flags);
+ spin_lock_irqsave(&dd->dma->dma_lock, flags);
if (req)
ret = ablkcipher_enqueue_request(&dd->queue, req);
- if (dd->flags & AES_FLAGS_BUSY) {
- spin_unlock_irqrestore(&dd->lock, flags);
+ if (dd->flags & AES_FLAGS_BUSY || dd->dma->dma_busy) {
+ spin_unlock_irqrestore(&dd->dma->dma_lock, flags);
return ret;
}
backlog = crypto_get_backlog(&dd->queue);
async_req = crypto_dequeue_request(&dd->queue);
- if (async_req)
+ if (async_req) {
dd->flags |= AES_FLAGS_BUSY;
- spin_unlock_irqrestore(&dd->lock, flags);
+ dd->dma->dma_busy = 1;
+ }
+ spin_unlock_irqrestore(&dd->dma->dma_lock, flags);
if (!async_req)
return ret;
{
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-aml",
- .cra_priority = 300,
+ .cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aml_aes_ctx),
{
.cra_name = "ctr(aes)",
.cra_driver_name = "ctr-aes-aml",
- .cra_priority = 300,
+ .cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aml_aes_ctx),
err = aml_aes_crypt_dma_stop(dd);
aml_dma_debug(dd->descriptor, dd->fast_nents ?
- dd->fast_nents : 1, __func__);
+ dd->fast_nents : 1, __func__, dd->thread, dd->status);
err = dd->err ? : err;
static irqreturn_t aml_aes_irq(int irq, void *dev_id)
{
struct aml_aes_dev *aes_dd = dev_id;
- uint8_t status = aml_read_crypto_reg(DMA_STATUS_REG);
+ uint8_t status = aml_read_crypto_reg(aes_dd->status);
if (status) {
if (status == 0x1)
pr_err("irq overwrite\n");
if (AES_FLAGS_DMA & aes_dd->flags) {
- aml_write_crypto_reg(DMA_STATUS_REG, 0xf);
+ aml_write_crypto_reg(aes_dd->status, 0xf);
tasklet_schedule(&aes_dd->done_task);
return IRQ_HANDLED;
} else {
{
struct aml_aes_dev *aes_dd;
struct device *dev = &pdev->dev;
- struct resource *res_irq = 0;
- struct resource *res_base = 0;
int err = -EPERM;
aes_dd = kzalloc(sizeof(struct aml_aes_dev), GFP_KERNEL);
}
aes_dd->dev = dev;
+ aes_dd->dma = dev_get_drvdata(dev->parent);
+ aes_dd->thread = aes_dd->dma->thread;
+ aes_dd->status = aes_dd->dma->status;
+ aes_dd->irq = aes_dd->dma->irq;
platform_set_drvdata(pdev, aes_dd);
- res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, AES_THREAD_INDEX);
- res_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res_base) {
- dev_err(dev, "error to get normal IORESOURCE_MEM.\n");
- goto aes_dd_err;
- } else {
- if (!cryptoreg_offset) {
- cryptoreg_offset = ioremap(res_base->start,
- resource_size(res_base));
- map_in_aes_dma = 1;
- }
- }
INIT_LIST_HEAD(&aes_dd->list);
(unsigned long)aes_dd);
crypto_init_queue(&aes_dd->queue, AML_AES_QUEUE_LENGTH);
-
- aes_dd->irq = res_irq->start;
-
err = request_irq(aes_dd->irq, aml_aes_irq, IRQF_SHARED, "aml-aes",
aes_dd);
if (err) {
free_irq(aes_dd->irq, aes_dd);
aes_irq_err:
- if (map_in_aes_dma) {
- iounmap(cryptoreg_offset);
- map_in_aes_dma = 0;
- }
-
tasklet_kill(&aes_dd->done_task);
tasklet_kill(&aes_dd->queue_task);
kfree(aes_dd);
tasklet_kill(&aes_dd->done_task);
tasklet_kill(&aes_dd->queue_task);
- if (map_in_aes_dma) {
- iounmap(cryptoreg_offset);
- map_in_aes_dma = 0;
- }
-
-
if (aes_dd->irq > 0)
free_irq(aes_dd->irq, aes_dd);
#include <linux/amlogic/iomap.h>
#include <linux/amlogic/cpu_version.h>
#include "aml-crypto-dma.h"
-#if 1
-void __iomem *cryptoreg_offset;
+
u32 swap_ulong32(u32 val)
{
u32 res = 0;
(((val >> 16) & 0xff) << 8) + ((val >> 24) & 0xff);
return res;
}
-void aml_write_crypto_reg(u32 addr, u32 data)
-{
- writel(data, cryptoreg_offset + (addr << 2));
-}
-
-u32 aml_read_crypto_reg(u32 addr)
-{
- return readl(cryptoreg_offset + (addr << 2));
-}
-#endif
+EXPORT_SYMBOL_GPL(swap_ulong32);
-u32 get_dma_t0_offset(void)
+void aml_write_crypto_reg(u32 addr, u32 data)
{
- if (cpu_after_eq(MESON_CPU_MAJOR_ID_TXLX))
- return TXLX_DMA_T0;
+ if (cryptoreg)
+ writel(data, cryptoreg + (addr << 2));
else
- return GXL_DMA_T0;
+ pr_err("crypto reg mapping is not initailized\n");
}
+EXPORT_SYMBOL_GPL(aml_write_crypto_reg);
-u32 get_dma_sts0_offset(void)
+u32 aml_read_crypto_reg(u32 addr)
{
- if (cpu_after_eq(MESON_CPU_MAJOR_ID_TXLX))
- return TXLX_DMA_STS0;
- else
- return GXL_DMA_STS0;
+ if (!cryptoreg) {
+ pr_err("crypto reg mapping is not initailized\n");
+ return 0;
+ }
+ return readl(cryptoreg + (addr << 2));
}
+EXPORT_SYMBOL_GPL(aml_read_crypto_reg);
-void aml_dma_debug(struct dma_dsc *dsc, u32 nents, const char *msg)
+void aml_dma_debug(struct dma_dsc *dsc, u32 nents, const char *msg,
+ u32 thread, u32 status)
{
-#if AML_CRYPTO_DEBUG
u32 i = 0;
- u32 DMA_T0 = get_dma_t0_offset();
- u32 DMA_STS0 = get_dma_sts0_offset();
- pr_err("begin %s\n", msg);
- for (i = 0; i < 1; i++)
- pr_err("reg(%lu) = 0x%8x\n", (uintptr_t)(DMA_T0 + i),
- aml_read_crypto_reg(DMA_T0 + i));
- for (i = 0; i < 1; i++)
- pr_err("reg(%lu) = 0x%8x\n", (uintptr_t)(DMA_STS0 + i),
- aml_read_crypto_reg(DMA_STS0 + i));
+ pr_debug("begin %s\n", msg);
+ pr_debug("reg(%u) = 0x%8x\n", thread,
+ aml_read_crypto_reg(thread));
+ pr_debug("reg(%u) = 0x%8x\n", status,
+ aml_read_crypto_reg(status));
for (i = 0; i < nents; i++) {
- pr_err("desc (%4x) (len) = 0x%8x\n", i,
+ pr_debug("desc (%4x) (len) = 0x%8x\n", i,
dsc[i].dsc_cfg.b.length);
- pr_err("desc (%4x) (irq) = 0x%8x\n", i,
+ pr_debug("desc (%4x) (irq) = 0x%8x\n", i,
dsc[i].dsc_cfg.b.irq);
- pr_err("desc (%4x) (eoc) = 0x%8x\n", i,
+ pr_debug("desc (%4x) (eoc) = 0x%8x\n", i,
dsc[i].dsc_cfg.b.eoc);
- pr_err("desc (%4x) (lop) = 0x%8x\n", i,
+ pr_debug("desc (%4x) (lop) = 0x%8x\n", i,
dsc[i].dsc_cfg.b.loop);
- pr_err("desc (%4x) (mod) = 0x%8x\n", i,
+ pr_debug("desc (%4x) (mod) = 0x%8x\n", i,
dsc[i].dsc_cfg.b.mode);
- pr_err("desc (%4x) (beg) = 0x%8x\n", i,
+ pr_debug("desc (%4x) (beg) = 0x%8x\n", i,
dsc[i].dsc_cfg.b.begin);
- pr_err("desc (%4x) (end) = 0x%8x\n", i,
+ pr_debug("desc (%4x) (end) = 0x%8x\n", i,
dsc[i].dsc_cfg.b.end);
- pr_err("desc (%4x) (opm) = 0x%8x\n", i,
+ pr_debug("desc (%4x) (opm) = 0x%8x\n", i,
dsc[i].dsc_cfg.b.op_mode);
- pr_err("desc (%4x) (enc) = 0x%8x\n", i,
+ pr_debug("desc (%4x) (enc) = 0x%8x\n", i,
dsc[i].dsc_cfg.b.enc_sha_only);
- pr_err("desc (%4x) (blk) = 0x%8x\n", i,
+ pr_debug("desc (%4x) (blk) = 0x%8x\n", i,
dsc[i].dsc_cfg.b.block);
- pr_err("desc (%4x) (err) = 0x%8x\n", i,
+ pr_debug("desc (%4x) (err) = 0x%8x\n", i,
dsc[i].dsc_cfg.b.error);
- pr_err("desc (%4x) (own) = 0x%8x\n", i,
+ pr_debug("desc (%4x) (own) = 0x%8x\n", i,
dsc[i].dsc_cfg.b.owner);
- pr_err("desc (%4x) (src) = 0x%8x\n", i,
+ pr_debug("desc (%4x) (src) = 0x%8x\n", i,
dsc[i].src_addr);
- pr_err("desc (%4x) (tgt) = 0x%8x\n", i,
+ pr_debug("desc (%4x) (tgt) = 0x%8x\n", i,
dsc[i].tgt_addr);
}
- pr_err("end %s\n", msg);
-#endif
+ pr_debug("end %s\n", msg);
}
+EXPORT_SYMBOL_GPL(aml_dma_debug);
#define _AML_CRYPTO_H_
#include <linux/io.h>
-#define AML_CRYPTO_DEBUG 0
-
/* Reserved 4096 bytes and table is 12 bytes each */
#define MAX_NUM_TABLES 341
#define MODE_TDES_2K 0xe
#define MODE_TDES_3K 0xf
-/* Thread 2, 3 are for secure threads */
-#define AES_THREAD_INDEX 0
-#define TDES_THREAD_INDEX 0
-#define SHA_THREAD_INDEX 0
-#define HMAC_THREAD_INDEX 0
-
struct dma_dsc {
union {
uint32_t d32;
uint32_t tgt_addr;
};
-extern void __iomem *cryptoreg_offset;
-extern u32 secure_cryptoreg_offset;
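+/* DMA engine state shared by the AES/TDES/SHA sub-drivers */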
+struct aml_dma_dev {
+ spinlock_t dma_lock;
+ uint32_t thread;
+ uint32_t status;
+ int irq;
+ uint8_t dma_busy;
+};
u32 swap_ulong32(u32 val);
void aml_write_crypto_reg(u32 addr, u32 data);
u32 aml_read_crypto_reg(u32 addr);
-void aml_dma_debug(struct dma_dsc *dsc, u32 nents, const char *msg);
+void aml_dma_debug(struct dma_dsc *dsc, u32 nents, const char *msg,
+ u32 thread, u32 status);
-u32 get_dma_t0_offset(void);
-u32 get_dma_sts0_offset(void);
+
+extern void __iomem *cryptoreg;
#endif
--- /dev/null
+/*
+ * drivers/amlogic/crypto/aml-dma.c
+ *
+ * Copyright (C) 2017 Amlogic, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/hw_random.h>
+#include <linux/platform_device.h>
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/crypto.h>
+#include <linux/cryptohash.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
+#include <linux/of_platform.h>
+#include "aml-crypto-dma.h"
+
+void __iomem *cryptoreg;
+
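+/* per-SoC offsets of the DMA thread 0 descriptor and status registers */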
+struct meson_dma_data {
+ uint32_t thread;
+ uint32_t status;
+};
+
+static struct meson_dma_data meson_gxl_data = {
+ .thread = GXL_DMA_T0,
+ .status = GXL_DMA_STS0,
+};
+
+static struct meson_dma_data meson_txlx_data = {
+ .thread = TXLX_DMA_T0,
+ .status = TXLX_DMA_STS0,
+};
+#ifdef CONFIG_OF
+static const struct of_device_id aml_dma_dt_match[] = {
+ { .compatible = "amlogic,aml_gxl_dma",
+ .data = &meson_gxl_data,
+ },
+ { .compatible = "amlogic,aml_txlx_dma",
+ .data = &meson_txlx_data,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, aml_dma_dt_match);
+#else
+#define aml_dma_dt_match NULL
+#endif
+
+static int aml_dma_probe(struct platform_device *pdev)
+{
+ struct aml_dma_dev *dma_dd;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct resource *res_base = NULL;
+ struct resource *res_irq = NULL;
+ const struct of_device_id *match;
+ int err = -EPERM;
+ const struct meson_dma_data *priv_data;
+
+ dma_dd = kzalloc(sizeof(struct aml_dma_dev), GFP_KERNEL);
+ if (dma_dd == NULL) {
+ err = -ENOMEM;
+ goto dma_err;
+ }
+
+ match = of_match_device(aml_dma_dt_match, &pdev->dev);
+ if (!match) {
+ dev_err(dev, "no matching DT data found\n");
+ err = -EINVAL;
+ goto dma_err;
+ }
+ priv_data = match->data;
+ dma_dd->thread = priv_data->thread;
+ dma_dd->status = priv_data->status;
+ res_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res_base) {
+ dev_err(dev, "error to get normal IORESOURCE_MEM.\n");
+ goto dma_err;
+ } else {
+ cryptoreg = ioremap(res_base->start,
+ resource_size(res_base));
+ if (!cryptoreg) {
+ dev_err(dev, "failed to remap crypto reg\n");
+ goto dma_err;
+ }
+ }
+
+ res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res_irq) {
+ dev_err(dev, "error to get IORESOURCE_IRQ.\n");
+ err = -ENXIO;
+ iounmap(cryptoreg);
+ goto dma_err;
+ }
+ dma_dd->irq = res_irq->start;
+ dma_dd->dma_busy = 0;
+ spin_lock_init(&dma_dd->dma_lock);
+ platform_set_drvdata(pdev, dma_dd);
+ dev_info(dev, "Aml dma\n");
+
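+ /* create platform devices for the crypto sub-nodes of this DMA node */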
+ err = of_platform_populate(np, NULL, NULL, dev);
+
+ if (err != 0) {
+ iounmap(cryptoreg);
+ kfree(dma_dd);
+ }
+
+ return err;
+
+dma_err:
+ dev_err(dev, "initialization failed.\n");
+ kfree(dma_dd);
+
+ return err;
+}
+
+static int aml_dma_remove(struct platform_device *pdev)
+{
+ struct aml_dma_dev *dma_dd;
+
+ dma_dd = platform_get_drvdata(pdev);
+ if (!dma_dd)
+ return -ENODEV;
+
+ iounmap(cryptoreg);
+ kfree(dma_dd);
+
+ return 0;
+}
+
+static struct platform_driver aml_dma_driver = {
+ .probe = aml_dma_probe,
+ .remove = aml_dma_remove,
+ .driver = {
+ .name = "aml_dma",
+ .owner = THIS_MODULE,
+ .of_match_table = aml_dma_dt_match,
+ },
+};
+
+module_platform_driver(aml_dma_driver);
+
+MODULE_DESCRIPTION("Aml crypto DMA support.");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("matthew.shyu <matthew.shyu@amlogic.com>");
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <linux/amlogic/iomap.h>
-#include <linux/amlogic/cpu_version.h>
#include "aml-crypto-dma.h"
/* SHA flags */
#define SHA_BUFFER_LEN PAGE_SIZE
-#define DMA_THREAD_REG (get_dma_t0_offset() + SHA_THREAD_INDEX)
-#define DMA_STATUS_REG (get_dma_sts0_offset() + SHA_THREAD_INDEX)
-u8 map_in_sha_dma;
-
struct aml_sha_dev;
struct aml_sha_reqctx {
struct device *dev;
int irq;
- spinlock_t lock;
+ struct aml_dma_dev *dma;
+ uint32_t thread;
+ uint32_t status;
int err;
struct tasklet_struct done_task;
ctx->flags = 0;
-#if AML_CRYPTO_DEBUG
- dev_info(dd->dev, "init: digest size: %d\n",
- crypto_ahash_digestsize(tfm));
-#endif
+ pr_info("init: digest size: %d\n", crypto_ahash_digestsize(tfm));
switch (crypto_ahash_digestsize(tfm)) {
case SHA1_DIGEST_SIZE:
struct aml_sha_reqctx *ctx = ahash_request_ctx(dd->req);
size_t length = 0;
-#if AML_CRYPTO_DEBUG
- dev_info(dd->dev, "xmit_dma: digcnt: 0x%llx 0x%llx, nents: %u, final: %d\n",
+ pr_info("xmit_dma: digcnt: 0x%llx 0x%llx, nents: %u, final: %d\n",
ctx->digcnt[1], ctx->digcnt[0], nents, final);
-#endif
mode = MODE_SHA1;
}
ctx->dma_descript_tab = dma_map_single(dd->dev, ctx->descriptor,
PAGE_SIZE, DMA_FROM_DEVICE);
- aml_dma_debug(dsc, nents, __func__);
+ if (dma_mapping_error(dd->dev, ctx->dma_descript_tab)) {
+ dev_err(dd->dev, "mapping descriptor failed\n");
+ dma_unmap_single(dd->dev, ctx->hash_addr,
+ SHA256_DIGEST_SIZE, DMA_FROM_DEVICE);
+ return -EINVAL;
+ }
+ aml_dma_debug(dsc, nents, __func__, dd->thread, dd->status);
/* should be non-zero before next lines to disable clocks later */
for (i = 0; i < nents; i++) {
length = dsc->dsc_cfg.b.length;
dd->flags |= SHA_FLAGS_DMA_ACTIVE;
-#if AML_CRYPTO_DEBUG
- dev_info(dd->dev,
- "xmit before : digcnt: 0x%llx 0x%llx, length: %zd, final: %d\n",
+ pr_info("xmit before : digcnt: 0x%llx 0x%llx, length: %zd, final: %d\n",
ctx->digcnt[1], ctx->digcnt[0], length, final);
-#endif
+
/* Start DMA transfer */
- aml_write_crypto_reg(DMA_THREAD_REG,
+ aml_write_crypto_reg(dd->thread,
(uintptr_t) ctx->dma_descript_tab | 2);
return -EINPROGRESS;
final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;
-#if AML_CRYPTO_DEBUG
- dev_info(dd->dev,
- "slow: bufcnt: %zd, digcnt: 0x%llx 0x%llx, final: %d, total: %u\n",
+ pr_info("slow: bufcnt: %zd, digcnt: 0x%llx 0x%llx, final: %d, total: %u\n",
ctx->bufcnt, ctx->digcnt[1], ctx->digcnt[0], final, ctx->total);
-#endif
if (IS_ALIGNED(ctx->bufcnt, ctx->block_size) || final) {
count = ctx->bufcnt;
struct scatterlist *sg;
struct dma_dsc *dsc = ctx->descriptor;
-#if AML_CRYPTO_DEBUG
- dev_info(dd->dev, "start: total: %u, fast_nents: %u offset: %u\n",
- ctx->total, ctx->fast_nents, ctx->offset);
-#endif
+ pr_info("start: total: %u, fast_nents: %u offset: %u\n",
+ ctx->total, ctx->fast_nents, ctx->offset);
if (!ctx->total)
return 0;
sg = ctx->sg;
while (ctx->total && ctx->fast_nents < MAX_NUM_TABLES && sg) {
-#if AML_CRYPTO_DEBUG
- dev_info(dd->dev,
- "fast: dig: 0x%llx 0x%llx, bufcnt: %zd, total: %u, sglen: %u\n",
+ pr_info("fast: dig: 0x%llx 0x%llx, bufcnt: %zd, total: %u, sglen: %u\n",
ctx->digcnt[1], ctx->digcnt[0],
ctx->bufcnt, ctx->total, ctx->sg->length);
-#endif
length = min(ctx->total, sg->length);
sg = sg_next(sg);
ctx->fast_nents++;
-#if AML_CRYPTO_DEBUG
- dev_info(dd->dev, "fast: total: %u, offset: %u, tail: %u\n",
+ pr_info("fast: total: %u, offset: %u, tail: %u\n",
ctx->total, ctx->offset, tail);
-#endif
if (tail)
break;
static int aml_sha_update_req(struct aml_sha_dev *dd)
{
int err;
-#if AML_CRYPTO_DEBUG
struct ahash_request *req = dd->req;
struct aml_sha_reqctx *ctx = ahash_request_ctx(req);
-#endif
-#if AML_CRYPTO_DEBUG
- dev_info(dd->dev, "update_req: total: %u, digcnt: 0x%llx 0x%llx\n",
+ pr_info("update_req: total: %u, digcnt: 0x%llx 0x%llx\n",
ctx->total, ctx->digcnt[1], ctx->digcnt[0]);
-#endif
err = aml_sha_update_dma_start(dd);
err = aml_sha_update_dma_slow(dd);
-#if AML_CRYPTO_DEBUG
- dev_info(dd->dev, "final_req: err: %d\n", err);
-#endif
+ pr_info("final_req: err: %d\n", err);
return err;
}
ctx->hash_addr = dma_map_single(dd->dev, ctx->digest,
SHA256_DIGEST_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dd->dev, ctx->hash_addr)) {
+ dev_err(dd->dev, "mapping hash addr failed: %d\n",
+ SHA256_DIGEST_SIZE);
+ return -EINVAL;
+ }
- key = kmalloc(tctx->keylen, GFP_KERNEL);
+ key = kmalloc(tctx->keylen, GFP_ATOMIC);
+ if (!key)
+ return -ENOMEM;
memcpy(key, tctx->key, tctx->keylen);
dma_key = dma_map_single(dd->dev, key,
tctx->keylen, DMA_TO_DEVICE);
+ if (dma_mapping_error(dd->dev, dma_key)) {
+ dev_err(dd->dev, "mapping key addr failed: %d\n", tctx->keylen);
+ kfree(key);
+ dma_unmap_single(dd->dev, ctx->hash_addr,
+ SHA256_DIGEST_SIZE, DMA_BIDIRECTIONAL);
+ return -EINVAL;
+ }
mode = MODE_SHA1;
if (ctx->flags & SHA_FLAGS_SHA224)
ctx->dma_descript_tab = dma_map_single(dd->dev, ctx->descriptor,
PAGE_SIZE, DMA_TO_DEVICE);
- aml_dma_debug(dsc, 2, __func__);
- aml_write_crypto_reg(DMA_THREAD_REG,
+ if (dma_mapping_error(dd->dev, ctx->dma_descript_tab)) {
+ dev_err(dd->dev, "mapping descriptor failed\n");
+ dma_unmap_single(dd->dev, dma_key,
+ tctx->keylen, DMA_TO_DEVICE);
+ dma_unmap_single(dd->dev, ctx->hash_addr,
+ SHA256_DIGEST_SIZE, DMA_BIDIRECTIONAL);
+ kfree(key);
+ return -EINVAL;
+ }
+
+ aml_dma_debug(dsc, 2, __func__, dd->thread, dd->status);
+ aml_write_crypto_reg(dd->thread,
(uintptr_t) ctx->dma_descript_tab | 2);
- while (aml_read_crypto_reg(DMA_STATUS_REG) == 0)
+ while (aml_read_crypto_reg(dd->status) == 0)
;
- aml_write_crypto_reg(DMA_STATUS_REG, 0xf);
+ aml_write_crypto_reg(dd->status, 0xf);
dma_unmap_single(dd->dev, dma_key,
tctx->keylen, DMA_TO_DEVICE);
dma_unmap_single(dd->dev, ctx->hash_addr,
struct aml_sha_reqctx *ctx = ahash_request_ctx(req);
struct aml_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
int err = 0;
-#if AML_CRYPTO_DEBUG
- struct aml_sha_dev *dd = ctx->dd;
-#endif
if (ctx->digcnt[0] || ctx->digcnt[1]) {
if (tctx->flags & SHA_FLAGS_HMAC)
aml_sha_copy_ready_hash(req);
}
-#if AML_CRYPTO_DEBUG
- dev_info(dd->dev, "finish digcnt: 0x%llx 0x%llx, bufcnt: %zd\n",
+ pr_info("finish digcnt: 0x%llx 0x%llx, bufcnt: %zd\n",
ctx->digcnt[1], ctx->digcnt[0], ctx->bufcnt);
-#endif
return err;
}
}
/* atomic operation is not needed here */
+ if (dd->flags & SHA_FLAGS_BUSY)
+ dd->dma->dma_busy = 0;
dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL |
SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY);
struct aml_sha_dev *dd = tctx->dd;
dma_addr_t dma_ctx;
struct dma_dsc *dsc = ctx->descriptor;
- unsigned long flags;
+ uint32_t i = 0;
+ int32_t len = sizeof(tctx->state);
if (!ctx->digcnt[0] && !ctx->digcnt[1])
return;
- if (!cpu_after_eq(MESON_CPU_MAJOR_ID_TXLX))
- return;
-
- spin_lock_irqsave(&dd->lock, flags);
dma_ctx = dma_map_single(dd->dev, tctx->state,
sizeof(tctx->state), DMA_TO_DEVICE);
+ if (dma_mapping_error(dd->dev, dma_ctx)) {
+ dev_err(dd->dev, "mapping dma_ctx failed\n");
+ return;
+ }
- if (cpu_after_eq(MESON_CPU_MAJOR_ID_AXG)) {
- uint32_t i = 0;
- int32_t len = sizeof(tctx->state);
- while (len > 0) {
- dsc[i].src_addr = (uint32_t)dma_ctx + i * 16;
- dsc[i].tgt_addr = i * 16;
- dsc[i].dsc_cfg.d32 = 0;
- dsc[i].dsc_cfg.b.length = len > 16 ? 16 : len;
- dsc[i].dsc_cfg.b.mode = MODE_KEY;
- dsc[i].dsc_cfg.b.eoc = 0;
- dsc[i].dsc_cfg.b.owner = 1;
- i++;
- len -= 16;
- }
- dsc[i - 1].dsc_cfg.b.eoc = 1;
- } else {
- dsc->src_addr = (uint32_t)dma_ctx;
- dsc->tgt_addr = 0;
- dsc->dsc_cfg.d32 = 0;
- dsc->dsc_cfg.b.length = sizeof(tctx->state);
- dsc->dsc_cfg.b.mode = MODE_KEY;
- dsc->dsc_cfg.b.eoc = 1;
- dsc->dsc_cfg.b.owner = 1;
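+ /* reload the saved hash state via 16-byte KEY-mode descriptors */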
+ while (len > 0) {
+ dsc[i].src_addr = (uint32_t)dma_ctx + i * 16;
+ dsc[i].tgt_addr = i * 16;
+ dsc[i].dsc_cfg.d32 = 0;
+ dsc[i].dsc_cfg.b.length = len > 16 ? 16 : len;
+ dsc[i].dsc_cfg.b.mode = MODE_KEY;
+ dsc[i].dsc_cfg.b.eoc = 0;
+ dsc[i].dsc_cfg.b.owner = 1;
+ i++;
+ len -= 16;
}
+ dsc[i - 1].dsc_cfg.b.eoc = 1;
ctx->dma_descript_tab = dma_map_single(dd->dev, ctx->descriptor,
PAGE_SIZE, DMA_TO_DEVICE);
- aml_write_crypto_reg(DMA_THREAD_REG,
+ if (dma_mapping_error(dd->dev, ctx->dma_descript_tab)) {
+ dev_err(dd->dev, "mapping descript tab failed\n");
+ dma_unmap_single(dd->dev, dma_ctx,
+ sizeof(tctx->state), DMA_TO_DEVICE);
+ return;
+ }
+ aml_write_crypto_reg(dd->thread,
(uintptr_t) ctx->dma_descript_tab | 2);
- aml_dma_debug(dsc, 1, __func__);
- while (aml_read_crypto_reg(DMA_STATUS_REG) == 0)
+ aml_dma_debug(dsc, 1, __func__, dd->thread, dd->status);
+ while (aml_read_crypto_reg(dd->status) == 0)
;
- aml_write_crypto_reg(DMA_STATUS_REG, 0xf);
+ aml_write_crypto_reg(dd->status, 0xf);
dma_unmap_single(dd->dev, dma_ctx,
sizeof(tctx->state), DMA_TO_DEVICE);
dma_unmap_single(dd->dev, ctx->dma_descript_tab, PAGE_SIZE,
DMA_FROM_DEVICE);
- spin_unlock_irqrestore(&dd->lock, flags);
}
static int aml_sha_handle_queue(struct aml_sha_dev *dd,
unsigned long flags;
int err = 0, ret = 0;
- spin_lock_irqsave(&dd->lock, flags);
+ spin_lock_irqsave(&dd->dma->dma_lock, flags);
if (req)
ret = ahash_enqueue_request(&dd->queue, req);
- if (SHA_FLAGS_BUSY & dd->flags) {
- spin_unlock_irqrestore(&dd->lock, flags);
+ if (SHA_FLAGS_BUSY & dd->flags || dd->dma->dma_busy) {
+ spin_unlock_irqrestore(&dd->dma->dma_lock, flags);
return ret;
}
backlog = crypto_get_backlog(&dd->queue);
async_req = crypto_dequeue_request(&dd->queue);
- if (async_req)
+ if (async_req) {
dd->flags |= SHA_FLAGS_BUSY;
+ dd->dma->dma_busy = 1;
+ }
- spin_unlock_irqrestore(&dd->lock, flags);
+ spin_unlock_irqrestore(&dd->dma->dma_lock, flags);
if (!async_req)
return ret;
dd->req = req;
ctx = ahash_request_ctx(req);
-#if AML_CRYPTO_DEBUG
- dev_info(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
+ pr_info("handling new req, op: %lu, nbytes: %d\n",
ctx->op, req->nbytes);
-#endif
err = aml_sha_hw_init(dd);
if (err)
static int aml_sha_import(struct ahash_request *req, const void *in)
{
- struct aml_sha_reqctx *ctx = ahash_request_ctx(req);
struct aml_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
const struct aml_sha_ctx *ictx = in;
- struct aml_sha_dev *dd = tctx->dd;
- dma_addr_t dma_ctx;
- struct dma_dsc *dsc = ctx->descriptor;
- unsigned long flags;
-
- if (!cpu_after_eq(MESON_CPU_MAJOR_ID_TXLX))
- return -ENOTSUPP;
- spin_lock_irqsave(&dd->lock, flags);
memcpy(tctx->state, ictx->state, sizeof(tctx->state));
- dma_ctx = dma_map_single(dd->dev, tctx->state,
- sizeof(tctx->state), DMA_TO_DEVICE);
-
- if (cpu_after_eq(MESON_CPU_MAJOR_ID_AXG)) {
- uint32_t i = 0;
- int32_t len = sizeof(tctx->state);
- while (len > 0) {
- dsc[i].src_addr = (uint32_t)dma_ctx + i * 16;
- dsc[i].tgt_addr = i * 16;
- dsc[i].dsc_cfg.d32 = 0;
- dsc[i].dsc_cfg.b.length = len > 16 ? 16 : len;
- dsc[i].dsc_cfg.b.mode = MODE_KEY;
- dsc[i].dsc_cfg.b.eoc = 0;
- dsc[i].dsc_cfg.b.owner = 1;
- i++;
- len -= 16;
- }
- dsc[i - 1].dsc_cfg.b.eoc = 1;
- } else {
- dsc->src_addr = (uint32_t)dma_ctx;
- dsc->tgt_addr = 0;
- dsc->dsc_cfg.d32 = 0;
- dsc->dsc_cfg.b.length = sizeof(tctx->state);
- dsc->dsc_cfg.b.mode = MODE_KEY;
- dsc->dsc_cfg.b.eoc = 1;
- dsc->dsc_cfg.b.owner = 1;
- }
-
- ctx->dma_descript_tab = dma_map_single(dd->dev, ctx->descriptor,
- PAGE_SIZE, DMA_TO_DEVICE);
- aml_write_crypto_reg(DMA_THREAD_REG,
- (uintptr_t) ctx->dma_descript_tab | 2);
- aml_dma_debug(dsc, 1, __func__);
- while (aml_read_crypto_reg(DMA_STATUS_REG) == 0)
- ;
- aml_write_crypto_reg(DMA_STATUS_REG, 0xf);
- dma_unmap_single(dd->dev, dma_ctx,
- sizeof(tctx->state), DMA_TO_DEVICE);
- dma_unmap_single(dd->dev, ctx->dma_descript_tab, PAGE_SIZE,
- DMA_FROM_DEVICE);
-
- spin_unlock_irqrestore(&dd->lock, flags);
return 0;
}
{
struct aml_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
struct aml_sha_ctx *octx = out;
- struct aml_sha_dev *dd = tctx->dd;
- unsigned long flags;
-
- if (!cpu_after_eq(MESON_CPU_MAJOR_ID_TXLX))
- return -ENOTSUPP;
- spin_lock_irqsave(&dd->lock, flags);
memcpy(octx->state, tctx->state, sizeof(tctx->state));
- spin_unlock_irqrestore(&dd->lock, flags);
return 0;
}
struct aml_sha_dev *dd = 0;
struct aml_sha_dev *tmp = 0;
struct dma_dsc *dsc = 0;
- struct aml_sha_reqctx *ctx = 0;
uint32_t bs = 0;
uint32_t ds = 0;
int err = 0;
dma_addr_t dma_key = 0;
+ dma_addr_t dma_descript_tab = 0;
uint8_t *key_raw = 0;
uint32_t mode = MODE_SHA1;
uint32_t map_len = 0;
} else {
dd = tctx->dd;
}
- ctx = ahash_request_ctx(dd->req);
- dsc = ctx->descriptor;
+
+ dsc = kmalloc(sizeof(struct dma_dsc) * 2, GFP_ATOMIC);
+ if (!dsc) {
+ spin_unlock_bh(&aml_sha.lock);
+ return -ENOMEM;
+ }
spin_unlock_bh(&aml_sha.lock);
map_len = keylen > bs ? keylen : bs;
dma_key = dma_map_single(dd->dev, key_raw,
map_len, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dd->dev, dma_key)) {
+ dev_err(dd->dev, "mapping dma_key failed\n");
+ kfree(dsc);
+ return -EINVAL;
+ }
+
if (keylen > bs) {
dsc[0].src_addr = (uintptr_t)dma_key;
dsc[0].tgt_addr = (uintptr_t)dma_key;
dsc[ipad].dsc_cfg.b.eoc = 1;
dsc[ipad].dsc_cfg.b.owner = 1;
- ctx->dma_descript_tab = dma_map_single(dd->dev, ctx->descriptor,
- PAGE_SIZE, DMA_TO_DEVICE);
- aml_dma_debug(dsc, ipad + 1, __func__);
- aml_write_crypto_reg(DMA_THREAD_REG,
- (uintptr_t) ctx->dma_descript_tab | 2);
- while (aml_read_crypto_reg(DMA_STATUS_REG) == 0)
+ dma_descript_tab = dma_map_single(dd->dev, dsc,
+ sizeof(struct dma_dsc) * 2, DMA_TO_DEVICE);
+ if (dma_mapping_error(dd->dev, dma_descript_tab)) {
+ dev_err(dd->dev, "mapping descript_tab failed\n");
+ dma_unmap_single(dd->dev, dma_key,
+ map_len, DMA_BIDIRECTIONAL);
+ kfree(dsc);
+ return -EINVAL;
+ }
+
+ aml_dma_debug(dsc, ipad + 1, __func__, dd->thread, dd->status);
+ aml_write_crypto_reg(dd->thread,
+ (uintptr_t) dma_descript_tab | 2);
+ while (aml_read_crypto_reg(dd->status) == 0)
;
- aml_write_crypto_reg(DMA_STATUS_REG, 0xf);
+ aml_write_crypto_reg(dd->status, 0xf);
dma_unmap_single(dd->dev, dma_key,
map_len, DMA_BIDIRECTIONAL);
-	dma_unmap_single(dd->dev, ctx->dma_descript_tab, PAGE_SIZE,
-		DMA_FROM_DEVICE);
+ dma_unmap_single(dd->dev, dma_descript_tab,
+ sizeof(struct dma_dsc) * 2, DMA_TO_DEVICE);
tctx->keylen = keylen;
memcpy(tctx->key, key_raw, keylen);
.base = {
.cra_name = "sha1",
.cra_driver_name = "aml-sha1",
- .cra_priority = 300,
+ .cra_priority = 100,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aml_sha_ctx),
.base = {
.cra_name = "sha256",
.cra_driver_name = "aml-sha256",
- .cra_priority = 300,
+ .cra_priority = 100,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aml_sha_ctx),
.base = {
.cra_name = "sha224",
.cra_driver_name = "aml-sha224",
- .cra_priority = 300,
+ .cra_priority = 100,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aml_sha_ctx),
.base = {
.cra_name = "hmac(sha1)",
.cra_driver_name = "aml-hmac-sha1",
- .cra_priority = 300,
+ .cra_priority = 100,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aml_sha_ctx),
.base = {
.cra_name = "hmac(sha224)",
.cra_driver_name = "aml-hmac-sha224",
- .cra_priority = 300,
+ .cra_priority = 100,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aml_sha_ctx),
.base = {
.cra_name = "hmac(sha256)",
.cra_driver_name = "aml-hmac-sha256",
- .cra_priority = 300,
+ .cra_priority = 100,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aml_sha_ctx),
dma_unmap_single(dd->dev, ctx->dma_descript_tab, PAGE_SIZE,
DMA_FROM_DEVICE);
aml_dma_debug(ctx->descriptor, ctx->fast_nents ?
- ctx->fast_nents : 1, __func__);
+ ctx->fast_nents : 1, __func__, dd->thread, dd->status);
if (SHA_FLAGS_DMA_READY & dd->flags) {
if (SHA_FLAGS_DMA_ACTIVE & dd->flags) {
static irqreturn_t aml_sha_irq(int irq, void *dev_id)
{
struct aml_sha_dev *sha_dd = dev_id;
- uint8_t status = aml_read_crypto_reg(DMA_STATUS_REG);
+ uint8_t status = aml_read_crypto_reg(sha_dd->status);
if (status) {
if (status == 0x1)
if (SHA_FLAGS_DMA_ACTIVE & sha_dd->flags) {
sha_dd->flags |= SHA_FLAGS_OUTPUT_READY;
sha_dd->flags |= SHA_FLAGS_DMA_READY;
- aml_write_crypto_reg(DMA_STATUS_REG, 0xf);
+ aml_write_crypto_reg(sha_dd->status, 0xf);
tasklet_schedule(&sha_dd->done_task);
return IRQ_HANDLED;
} else {
{
struct aml_sha_dev *sha_dd;
struct device *dev = &pdev->dev;
- struct resource *res_irq = 0;
- struct resource *res_base = 0;
int err = -EPERM;
sha_dd = kzalloc(sizeof(struct aml_sha_dev), GFP_KERNEL);
}
sha_dd->dev = dev;
+ sha_dd->dma = dev_get_drvdata(dev->parent);
+ sha_dd->thread = sha_dd->dma->thread;
+ sha_dd->status = sha_dd->dma->status;
+ sha_dd->irq = sha_dd->dma->irq;
platform_set_drvdata(pdev, sha_dd);
-
- res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, SHA_THREAD_INDEX);
-
- res_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res_base) {
- dev_err(dev, "error to get normal IORESOURCE_MEM.\n");
- goto sha_dd_err;
- } else {
- if (cryptoreg_offset) {
- cryptoreg_offset = ioremap(res_base->start,
- resource_size(res_base));
- map_in_sha_dma = 1;
- }
- }
-
INIT_LIST_HEAD(&sha_dd->list);
tasklet_init(&sha_dd->done_task, aml_sha_done_task,
(unsigned long)sha_dd);
crypto_init_queue(&sha_dd->queue, AML_SHA_QUEUE_LENGTH);
-
- sha_dd->irq = res_irq->start;
err = request_irq(sha_dd->irq, aml_sha_irq, IRQF_SHARED, "aml-sha",
sha_dd);
if (err) {
list_del(&sha_dd->list);
spin_unlock(&aml_sha.lock);
- if (map_in_sha_dma) {
-
- iounmap(cryptoreg_offset);
- map_in_sha_dma = 0;
- }
-
free_irq(sha_dd->irq, sha_dd);
res_err:
tasklet_kill(&sha_dd->done_task);
tasklet_kill(&sha_dd->done_task);
- if (map_in_sha_dma) {
-
- iounmap(cryptoreg_offset);
- map_in_sha_dma = 0;
- }
-
if (sha_dd->irq >= 0)
free_irq(sha_dd->irq, sha_dd);
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <linux/amlogic/iomap.h>
-#include <linux/amlogic/cpu_version.h>
#include "aml-crypto-dma.h"
/* TDES flags */
#define AML_TDES_QUEUE_LENGTH 50
-#define DMA_THREAD_REG (get_dma_t0_offset() + TDES_THREAD_INDEX)
-#define DMA_STATUS_REG (get_dma_sts0_offset() + TDES_THREAD_INDEX)
-u8 map_in_tdes_dma;
-
struct aml_tdes_dev;
struct aml_tdes_ctx {
unsigned long flags;
int err;
- spinlock_t lock;
+ struct aml_dma_dev *dma;
+ uint32_t thread;
+ uint32_t status;
struct crypto_queue queue;
struct tasklet_struct done_task;
.lock = __SPIN_LOCK_UNLOCKED(aml_tdes.lock),
};
-static void set_tdes_key_iv(struct aml_tdes_dev *dd,
+static int set_tdes_key_iv(struct aml_tdes_dev *dd,
u32 *key, u32 keylen, u32 *iv)
{
struct dma_dsc *dsc = dd->descriptor;
uint32_t *piv = key_iv + 8;
-	uint32_t len = keylen;
+ int32_t len = keylen;
dma_addr_t dma_addr_key;
+ uint32_t i = 0;
memset(key_iv, 0, sizeof(key_iv));
memcpy(key_iv, key, keylen);
len = 48; /* full key storage */
}
+ if (!len)
+ return -EPERM;
+
dma_addr_key = dma_map_single(dd->dev, key_iv,
sizeof(key_iv), DMA_TO_DEVICE);
- if (cpu_after_eq(MESON_CPU_MAJOR_ID_AXG)) {
- uint32_t i = 0;
- while (len > 0) {
- dsc[i].src_addr = (uint32_t)dma_addr_key + i * 16;
- dsc[i].tgt_addr = i * 16;
- dsc[i].dsc_cfg.d32 = 0;
- dsc[i].dsc_cfg.b.length = len > 16 ? 16 : len;
- dsc[i].dsc_cfg.b.mode = MODE_KEY;
- dsc[i].dsc_cfg.b.eoc = 0;
- dsc[i].dsc_cfg.b.owner = 1;
- i++;
- len -= 16;
- }
- dsc[i - 1].dsc_cfg.b.eoc = 1;
- } else {
- dsc->src_addr = (uintptr_t)dma_addr_key;
- dsc->tgt_addr = 0;
- dsc->dsc_cfg.d32 = 0;
- dsc->dsc_cfg.b.length = len;
- dsc->dsc_cfg.b.mode = MODE_KEY;
- dsc->dsc_cfg.b.eoc = 1;
- dsc->dsc_cfg.b.owner = 1;
+ if (dma_mapping_error(dd->dev, dma_addr_key)) {
+ dev_err(dd->dev, "error mapping dma_addr_key\n");
+ return -EINVAL;
+ }
+
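+ /* split the 48-byte key+IV block into 16-byte KEY-mode descriptors */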
+ while (len > 0) {
+ dsc[i].src_addr = (uint32_t)dma_addr_key + i * 16;
+ dsc[i].tgt_addr = i * 16;
+ dsc[i].dsc_cfg.d32 = 0;
+ dsc[i].dsc_cfg.b.length = len > 16 ? 16 : len;
+ dsc[i].dsc_cfg.b.mode = MODE_KEY;
+ dsc[i].dsc_cfg.b.eoc = 0;
+ dsc[i].dsc_cfg.b.owner = 1;
+ i++;
+ len -= 16;
}
+ dsc[i - 1].dsc_cfg.b.eoc = 1;
dma_sync_single_for_device(dd->dev, dd->dma_descript_tab,
PAGE_SIZE, DMA_TO_DEVICE);
- aml_write_crypto_reg(DMA_THREAD_REG,
+ aml_write_crypto_reg(dd->thread,
(uintptr_t) dd->dma_descript_tab | 2);
- aml_dma_debug(dsc, 1, __func__);
- while (aml_read_crypto_reg(DMA_STATUS_REG) == 0)
+ aml_dma_debug(dsc, i, __func__, dd->thread, dd->status);
+ while (aml_read_crypto_reg(dd->status) == 0)
;
- aml_write_crypto_reg(DMA_STATUS_REG, 0xf);
+ aml_write_crypto_reg(dd->status, 0xf);
dma_unmap_single(dd->dev, dma_addr_key,
sizeof(key_iv), DMA_TO_DEVICE);
+
+ return 0;
}
struct ablkcipher_request *req = dd->req;
dd->flags &= ~TDES_FLAGS_BUSY;
+ dd->dma->dma_busy = 0;
req->base.complete(&req->base, err);
}
dma_sync_single_for_device(dd->dev, dd->dma_descript_tab,
PAGE_SIZE, DMA_TO_DEVICE);
- aml_dma_debug(dsc, nents, __func__);
- aml_write_crypto_reg(DMA_THREAD_REG,
+ aml_dma_debug(dsc, nents, __func__, dd->thread, dd->status);
+ aml_write_crypto_reg(dd->thread,
(uintptr_t) dd->dma_descript_tab | 2);
return 0;
}
return err;
if (dd->flags & TDES_FLAGS_CBC)
- set_tdes_key_iv(dd, dd->ctx->key, dd->ctx->keylen,
+ err = set_tdes_key_iv(dd, dd->ctx->key, dd->ctx->keylen,
dd->req->info);
else
- set_tdes_key_iv(dd, dd->ctx->key, dd->ctx->keylen,
+ err = set_tdes_key_iv(dd, dd->ctx->key, dd->ctx->keylen,
NULL);
return err;
unsigned long flags;
int err, ret = 0;
- spin_lock_irqsave(&dd->lock, flags);
+ spin_lock_irqsave(&dd->dma->dma_lock, flags);
if (req)
ret = ablkcipher_enqueue_request(&dd->queue, req);
- if (dd->flags & TDES_FLAGS_BUSY) {
- spin_unlock_irqrestore(&dd->lock, flags);
+ if (dd->flags & TDES_FLAGS_BUSY || dd->dma->dma_busy) {
+ spin_unlock_irqrestore(&dd->dma->dma_lock, flags);
return ret;
}
backlog = crypto_get_backlog(&dd->queue);
async_req = crypto_dequeue_request(&dd->queue);
- if (async_req)
+ if (async_req) {
dd->flags |= TDES_FLAGS_BUSY;
- spin_unlock_irqrestore(&dd->lock, flags);
+ dd->dma->dma_busy = 1;
+ }
+ spin_unlock_irqrestore(&dd->dma->dma_lock, flags);
if (!async_req)
return ret;
{
}
-static struct crypto_alg tdes_algs[] = {
+static struct crypto_alg des_algs[] = {
{
.cra_name = "ecb(des)",
.cra_driver_name = "ecb-des-aml",
{
.cra_name = "cbc(des)",
.cra_driver_name = "cbc-des-aml",
- .cra_priority = 300,
+ .cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aml_tdes_ctx),
.decrypt = aml_tdes_cbc_decrypt,
}
},
+};
+
+static struct crypto_alg tdes_algs[] = {
{
.cra_name = "ecb(des3_ede)",
.cra_driver_name = "ecb-tdes-aml",
- .cra_priority = 300,
+ .cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aml_tdes_ctx),
{
.cra_name = "cbc(des3_ede)",
.cra_driver_name = "cbc-tdes-aml",
- .cra_priority = 300,
+ .cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aml_tdes_ctx),
err = aml_tdes_crypt_dma_stop(dd);
aml_dma_debug(dd->descriptor, dd->fast_nents ?
- dd->fast_nents : 1, __func__);
+ dd->fast_nents : 1, __func__, dd->thread, dd->status);
err = dd->err ? : err;
if (dd->total && !err) {
static irqreturn_t aml_tdes_irq(int irq, void *dev_id)
{
struct aml_tdes_dev *tdes_dd = dev_id;
- uint8_t status = aml_read_crypto_reg(DMA_STATUS_REG);
+ uint8_t status = aml_read_crypto_reg(tdes_dd->status);
if (status) {
if (status == 0x1)
pr_err("irq overwrite\n");
if (TDES_FLAGS_DMA & tdes_dd->flags) {
- aml_write_crypto_reg(DMA_STATUS_REG, 0xf);
+ aml_write_crypto_reg(tdes_dd->status, 0xf);
tasklet_schedule(&tdes_dd->done_task);
return IRQ_HANDLED;
} else {
{
int i = 0;
- /*
- * AXG and beyond does not support DES
- * and thus we start from 2
- */
- if (cpu_after_eq(MESON_CPU_MAJOR_ID_AXG))
- i = 2;
+ for (; i < ARRAY_SIZE(des_algs); i++)
+ crypto_unregister_alg(&des_algs[i]);
-	for (; i < ARRAY_SIZE(tdes_algs); i++)
+ for (i = 0; i < ARRAY_SIZE(tdes_algs); i++)
		crypto_unregister_alg(&tdes_algs[i]);
static int aml_tdes_register_algs(struct aml_tdes_dev *dd)
{
- int err = 0, i = 0, j = 0;
+ int err = 0, i = 0, j = 0, k = 0;
- /*
- * AXG and beyond does not support DES
- * and thus we start from 2
- */
- if (cpu_after_eq(MESON_CPU_MAJOR_ID_AXG))
- i = 2;
+ for (; i < ARRAY_SIZE(des_algs); i++) {
+ err = crypto_register_alg(&des_algs[i]);
+ if (err)
+ goto err_des_algs;
+ }
- for (; i < ARRAY_SIZE(tdes_algs); i++) {
- err = crypto_register_alg(&tdes_algs[i]);
+ for (; k < ARRAY_SIZE(tdes_algs); k++) {
+ err = crypto_register_alg(&tdes_algs[k]);
if (err)
goto err_tdes_algs;
}
return 0;
err_tdes_algs:
- for (j = 0; j < i; j++)
+ for (j = 0; j < k; j++)
crypto_unregister_alg(&tdes_algs[j]);
+err_des_algs:
+ for (j = 0; j < i; j++)
+ crypto_unregister_alg(&des_algs[j]);
+
return err;
}
{
struct aml_tdes_dev *tdes_dd;
struct device *dev = &pdev->dev;
- struct resource *res_irq = 0;
- struct resource *res_base = 0;
int err = -EPERM;
tdes_dd = kzalloc(sizeof(struct aml_tdes_dev), GFP_KERNEL);
}
tdes_dd->dev = dev;
+ tdes_dd->dma = dev_get_drvdata(dev->parent);
+ tdes_dd->thread = tdes_dd->dma->thread;
+ tdes_dd->status = tdes_dd->dma->status;
+ tdes_dd->irq = tdes_dd->dma->irq;
platform_set_drvdata(pdev, tdes_dd);
- res_irq = platform_get_resource(pdev, IORESOURCE_IRQ,
- TDES_THREAD_INDEX);
-
- res_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res_base) {
- dev_err(dev, "error to get normal IORESOURCE_MEM.\n");
- goto tdes_dd_err;
- } else {
- if (!cryptoreg_offset) {
- cryptoreg_offset = ioremap(res_base->start,
- resource_size(res_base));
- map_in_tdes_dma = 1;
- }
- }
INIT_LIST_HEAD(&tdes_dd->list);
(unsigned long)tdes_dd);
crypto_init_queue(&tdes_dd->queue, AML_TDES_QUEUE_LENGTH);
-
- tdes_dd->irq = res_irq->start;
err = request_irq(tdes_dd->irq, aml_tdes_irq, IRQF_SHARED, "aml-tdes",
tdes_dd);
if (err) {
free_irq(tdes_dd->irq, tdes_dd);
tdes_irq_err:
- if (map_in_tdes_dma) {
- iounmap(cryptoreg_offset);
- map_in_tdes_dma = 0;
- }
-
tasklet_kill(&tdes_dd->done_task);
tasklet_kill(&tdes_dd->queue_task);
kfree(tdes_dd);
tasklet_kill(&tdes_dd->done_task);
tasklet_kill(&tdes_dd->queue_task);
- if (map_in_tdes_dma) {
- iounmap(cryptoreg_offset);
- map_in_tdes_dma = 0;
- }
-
if (tdes_dd->irq > 0)
free_irq(tdes_dd->irq, tdes_dd);