drivers/crypto/sa2ul.c
// SPDX-License-Identifier: GPL-2.0
/*
 * K3 SA2UL crypto accelerator driver
 *
 * Copyright (C) 2018-2020 Texas Instruments Incorporated - http://www.ti.com
 *
 * Authors:     Keerthy
 *              Vitaly Andrianov
 *              Tero Kristo
 */
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include <crypto/aes.h>
#include <crypto/authenc.h>
#include <crypto/des.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>

#include "sa2ul.h"

/* Byte offset for key in encryption security context */
#define SC_ENC_KEY_OFFSET (1 + 27 + 4)
/* Byte offset for Aux-1 in encryption security context */
#define SC_ENC_AUX1_OFFSET (1 + 27 + 4 + 32)
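/*
 * The arithmetic above spells out the layout: a 1-byte mode selector,
 * 27 mode control instruction bytes (MODE_CONTROL_BYTES) and 4 apparently
 * reserved bytes put the key at offset 32; the 32-byte key slot is then
 * followed by Aux-1 at offset 64.
 */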

#define SA_CMDL_UPD_ENC         0x0001
#define SA_CMDL_UPD_AUTH        0x0002
#define SA_CMDL_UPD_ENC_IV      0x0004
#define SA_CMDL_UPD_AUTH_IV     0x0008
#define SA_CMDL_UPD_AUX_KEY     0x0010

#define SA_AUTH_SUBKEY_LEN      16
#define SA_CMDL_PAYLOAD_LENGTH_MASK     0xFFFF
#define SA_CMDL_SOP_BYPASS_LEN_MASK     0xFF000000

#define MODE_CONTROL_BYTES      27
#define SA_HASH_PROCESSING      0
#define SA_CRYPTO_PROCESSING    0
#define SA_UPLOAD_HASH_TO_TLR   BIT(6)

#define SA_SW0_FLAGS_MASK       0xF0000
#define SA_SW0_CMDL_INFO_MASK   0x1F00000
#define SA_SW0_CMDL_PRESENT     BIT(4)
#define SA_SW0_ENG_ID_MASK      0x3E000000
#define SA_SW0_DEST_INFO_PRESENT        BIT(30)
#define SA_SW2_EGRESS_LENGTH            0xFF000000
#define SA_BASIC_HASH           0x10

#define SHA256_DIGEST_WORDS    8
/* Make 32-bit word from 4 bytes */
#define SA_MK_U32(b0, b1, b2, b3) (((b0) << 24) | ((b1) << 16) | \
                                   ((b2) << 8) | (b3))

/* size of SCCTL structure in bytes */
#define SA_SCCTL_SZ 16

/* Max Authentication tag size */
#define SA_MAX_AUTH_TAG_SZ 64

enum sa_algo_id {
        SA_ALG_CBC_AES = 0,
        SA_ALG_EBC_AES,
        SA_ALG_CBC_DES3,
        SA_ALG_ECB_DES3,
        SA_ALG_SHA1,
        SA_ALG_SHA256,
        SA_ALG_SHA512,
        SA_ALG_AUTHENC_SHA1_AES,
        SA_ALG_AUTHENC_SHA256_AES,
};

struct sa_match_data {
        u8 priv;
        u8 priv_id;
        u32 supported_algos;
};

static struct device *sa_k3_dev;

/**
 * struct sa_cmdl_cfg - Command label configuration descriptor
 * @aalg: authentication algorithm ID
 * @enc_eng_id: Encryption Engine ID supported by the SA hardware
 * @auth_eng_id: Authentication Engine ID
 * @iv_size: Initialization Vector size
 * @akey: Authentication key
 * @akey_len: Authentication key length
 * @enc: True, if this is an encode request
 */
struct sa_cmdl_cfg {
        int aalg;
        u8 enc_eng_id;
        u8 auth_eng_id;
        u8 iv_size;
        const u8 *akey;
        u16 akey_len;
        bool enc;
};

/**
 * struct algo_data - Crypto algorithm specific data
 * @enc_eng: Encryption engine info structure
 * @auth_eng: Authentication engine info structure
 * @auth_ctrl: Authentication control word
 * @hash_size: Size of digest
 * @iv_idx: iv index in psdata
 * @iv_out_size: iv out size
 * @ealg_id: Encryption Algorithm ID
 * @aalg_id: Authentication algorithm ID
 * @mci_enc: Mode Control Instruction for Encryption algorithm
 * @mci_dec: Mode Control Instruction for Decryption
 * @inv_key: Whether the encryption algorithm demands key inversion
 * @ctx: Pointer to the algorithm context
 * @keyed_mac: Whether the authentication algorithm has a key
 * @prep_iopad: Function pointer to generate intermediate ipad/opad
 */
struct algo_data {
        struct sa_eng_info enc_eng;
        struct sa_eng_info auth_eng;
        u8 auth_ctrl;
        u8 hash_size;
        u8 iv_idx;
        u8 iv_out_size;
        u8 ealg_id;
        u8 aalg_id;
        u8 *mci_enc;
        u8 *mci_dec;
        bool inv_key;
        struct sa_tfm_ctx *ctx;
        bool keyed_mac;
        void (*prep_iopad)(struct algo_data *algo, const u8 *key,
                           u16 key_sz, __be32 *ipad, __be32 *opad);
};

/**
 * struct sa_alg_tmpl: A generic template encompassing crypto/aead algorithms
 * @type: Type of the crypto algorithm.
 * @alg: Union of crypto algorithm definitions.
 * @registered: Flag indicating if the crypto algorithm is already registered
 */
struct sa_alg_tmpl {
        u32 type;               /* CRYPTO_ALG_TYPE from <linux/crypto.h> */
        union {
                struct skcipher_alg skcipher;
                struct ahash_alg ahash;
                struct aead_alg aead;
        } alg;
        bool registered;
};

/**
 * struct sa_mapped_sg: scatterlist information for tx and rx
 * @mapped: Set to true if the @sgt is mapped
 * @dir: mapping direction used for @sgt
 * @split_sg: Set if the sg is split and needs to be freed up
 * @static_sg: Static scatterlist entry for overriding data
 * @sgt: scatterlist table for DMA API use
 */
struct sa_mapped_sg {
        bool mapped;
        enum dma_data_direction dir;
        struct scatterlist static_sg;
        struct scatterlist *split_sg;
        struct sg_table sgt;
};
/**
 * struct sa_rx_data: RX packet miscellaneous data placeholder
 * @req: crypto request data pointer
 * @ddev: pointer to the DMA device
 * @tx_in: dma_async_tx_descriptor pointer for rx channel
 * @mapped_sg: Information on tx (0) and rx (1) scatterlist DMA mapping
 * @enc: Flag indicating either encryption or decryption
 * @enc_iv_size: Initialisation vector size
 * @iv_idx: Initialisation vector index
 */
struct sa_rx_data {
        void *req;
        struct device *ddev;
        struct dma_async_tx_descriptor *tx_in;
        struct sa_mapped_sg mapped_sg[2];
        u8 enc;
        u8 enc_iv_size;
        u8 iv_idx;
};

/**
 * struct sa_req: SA request definition
 * @dev: device for the request
 * @size: total data to be transmitted via DMA
 * @enc_offset: offset of cipher data
 * @enc_size: data to be passed to cipher engine
 * @enc_iv: cipher IV
 * @auth_offset: offset of the authentication data
 * @auth_size: size of the authentication data
 * @auth_iv: authentication IV
 * @type: algorithm type for the request
 * @cmdl: command label pointer
 * @base: pointer to the base request
 * @ctx: pointer to the algorithm context data
 * @enc: true if this is an encode request
 * @src: source data
 * @dst: destination data
 * @callback: DMA callback for the request
 * @mdata_size: metadata size passed to DMA
 */
struct sa_req {
        struct device *dev;
        u16 size;
        u8 enc_offset;
        u16 enc_size;
        u8 *enc_iv;
        u8 auth_offset;
        u16 auth_size;
        u8 *auth_iv;
        u32 type;
        u32 *cmdl;
        struct crypto_async_request *base;
        struct sa_tfm_ctx *ctx;
        bool enc;
        struct scatterlist *src;
        struct scatterlist *dst;
        dma_async_tx_callback callback;
        u16 mdata_size;
};

/*
 * Mode Control Instructions for various Key lengths 128, 192, 256
 * For CBC (Cipher Block Chaining) mode for encryption
 */
static u8 mci_cbc_enc_array[3][MODE_CONTROL_BYTES] = {
        {       0x61, 0x00, 0x00, 0x18, 0x88, 0x0a, 0xaa, 0x4b, 0x7e, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
        {       0x61, 0x00, 0x00, 0x18, 0x88, 0x4a, 0xaa, 0x4b, 0x7e, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
        {       0x61, 0x00, 0x00, 0x18, 0x88, 0x8a, 0xaa, 0x4b, 0x7e, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
};

/*
 * Mode Control Instructions for various Key lengths 128, 192, 256
 * For CBC (Cipher Block Chaining) mode for decryption
 */
static u8 mci_cbc_dec_array[3][MODE_CONTROL_BYTES] = {
        {       0x71, 0x00, 0x00, 0x80, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
        {       0x71, 0x00, 0x00, 0x84, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
        {       0x71, 0x00, 0x00, 0x88, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
};

/*
 * Mode Control Instructions for various Key lengths 128, 192, 256
 * For CBC (Cipher Block Chaining) mode for encryption (no-IV variant)
 */
static u8 mci_cbc_enc_no_iv_array[3][MODE_CONTROL_BYTES] = {
        {       0x21, 0x00, 0x00, 0x18, 0x88, 0x0a, 0xaa, 0x4b, 0x7e, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
        {       0x21, 0x00, 0x00, 0x18, 0x88, 0x4a, 0xaa, 0x4b, 0x7e, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
        {       0x21, 0x00, 0x00, 0x18, 0x88, 0x8a, 0xaa, 0x4b, 0x7e, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
};

/*
 * Mode Control Instructions for various Key lengths 128, 192, 256
 * For CBC (Cipher Block Chaining) mode for decryption (no-IV variant)
 */
static u8 mci_cbc_dec_no_iv_array[3][MODE_CONTROL_BYTES] = {
        {       0x31, 0x00, 0x00, 0x80, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
        {       0x31, 0x00, 0x00, 0x84, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
        {       0x31, 0x00, 0x00, 0x88, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
};

/*
 * Mode Control Instructions for various Key lengths 128, 192, 256
 * For ECB (Electronic Code Book) mode for encryption
 */
static u8 mci_ecb_enc_array[3][MODE_CONTROL_BYTES] = {
        {       0x21, 0x00, 0x00, 0x80, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
        {       0x21, 0x00, 0x00, 0x84, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
        {       0x21, 0x00, 0x00, 0x88, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
};

/*
 * Mode Control Instructions for various Key lengths 128, 192, 256
 * For ECB (Electronic Code Book) mode for decryption
 */
static u8 mci_ecb_dec_array[3][MODE_CONTROL_BYTES] = {
        {       0x31, 0x00, 0x00, 0x80, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
        {       0x31, 0x00, 0x00, 0x84, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
        {       0x31, 0x00, 0x00, 0x88, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00        },
};

/*
 * Mode Control Instructions for the 3DES algorithm,
 * for CBC (Cipher Block Chaining) and ECB modes,
 * encryption and decryption respectively
 */
static u8 mci_cbc_3des_enc_array[MODE_CONTROL_BYTES] = {
        0x60, 0x00, 0x00, 0x18, 0x88, 0x52, 0xaa, 0x4b, 0x7e, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00,
};

static u8 mci_cbc_3des_dec_array[MODE_CONTROL_BYTES] = {
        0x70, 0x00, 0x00, 0x85, 0x0a, 0xca, 0x98, 0xf4, 0x40, 0xc0, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00,
};

static u8 mci_ecb_3des_enc_array[MODE_CONTROL_BYTES] = {
        0x20, 0x00, 0x00, 0x85, 0x0a, 0x04, 0xb7, 0x90, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00,
};

static u8 mci_ecb_3des_dec_array[MODE_CONTROL_BYTES] = {
        0x30, 0x00, 0x00, 0x85, 0x0a, 0x04, 0xb7, 0x90, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00,
};
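
/*
 * Across each triplet of MCI arrays above only a single byte varies:
 * byte 5 (0x0a/0x4a/0x8a) in the CBC-encrypt tables and byte 3
 * (0x80/0x84/0x88) in the decrypt and ECB tables, presumably selecting
 * the 128/192/256-bit key length for the engine.
 */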

/*
 * Perform 16-byte (128-bit) swizzling.
 * The SA2UL expects the security context in little-endian format on a
 * 128-bit (16-byte) wide bus; hence swap 16 bytes at a time, from higher
 * to lower address.
 */
static void sa_swiz_128(u8 *in, u16 len)
{
        u8 data[16];
        int i, j;

        for (i = 0; i < len; i += 16) {
                memcpy(data, &in[i], 16);
                for (j = 0; j < 16; j++)
                        in[i + j] = data[15 - j];
        }
}

/* Prepare the ipad and opad from the key, per the HMAC algorithm (step 1) */
static void prepare_kipad(u8 *k_ipad, const u8 *key, u16 key_sz)
{
        int i;

        for (i = 0; i < key_sz; i++)
                k_ipad[i] = key[i] ^ 0x36;

        /* Instead of XOR with 0 */
        for (; i < SHA1_BLOCK_SIZE; i++)
                k_ipad[i] = 0x36;
}

static void prepare_kopad(u8 *k_opad, const u8 *key, u16 key_sz)
{
        int i;

        for (i = 0; i < key_sz; i++)
                k_opad[i] = key[i] ^ 0x5c;

        /* Instead of XOR with 0 */
        for (; i < SHA1_BLOCK_SIZE; i++)
                k_opad[i] = 0x5c;
}

static void sa_export_shash(void *state, struct shash_desc *hash,
                            int digest_size, __be32 *out)
{
        struct sha1_state *sha1;
        struct sha256_state *sha256;
        u32 *result;

        switch (digest_size) {
        case SHA1_DIGEST_SIZE:
                sha1 = state;
                result = sha1->state;
                break;
        case SHA256_DIGEST_SIZE:
                sha256 = state;
                result = sha256->state;
                break;
        default:
                dev_err(sa_k3_dev, "%s: bad digest_size=%d\n", __func__,
                        digest_size);
                return;
        }

        crypto_shash_export(hash, state);

        cpu_to_be32_array(out, result, digest_size / 4);
}

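/*
 * Precompute the intermediate HMAC digests: hash one block of (key XOR
 * ipad) and one block of (key XOR opad) and export the raw hash state
 * words, so the engine can continue the HMAC computation from these
 * states instead of re-hashing the padded key for every request.
 */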
static void sa_prepare_iopads(struct algo_data *data, const u8 *key,
                              u16 key_sz, __be32 *ipad, __be32 *opad)
{
        SHASH_DESC_ON_STACK(shash, data->ctx->shash);
        int block_size = crypto_shash_blocksize(data->ctx->shash);
        int digest_size = crypto_shash_digestsize(data->ctx->shash);
        union {
                struct sha1_state sha1;
                struct sha256_state sha256;
                u8 k_pad[SHA1_BLOCK_SIZE];
        } sha;

        shash->tfm = data->ctx->shash;

        prepare_kipad(sha.k_pad, key, key_sz);

        crypto_shash_init(shash);
        crypto_shash_update(shash, sha.k_pad, block_size);
        sa_export_shash(&sha, shash, digest_size, ipad);

        prepare_kopad(sha.k_pad, key, key_sz);

        crypto_shash_init(shash);
        crypto_shash_update(shash, sha.k_pad, block_size);

        sa_export_shash(&sha, shash, digest_size, opad);

        memzero_explicit(&sha, sizeof(sha));
}

/* Derive the inverse key used in AES-CBC decryption operation */
static inline int sa_aes_inv_key(u8 *inv_key, const u8 *key, u16 key_sz)
{
        struct crypto_aes_ctx ctx;
        int key_pos;

        if (aes_expandkey(&ctx, key, key_sz)) {
                dev_err(sa_k3_dev, "%s: bad key len(%d)\n", __func__, key_sz);
                return -EINVAL;
        }

        /* Workaround to get the right inverse for AES_KEYSIZE_192 keys */
        if (key_sz == AES_KEYSIZE_192) {
                ctx.key_enc[52] = ctx.key_enc[51] ^ ctx.key_enc[46];
                ctx.key_enc[53] = ctx.key_enc[52] ^ ctx.key_enc[47];
        }

        /*
         * Based on the crypto_aes_expand_key() logic: the inverse key is
         * the tail (last round keys) of the expanded key schedule.
         */
        switch (key_sz) {
        case AES_KEYSIZE_128:
        case AES_KEYSIZE_192:
                key_pos = key_sz + 24;
                break;

        case AES_KEYSIZE_256:
                key_pos = key_sz + 24 - 4;
                break;

        default:
                dev_err(sa_k3_dev, "%s: bad key len(%d)\n", __func__, key_sz);
                return -EINVAL;
        }

        memcpy(inv_key, &ctx.key_enc[key_pos], key_sz);
        return 0;
}

/* Set Security context for the encryption engine */
static int sa_set_sc_enc(struct algo_data *ad, const u8 *key, u16 key_sz,
                         u8 enc, u8 *sc_buf)
{
        const u8 *mci = NULL;

        /* Set Encryption mode selector to crypto processing */
        sc_buf[0] = SA_CRYPTO_PROCESSING;

        if (enc)
                mci = ad->mci_enc;
        else
                mci = ad->mci_dec;
        /* Set the mode control instructions in security context */
        if (mci)
                memcpy(&sc_buf[1], mci, MODE_CONTROL_BYTES);

        /* For AES-CBC decryption get the inverse key */
        if (ad->inv_key && !enc) {
                if (sa_aes_inv_key(&sc_buf[SC_ENC_KEY_OFFSET], key, key_sz))
                        return -EINVAL;
        /* For all other cases the key is used as-is */
        } else {
                memcpy(&sc_buf[SC_ENC_KEY_OFFSET], key, key_sz);
        }

        return 0;
}

/* Set Security context for the authentication engine */
static void sa_set_sc_auth(struct algo_data *ad, const u8 *key, u16 key_sz,
                           u8 *sc_buf)
{
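        /*
         * The precomputed intermediate HMAC digests (ipad/opad) sit at
         * fixed offsets 32 and 64 within the authentication security
         * context.
         */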
        __be32 *ipad = (void *)(sc_buf + 32);
        __be32 *opad = (void *)(sc_buf + 64);

        /* Set Authentication mode selector to hash processing */
        sc_buf[0] = SA_HASH_PROCESSING;
        /* Auth SW ctrl word: bit[6]=1 (upload computed hash to TLR section) */
        sc_buf[1] = SA_UPLOAD_HASH_TO_TLR;
        sc_buf[1] |= ad->auth_ctrl;

        /* Copy the keys or ipad/opad */
        if (ad->keyed_mac) {
                ad->prep_iopad(ad, key, key_sz, ipad, opad);
        } else {
                /* basic hash */
                sc_buf[1] |= SA_BASIC_HASH;
        }
}
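/* Copy an 8-byte (size16 == false) or 16-byte IV as big-endian 32-bit words */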
static inline void sa_copy_iv(__be32 *out, const u8 *iv, bool size16)
{
        int j;

        for (j = 0; j < ((size16) ? 4 : 2); j++) {
                *out = cpu_to_be32(*((u32 *)iv));
                iv += 4;
                out++;
        }
}

/* Format general command label */
static int sa_format_cmdl_gen(struct sa_cmdl_cfg *cfg, u8 *cmdl,
                              struct sa_cmdl_upd_info *upd_info)
{
        u8 enc_offset = 0, auth_offset = 0, total = 0;
        u8 enc_next_eng = SA_ENG_ID_OUTPORT2;
        u8 auth_next_eng = SA_ENG_ID_OUTPORT2;
        u32 *word_ptr = (u32 *)cmdl;
        int i;

        /* Clear the command label */
        memzero_explicit(cmdl, (SA_MAX_CMDL_WORDS * sizeof(u32)));

        /* Initialize the command update structure */
        memzero_explicit(upd_info, sizeof(*upd_info));

        if (cfg->enc_eng_id && cfg->auth_eng_id) {
                if (cfg->enc) {
                        auth_offset = SA_CMDL_HEADER_SIZE_BYTES;
                        enc_next_eng = cfg->auth_eng_id;

                        if (cfg->iv_size)
                                auth_offset += cfg->iv_size;
                } else {
                        enc_offset = SA_CMDL_HEADER_SIZE_BYTES;
                        auth_next_eng = cfg->enc_eng_id;
                }
        }

        if (cfg->enc_eng_id) {
                upd_info->flags |= SA_CMDL_UPD_ENC;
                upd_info->enc_size.index = enc_offset >> 2;
                upd_info->enc_offset.index = upd_info->enc_size.index + 1;
                /* Encryption command label */
                cmdl[enc_offset + SA_CMDL_OFFSET_NESC] = enc_next_eng;

                /* Encryption modes requiring IV */
                if (cfg->iv_size) {
                        upd_info->flags |= SA_CMDL_UPD_ENC_IV;
                        upd_info->enc_iv.index =
                                (enc_offset + SA_CMDL_HEADER_SIZE_BYTES) >> 2;
                        upd_info->enc_iv.size = cfg->iv_size;

                        cmdl[enc_offset + SA_CMDL_OFFSET_LABEL_LEN] =
                                SA_CMDL_HEADER_SIZE_BYTES + cfg->iv_size;

                        cmdl[enc_offset + SA_CMDL_OFFSET_OPTION_CTRL1] =
                                (SA_CTX_ENC_AUX2_OFFSET | (cfg->iv_size >> 3));
                        total += SA_CMDL_HEADER_SIZE_BYTES + cfg->iv_size;
                } else {
                        cmdl[enc_offset + SA_CMDL_OFFSET_LABEL_LEN] =
                                                SA_CMDL_HEADER_SIZE_BYTES;
                        total += SA_CMDL_HEADER_SIZE_BYTES;
                }
        }

        if (cfg->auth_eng_id) {
                upd_info->flags |= SA_CMDL_UPD_AUTH;
                upd_info->auth_size.index = auth_offset >> 2;
                upd_info->auth_offset.index = upd_info->auth_size.index + 1;
                cmdl[auth_offset + SA_CMDL_OFFSET_NESC] = auth_next_eng;
                cmdl[auth_offset + SA_CMDL_OFFSET_LABEL_LEN] =
                        SA_CMDL_HEADER_SIZE_BYTES;
                total += SA_CMDL_HEADER_SIZE_BYTES;
        }

        total = roundup(total, 8);

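        /*
         * The label was assembled as a byte stream; swap each 32-bit word
         * into the byte order the engine expects.
         */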
        for (i = 0; i < total / 4; i++)
                word_ptr[i] = swab32(word_ptr[i]);

        return total;
}

/* Update Command label */
static inline void sa_update_cmdl(struct sa_req *req, u32 *cmdl,
                                  struct sa_cmdl_upd_info *upd_info)
{
        int i = 0, j;

        if (likely(upd_info->flags & SA_CMDL_UPD_ENC)) {
                cmdl[upd_info->enc_size.index] &= ~SA_CMDL_PAYLOAD_LENGTH_MASK;
                cmdl[upd_info->enc_size.index] |= req->enc_size;
                cmdl[upd_info->enc_offset.index] &=
                                                ~SA_CMDL_SOP_BYPASS_LEN_MASK;
                cmdl[upd_info->enc_offset.index] |=
                        FIELD_PREP(SA_CMDL_SOP_BYPASS_LEN_MASK,
                                   req->enc_offset);

                if (likely(upd_info->flags & SA_CMDL_UPD_ENC_IV)) {
                        __be32 *data = (__be32 *)&cmdl[upd_info->enc_iv.index];
                        u32 *enc_iv = (u32 *)req->enc_iv;

                        for (j = 0; i < upd_info->enc_iv.size; i += 4, j++) {
                                data[j] = cpu_to_be32(*enc_iv);
                                enc_iv++;
                        }
                }
        }

        if (likely(upd_info->flags & SA_CMDL_UPD_AUTH)) {
                cmdl[upd_info->auth_size.index] &= ~SA_CMDL_PAYLOAD_LENGTH_MASK;
                cmdl[upd_info->auth_size.index] |= req->auth_size;
                cmdl[upd_info->auth_offset.index] &=
                        ~SA_CMDL_SOP_BYPASS_LEN_MASK;
                cmdl[upd_info->auth_offset.index] |=
                        FIELD_PREP(SA_CMDL_SOP_BYPASS_LEN_MASK,
                                   req->auth_offset);
                if (upd_info->flags & SA_CMDL_UPD_AUTH_IV) {
                        sa_copy_iv((void *)&cmdl[upd_info->auth_iv.index],
                                   req->auth_iv,
                                   (upd_info->auth_iv.size > 8));
                }
                if (upd_info->flags & SA_CMDL_UPD_AUX_KEY) {
                        int offset = (req->auth_size & 0xF) ? 4 : 0;

                        memcpy(&cmdl[upd_info->aux_key_info.index],
                               &upd_info->aux_key[offset], 16);
                }
        }
}

/* Format SWINFO words to be sent to SA */
static
void sa_set_swinfo(u8 eng_id, u16 sc_id, dma_addr_t sc_phys,
                   u8 cmdl_present, u8 cmdl_offset, u8 flags,
                   u8 hash_size, u32 *swinfo)
{
        swinfo[0] = sc_id;
        swinfo[0] |= FIELD_PREP(SA_SW0_FLAGS_MASK, flags);
        if (likely(cmdl_present))
                swinfo[0] |= FIELD_PREP(SA_SW0_CMDL_INFO_MASK,
                                        cmdl_offset | SA_SW0_CMDL_PRESENT);
        swinfo[0] |= FIELD_PREP(SA_SW0_ENG_ID_MASK, eng_id);

        swinfo[0] |= SA_SW0_DEST_INFO_PRESENT;
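        /*
         * The 64-bit security context address is split across SW1 (low 32
         * bits) and SW2 (high 32 bits); the top byte of SW2 carries the
         * egress/hash length.
         */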
        swinfo[1] = (u32)(sc_phys & 0xFFFFFFFFULL);
        swinfo[2] = (u32)((sc_phys & 0xFFFFFFFF00000000ULL) >> 32);
        swinfo[2] |= FIELD_PREP(SA_SW2_EGRESS_LENGTH, hash_size);
}

/* Dump the security context */
static void sa_dump_sc(u8 *buf, dma_addr_t dma_addr)
{
#ifdef DEBUG
        dev_info(sa_k3_dev, "Security context dump:: 0x%pad\n", &dma_addr);
        print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
                       16, 1, buf, SA_CTX_MAX_SZ, false);
#endif
}

static
int sa_init_sc(struct sa_ctx_info *ctx, const struct sa_match_data *match_data,
               const u8 *enc_key, u16 enc_key_sz,
               const u8 *auth_key, u16 auth_key_sz,
               struct algo_data *ad, u8 enc, u32 *swinfo)
{
        int enc_sc_offset = 0;
        int auth_sc_offset = 0;
        u8 *sc_buf = ctx->sc;
        u16 sc_id = ctx->sc_id;
        u8 first_engine = 0;

        memzero_explicit(sc_buf, SA_CTX_MAX_SZ);

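        /*
         * Context layout: the SCCTL/PHP header occupies the first
         * SA_CTX_PHP_PE_CTX_SZ bytes, followed by the encryption engine
         * context and, when present, the authentication engine context.
         */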
        if (ad->auth_eng.eng_id) {
                if (enc)
                        first_engine = ad->enc_eng.eng_id;
                else
                        first_engine = ad->auth_eng.eng_id;

                enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ;
                auth_sc_offset = enc_sc_offset + ad->enc_eng.sc_size;
                sc_buf[1] = SA_SCCTL_FE_AUTH_ENC;
                if (!ad->hash_size)
                        return -EINVAL;
                ad->hash_size = roundup(ad->hash_size, 8);

        } else if (ad->enc_eng.eng_id && !ad->auth_eng.eng_id) {
                enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ;
                first_engine = ad->enc_eng.eng_id;
                sc_buf[1] = SA_SCCTL_FE_ENC;
                ad->hash_size = ad->iv_out_size;
        }

        /* SCCTL Owner info: 0=host, 1=CP_ACE */
        sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0;
        memcpy(&sc_buf[2], &sc_id, 2);
        sc_buf[4] = 0x0;
        sc_buf[5] = match_data->priv_id;
        sc_buf[6] = match_data->priv;
        sc_buf[7] = 0x0;

        /* Prepare context for encryption engine */
        if (ad->enc_eng.sc_size) {
                if (sa_set_sc_enc(ad, enc_key, enc_key_sz, enc,
                                  &sc_buf[enc_sc_offset]))
                        return -EINVAL;
        }

        /* Prepare context for authentication engine */
        if (ad->auth_eng.sc_size)
                sa_set_sc_auth(ad, auth_key, auth_key_sz,
                               &sc_buf[auth_sc_offset]);

        /* Set the ownership of context to CP_ACE */
        sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0x80;

        /* swizzle the security context */
        sa_swiz_128(sc_buf, SA_CTX_MAX_SZ);

        sa_set_swinfo(first_engine, ctx->sc_id, ctx->sc_phys, 1, 0,
                      SA_SW_INFO_FLAG_EVICT, ad->hash_size, swinfo);

        sa_dump_sc(sc_buf, ctx->sc_phys);

        return 0;
}

/* Free the per direction context memory */
static void sa_free_ctx_info(struct sa_ctx_info *ctx,
                             struct sa_crypto_data *data)
{
        unsigned long bn;

        bn = ctx->sc_id - data->sc_id_start;
        spin_lock(&data->scid_lock);
        __clear_bit(bn, data->ctx_bm);
        data->sc_id--;
        spin_unlock(&data->scid_lock);

        if (ctx->sc) {
                dma_pool_free(data->sc_pool, ctx->sc, ctx->sc_phys);
                ctx->sc = NULL;
        }
}
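/*
 * Allocate the per-direction context: reserve a security context ID from
 * the bitmap and carve the context memory out of the DMA pool.
 */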
static int sa_init_ctx_info(struct sa_ctx_info *ctx,
                            struct sa_crypto_data *data)
{
        unsigned long bn;
        int err;

        spin_lock(&data->scid_lock);
        bn = find_first_zero_bit(data->ctx_bm, SA_MAX_NUM_CTX);
        __set_bit(bn, data->ctx_bm);
        data->sc_id++;
        spin_unlock(&data->scid_lock);

        ctx->sc_id = (u16)(data->sc_id_start + bn);

        ctx->sc = dma_pool_alloc(data->sc_pool, GFP_KERNEL, &ctx->sc_phys);
        if (!ctx->sc) {
                dev_err(&data->pdev->dev, "Failed to allocate SC memory\n");
                err = -ENOMEM;
                goto scid_rollback;
        }

        return 0;

scid_rollback:
        spin_lock(&data->scid_lock);
        __clear_bit(bn, data->ctx_bm);
        data->sc_id--;
        spin_unlock(&data->scid_lock);

        return err;
}

static void sa_cipher_cra_exit(struct crypto_skcipher *tfm)
{
        struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);

        dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
                __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
                ctx->dec.sc_id, &ctx->dec.sc_phys);

        sa_free_ctx_info(&ctx->enc, data);
        sa_free_ctx_info(&ctx->dec, data);

        crypto_free_skcipher(ctx->fallback.skcipher);
}

static int sa_cipher_cra_init(struct crypto_skcipher *tfm)
{
        struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
        const char *name = crypto_tfm_alg_name(&tfm->base);
        struct crypto_skcipher *child;
        int ret;

        memzero_explicit(ctx, sizeof(*ctx));
        ctx->dev_data = data;

        ret = sa_init_ctx_info(&ctx->enc, data);
        if (ret)
                return ret;
        ret = sa_init_ctx_info(&ctx->dec, data);
        if (ret) {
                sa_free_ctx_info(&ctx->enc, data);
                return ret;
        }

        child = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);

        if (IS_ERR(child)) {
                dev_err(sa_k3_dev, "Error allocating fallback algo %s\n", name);
                /* Don't leak the contexts allocated above */
                sa_free_ctx_info(&ctx->dec, data);
                sa_free_ctx_info(&ctx->enc, data);
                return PTR_ERR(child);
        }

        ctx->fallback.skcipher = child;
        crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(child) +
                                         sizeof(struct skcipher_request));

        dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
                __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
                ctx->dec.sc_id, &ctx->dec.sc_phys);
        return 0;
}
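/*
 * Build both the encryption and decryption security contexts plus their
 * command label templates for the new key, and key the SW fallback tfm
 * with the same key.
 */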
static int sa_cipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
                            unsigned int keylen, struct algo_data *ad)
{
        struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct crypto_skcipher *child = ctx->fallback.skcipher;
        int cmdl_len;
        struct sa_cmdl_cfg cfg;
        int ret;

        if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
            keylen != AES_KEYSIZE_256)
                return -EINVAL;

        ad->enc_eng.eng_id = SA_ENG_ID_EM1;
        ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;

        memzero_explicit(&cfg, sizeof(cfg));
        cfg.enc_eng_id = ad->enc_eng.eng_id;
        cfg.iv_size = crypto_skcipher_ivsize(tfm);

        crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
        crypto_skcipher_set_flags(child, tfm->base.crt_flags &
                                         CRYPTO_TFM_REQ_MASK);
        ret = crypto_skcipher_setkey(child, key, keylen);
        if (ret)
                return ret;

        /* Setup Encryption Security Context & Command label template */
        if (sa_init_sc(&ctx->enc, ctx->dev_data->match_data, key, keylen, NULL, 0,
                       ad, 1, &ctx->enc.epib[1]))
                goto badkey;

        cmdl_len = sa_format_cmdl_gen(&cfg,
                                      (u8 *)ctx->enc.cmdl,
                                      &ctx->enc.cmdl_upd_info);
        if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
                goto badkey;

        ctx->enc.cmdl_size = cmdl_len;

        /* Setup Decryption Security Context & Command label template */
        if (sa_init_sc(&ctx->dec, ctx->dev_data->match_data, key, keylen, NULL, 0,
                       ad, 0, &ctx->dec.epib[1]))
                goto badkey;

        cfg.enc_eng_id = ad->enc_eng.eng_id;
        cmdl_len = sa_format_cmdl_gen(&cfg, (u8 *)ctx->dec.cmdl,
                                      &ctx->dec.cmdl_upd_info);

        if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
                goto badkey;

        ctx->dec.cmdl_size = cmdl_len;
        ctx->iv_idx = ad->iv_idx;

        return 0;

badkey:
        dev_err(sa_k3_dev, "%s: badkey\n", __func__);
        return -EINVAL;
}

static int sa_aes_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key,
                             unsigned int keylen)
{
        struct algo_data ad = { 0 };
        /* Convert the key size (16/24/32) to the key size index (0/1/2) */
        int key_idx = (keylen >> 3) - 2;

        if (key_idx >= 3)
                return -EINVAL;

        ad.mci_enc = mci_cbc_enc_array[key_idx];
        ad.mci_dec = mci_cbc_dec_array[key_idx];
        ad.inv_key = true;
        ad.ealg_id = SA_EALG_ID_AES_CBC;
        ad.iv_idx = 4;
        ad.iv_out_size = 16;

        return sa_cipher_setkey(tfm, key, keylen, &ad);
}

static int sa_aes_ecb_setkey(struct crypto_skcipher *tfm, const u8 *key,
                             unsigned int keylen)
{
        struct algo_data ad = { 0 };
        /* Convert the key size (16/24/32) to the key size index (0/1/2) */
        int key_idx = (keylen >> 3) - 2;

        if (key_idx >= 3)
                return -EINVAL;

        ad.mci_enc = mci_ecb_enc_array[key_idx];
        ad.mci_dec = mci_ecb_dec_array[key_idx];
        ad.inv_key = true;
        ad.ealg_id = SA_EALG_ID_AES_ECB;

        return sa_cipher_setkey(tfm, key, keylen, &ad);
}

static int sa_3des_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key,
                              unsigned int keylen)
{
        struct algo_data ad = { 0 };

        ad.mci_enc = mci_cbc_3des_enc_array;
        ad.mci_dec = mci_cbc_3des_dec_array;
        ad.ealg_id = SA_EALG_ID_3DES_CBC;
        ad.iv_idx = 6;
        ad.iv_out_size = 8;

        return sa_cipher_setkey(tfm, key, keylen, &ad);
}

static int sa_3des_ecb_setkey(struct crypto_skcipher *tfm, const u8 *key,
                              unsigned int keylen)
{
        struct algo_data ad = { 0 };

        ad.mci_enc = mci_ecb_3des_enc_array;
        ad.mci_dec = mci_ecb_3des_dec_array;

        return sa_cipher_setkey(tfm, key, keylen, &ad);
}

static void sa_sync_from_device(struct sa_rx_data *rxd)
{
        struct sg_table *sgt;

        if (rxd->mapped_sg[0].dir == DMA_BIDIRECTIONAL)
                sgt = &rxd->mapped_sg[0].sgt;
        else
                sgt = &rxd->mapped_sg[1].sgt;

        dma_sync_sgtable_for_cpu(rxd->ddev, sgt, DMA_FROM_DEVICE);
}

static void sa_free_sa_rx_data(struct sa_rx_data *rxd)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(rxd->mapped_sg); i++) {
                struct sa_mapped_sg *mapped_sg = &rxd->mapped_sg[i];

                if (mapped_sg->mapped) {
                        dma_unmap_sgtable(rxd->ddev, &mapped_sg->sgt,
                                          mapped_sg->dir, 0);
                        kfree(mapped_sg->split_sg);
                }
        }

        kfree(rxd);
}

static void sa_aes_dma_in_callback(void *data)
{
        struct sa_rx_data *rxd = data;
        struct skcipher_request *req;
        u32 *result;
        __be32 *mdptr;
        size_t ml, pl;
        int i;

        sa_sync_from_device(rxd);
        req = container_of(rxd->req, struct skcipher_request, base);

        if (req->iv) {
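                /*
                 * The engine returns the output IV in the descriptor
                 * metadata; copy it back into req->iv for chaining.
                 */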
                mdptr = (__be32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl,
                                                               &ml);
                result = (u32 *)req->iv;

                for (i = 0; i < (rxd->enc_iv_size / 4); i++)
                        result[i] = be32_to_cpu(mdptr[i + rxd->iv_idx]);
        }

        sa_free_sa_rx_data(rxd);

        skcipher_request_complete(req, 0);
}
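/*
 * Populate the DMA descriptor metadata: the EPIB words go in first, the
 * PS data words (the command label) from word 5 onwards; word 4 is set
 * to a fixed 0xFFFF marker pattern.
 */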
static void
sa_prepare_tx_desc(u32 *mdptr, u32 pslen, u32 *psdata, u32 epiblen, u32 *epib)
{
        u32 *out, *in;
        int i;

        for (out = mdptr, in = epib, i = 0; i < epiblen / sizeof(u32); i++)
                *out++ = *in++;

        mdptr[4] = (0xFFFF << 16);
        for (out = &mdptr[5], in = psdata, i = 0;
             i < pslen / sizeof(u32); i++)
                *out++ = *in++;
}

static int sa_run(struct sa_req *req)
{
        struct sa_rx_data *rxd;
        gfp_t gfp_flags;
        u32 cmdl[SA_MAX_CMDL_WORDS];
        struct sa_crypto_data *pdata = dev_get_drvdata(sa_k3_dev);
        struct device *ddev;
        struct dma_chan *dma_rx;
        int sg_nents, src_nents, dst_nents;
        struct scatterlist *src, *dst;
        size_t pl, ml, split_size;
        struct sa_ctx_info *sa_ctx = req->enc ? &req->ctx->enc : &req->ctx->dec;
        int ret;
        struct dma_async_tx_descriptor *tx_out;
        u32 *mdptr;
        bool diff_dst;
        enum dma_data_direction dir_src;
        struct sa_mapped_sg *mapped_sg;

        gfp_flags = req->base->flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
                GFP_KERNEL : GFP_ATOMIC;

        rxd = kzalloc(sizeof(*rxd), gfp_flags);
        if (!rxd)
                return -ENOMEM;

        if (req->src != req->dst) {
                diff_dst = true;
                dir_src = DMA_TO_DEVICE;
        } else {
                diff_dst = false;
                dir_src = DMA_BIDIRECTIONAL;
        }

        /*
         * SA2UL has an interesting feature where the receive DMA channel
         * is selected based on the data passed to the engine. Within the
         * transition range, there is also a space where it is impossible
         * to determine where the data will end up, and this should be
         * avoided. This will be handled by the SW fallback mechanism by
         * the individual algorithm implementations.
         */
        if (req->size >= 256)
                dma_rx = pdata->dma_rx2;
        else
                dma_rx = pdata->dma_rx1;

        ddev = dmaengine_get_dma_device(pdata->dma_tx);
        rxd->ddev = ddev;

        memcpy(cmdl, sa_ctx->cmdl, sa_ctx->cmdl_size);

        sa_update_cmdl(req, cmdl, &sa_ctx->cmdl_upd_info);

        if (req->type != CRYPTO_ALG_TYPE_AHASH) {
                if (req->enc)
                        req->type |=
                                (SA_REQ_SUBTYPE_ENC << SA_REQ_SUBTYPE_SHIFT);
                else
                        req->type |=
                                (SA_REQ_SUBTYPE_DEC << SA_REQ_SUBTYPE_SHIFT);
        }

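        /*
         * The request type word is appended directly after the command
         * label; it travels to the engine as part of the PS data.
         */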
        cmdl[sa_ctx->cmdl_size / sizeof(u32)] = req->type;

        /*
         * Map the packets, first we check if the data fits into a single
         * sg entry and use that if possible. If it does not fit, we check
         * if we need to do sg_split to align the scatterlist data on the
         * actual data size being processed by the crypto engine.
         */
        src = req->src;
        sg_nents = sg_nents_for_len(src, req->size);

        split_size = req->size;

        mapped_sg = &rxd->mapped_sg[0];
        if (sg_nents == 1 && split_size <= req->src->length) {
                src = &mapped_sg->static_sg;
                src_nents = 1;
                sg_init_table(src, 1);
                sg_set_page(src, sg_page(req->src), split_size,
                            req->src->offset);

                mapped_sg->sgt.sgl = src;
                mapped_sg->sgt.orig_nents = src_nents;
                ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0);
                if (ret) {
                        kfree(rxd);
                        return ret;
                }

                mapped_sg->dir = dir_src;
                mapped_sg->mapped = true;
        } else {
                mapped_sg->sgt.sgl = req->src;
                mapped_sg->sgt.orig_nents = sg_nents;
                ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0);
                if (ret) {
                        kfree(rxd);
                        return ret;
                }

                mapped_sg->dir = dir_src;
                mapped_sg->mapped = true;

                ret = sg_split(mapped_sg->sgt.sgl, mapped_sg->sgt.nents, 0, 1,
                               &split_size, &src, &src_nents, gfp_flags);
                if (ret) {
                        src_nents = mapped_sg->sgt.nents;
                        src = mapped_sg->sgt.sgl;
                } else {
                        mapped_sg->split_sg = src;
                }
        }

        dma_sync_sgtable_for_device(ddev, &mapped_sg->sgt, DMA_TO_DEVICE);

        if (!diff_dst) {
                dst_nents = src_nents;
                dst = src;
        } else {
                dst_nents = sg_nents_for_len(req->dst, req->size);
                mapped_sg = &rxd->mapped_sg[1];

                if (dst_nents == 1 && split_size <= req->dst->length) {
                        dst = &mapped_sg->static_sg;
                        dst_nents = 1;
                        sg_init_table(dst, 1);
                        sg_set_page(dst, sg_page(req->dst), split_size,
                                    req->dst->offset);

                        mapped_sg->sgt.sgl = dst;
                        mapped_sg->sgt.orig_nents = dst_nents;
                        ret = dma_map_sgtable(ddev, &mapped_sg->sgt,
                                              DMA_FROM_DEVICE, 0);
                        if (ret)
                                goto err_cleanup;

                        mapped_sg->dir = DMA_FROM_DEVICE;
                        mapped_sg->mapped = true;
                } else {
                        mapped_sg->sgt.sgl = req->dst;
                        mapped_sg->sgt.orig_nents = dst_nents;
                        ret = dma_map_sgtable(ddev, &mapped_sg->sgt,
                                              DMA_FROM_DEVICE, 0);
                        if (ret)
                                goto err_cleanup;

                        mapped_sg->dir = DMA_FROM_DEVICE;
                        mapped_sg->mapped = true;

                        ret = sg_split(mapped_sg->sgt.sgl, mapped_sg->sgt.nents,
                                       0, 1, &split_size, &dst, &dst_nents,
                                       gfp_flags);
                        if (ret) {
                                dst_nents = mapped_sg->sgt.nents;
                                dst = mapped_sg->sgt.sgl;
                        } else {
                                mapped_sg->split_sg = dst;
                        }
                }
        }

        rxd->tx_in = dmaengine_prep_slave_sg(dma_rx, dst, dst_nents,
                                             DMA_DEV_TO_MEM,
                                             DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!rxd->tx_in) {
                dev_err(pdata->dev, "IN prep_slave_sg() failed\n");
                ret = -EINVAL;
                goto err_cleanup;
        }

        rxd->req = (void *)req->base;
        rxd->enc = req->enc;
        rxd->iv_idx = req->ctx->iv_idx;
        rxd->enc_iv_size = sa_ctx->cmdl_upd_info.enc_iv.size;
        rxd->tx_in->callback = req->callback;
        rxd->tx_in->callback_param = rxd;

        tx_out = dmaengine_prep_slave_sg(pdata->dma_tx, src,
                                         src_nents, DMA_MEM_TO_DEV,
                                         DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

        if (!tx_out) {
                dev_err(pdata->dev, "OUT prep_slave_sg() failed\n");
                ret = -EINVAL;
                goto err_cleanup;
        }

        /*
         * Prepare metadata for DMA engine. This essentially describes the
         * crypto algorithm to be used, data sizes, different keys etc.
         */
        mdptr = (u32 *)dmaengine_desc_get_metadata_ptr(tx_out, &pl, &ml);

        sa_prepare_tx_desc(mdptr, (sa_ctx->cmdl_size + (SA_PSDATA_CTX_WORDS *
                                   sizeof(u32))), cmdl, sizeof(sa_ctx->epib),
                           sa_ctx->epib);

        ml = sa_ctx->cmdl_size + (SA_PSDATA_CTX_WORDS * sizeof(u32));
        dmaengine_desc_set_metadata_len(tx_out, req->mdata_size);

        dmaengine_submit(tx_out);
        dmaengine_submit(rxd->tx_in);

        dma_async_issue_pending(dma_rx);
        dma_async_issue_pending(pdata->dma_tx);

        return -EINPROGRESS;

err_cleanup:
        sa_free_sa_rx_data(rxd);

        return ret;
}

static int sa_cipher_run(struct skcipher_request *req, u8 *iv, int enc)
{
        struct sa_tfm_ctx *ctx =
            crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
        struct crypto_alg *alg = req->base.tfm->__crt_alg;
        struct sa_req sa_req = { 0 };

        if (!req->cryptlen)
                return 0;

        if (req->cryptlen % alg->cra_blocksize)
                return -EINVAL;

        /* Use SW fallback if the data size is not supported */
        if (req->cryptlen > SA_MAX_DATA_SZ ||
            (req->cryptlen >= SA_UNSAFE_DATA_SZ_MIN &&
             req->cryptlen <= SA_UNSAFE_DATA_SZ_MAX)) {
                struct skcipher_request *subreq = skcipher_request_ctx(req);

                skcipher_request_set_tfm(subreq, ctx->fallback.skcipher);
                skcipher_request_set_callback(subreq, req->base.flags,
                                              req->base.complete,
                                              req->base.data);
                skcipher_request_set_crypt(subreq, req->src, req->dst,
                                           req->cryptlen, req->iv);
                if (enc)
                        return crypto_skcipher_encrypt(subreq);
                else
                        return crypto_skcipher_decrypt(subreq);
        }

        sa_req.size = req->cryptlen;
        sa_req.enc_size = req->cryptlen;
        sa_req.src = req->src;
        sa_req.dst = req->dst;
        sa_req.enc_iv = iv;
        sa_req.type = CRYPTO_ALG_TYPE_SKCIPHER;
        sa_req.enc = enc;
        sa_req.callback = sa_aes_dma_in_callback;
        sa_req.mdata_size = 44;
        sa_req.base = &req->base;
        sa_req.ctx = ctx;

        return sa_run(&sa_req);
}

static int sa_encrypt(struct skcipher_request *req)
{
        return sa_cipher_run(req, req->iv, 1);
}

static int sa_decrypt(struct skcipher_request *req)
{
        return sa_cipher_run(req, req->iv, 0);
}

static void sa_sha_dma_in_callback(void *data)
{
        struct sa_rx_data *rxd = data;
        struct ahash_request *req;
        struct crypto_ahash *tfm;
        unsigned int authsize;
        int i;
        size_t ml, pl;
        u32 *result;
        __be32 *mdptr;

        sa_sync_from_device(rxd);
        req = container_of(rxd->req, struct ahash_request, base);
        tfm = crypto_ahash_reqtfm(req);
        authsize = crypto_ahash_digestsize(tfm);

        mdptr = (__be32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl, &ml);
        result = (u32 *)req->result;

        for (i = 0; i < (authsize / 4); i++)
                result[i] = be32_to_cpu(mdptr[i + 4]);

        sa_free_sa_rx_data(rxd);

        ahash_request_complete(req, 0);
}

static int zero_message_process(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        int sa_digest_size = crypto_ahash_digestsize(tfm);

        switch (sa_digest_size) {
        case SHA1_DIGEST_SIZE:
                memcpy(req->result, sha1_zero_message_hash, sa_digest_size);
                break;
        case SHA256_DIGEST_SIZE:
                memcpy(req->result, sha256_zero_message_hash, sa_digest_size);
                break;
        case SHA512_DIGEST_SIZE:
                memcpy(req->result, sha512_zero_message_hash, sa_digest_size);
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static int sa_sha_run(struct ahash_request *req)
{
        struct sa_tfm_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
        struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
        struct sa_req sa_req = { 0 };
        size_t auth_len;

        auth_len = req->nbytes;

        if (!auth_len)
                return zero_message_process(req);

        if (auth_len > SA_MAX_DATA_SZ ||
            (auth_len >= SA_UNSAFE_DATA_SZ_MIN &&
             auth_len <= SA_UNSAFE_DATA_SZ_MAX)) {
                struct ahash_request *subreq = &rctx->fallback_req;
                int ret = 0;

                ahash_request_set_tfm(subreq, ctx->fallback.ahash);
                subreq->base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

                crypto_ahash_init(subreq);

                subreq->nbytes = auth_len;
                subreq->src = req->src;
                subreq->result = req->result;

                ret |= crypto_ahash_update(subreq);

                subreq->nbytes = 0;

                ret |= crypto_ahash_final(subreq);

                return ret;
        }

        sa_req.size = auth_len;
        sa_req.auth_size = auth_len;
        sa_req.src = req->src;
        sa_req.dst = req->src;
        sa_req.enc = true;
        sa_req.type = CRYPTO_ALG_TYPE_AHASH;
        sa_req.callback = sa_sha_dma_in_callback;
        sa_req.mdata_size = 28;
        sa_req.ctx = ctx;
        sa_req.base = &req->base;

        return sa_run(&sa_req);
}
1451
1452 static int sa_sha_setup(struct sa_tfm_ctx *ctx, struct  algo_data *ad)
1453 {
1454         int bs = crypto_shash_blocksize(ctx->shash);
1455         int cmdl_len;
1456         struct sa_cmdl_cfg cfg;
1457
1458         ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
1459         ad->auth_eng.eng_id = SA_ENG_ID_AM1;
1460         ad->auth_eng.sc_size = SA_CTX_AUTH_TYPE2_SZ;
1461
1462         memset(ctx->authkey, 0, bs);
1463         memset(&cfg, 0, sizeof(cfg));
1464         cfg.aalg = ad->aalg_id;
1465         cfg.enc_eng_id = ad->enc_eng.eng_id;
1466         cfg.auth_eng_id = ad->auth_eng.eng_id;
1467         cfg.iv_size = 0;
1468         cfg.akey = NULL;
1469         cfg.akey_len = 0;
1470
1471         ctx->dev_data = dev_get_drvdata(sa_k3_dev);
1472         /* Setup Encryption Security Context & Command label template */
        if (sa_init_sc(&ctx->enc, ctx->dev_data->match_data, NULL, 0, NULL, 0,
                       ad, 0, &ctx->enc.epib[1]))
                goto err_sc_init;

        cmdl_len = sa_format_cmdl_gen(&cfg,
                                      (u8 *)ctx->enc.cmdl,
                                      &ctx->enc.cmdl_upd_info);
        if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
                goto err_sc_init;

        ctx->enc.cmdl_size = cmdl_len;

        return 0;

err_sc_init:
        /* There is no key here; "badkey" would be a misleading message */
        dev_err(sa_k3_dev, "%s: security context initialization failed\n",
                __func__);
        return -EINVAL;
}
1491
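/*
 * Common transform init for the hash algorithms. Every caller passes a
 * non-NULL alg_base ("sha1"/"sha256"/"sha512"), so the fallback ahash
 * whose request size is added below is always allocated.
 */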
1492 static int sa_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
1493 {
1494         struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1495         struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
1496         int ret;
1497
1498         memset(ctx, 0, sizeof(*ctx));
1499         ctx->dev_data = data;
1500         ret = sa_init_ctx_info(&ctx->enc, data);
1501         if (ret)
1502                 return ret;
1503
1504         if (alg_base) {
1505                 ctx->shash = crypto_alloc_shash(alg_base, 0,
1506                                                 CRYPTO_ALG_NEED_FALLBACK);
1507                 if (IS_ERR(ctx->shash)) {
1508                         dev_err(sa_k3_dev, "base driver %s couldn't be loaded\n",
1509                                 alg_base);
1510                         return PTR_ERR(ctx->shash);
1511                 }
1512                 /* for fallback */
1513                 ctx->fallback.ahash =
1514                         crypto_alloc_ahash(alg_base, 0,
1515                                            CRYPTO_ALG_NEED_FALLBACK);
                if (IS_ERR(ctx->fallback.ahash)) {
                        dev_err(ctx->dev_data->dev,
                                "Could not load fallback driver\n");
                        /* Don't leak the shash allocated just above */
                        crypto_free_shash(ctx->shash);
                        return PTR_ERR(ctx->fallback.ahash);
                }
1521         }
1522
1523         dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
1524                 __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
1525                 ctx->dec.sc_id, &ctx->dec.sc_phys);
1526
1527         crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1528                                  sizeof(struct sa_sha_req_ctx) +
1529                                  crypto_ahash_reqsize(ctx->fallback.ahash));
1530
1531         return 0;
1532 }
1533
1534 static int sa_sha_digest(struct ahash_request *req)
1535 {
1536         return sa_sha_run(req);
1537 }
1538
1539 static int sa_sha_init(struct ahash_request *req)
1540 {
1541         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1542         struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1543         struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1544
1545         dev_dbg(sa_k3_dev, "init: digest size: %u, rctx=%p\n",
1546                 crypto_ahash_digestsize(tfm), rctx);
1547
1548         ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1549         rctx->fallback_req.base.flags =
1550                 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1551
1552         return crypto_ahash_init(&rctx->fallback_req);
1553 }
1554
1555 static int sa_sha_update(struct ahash_request *req)
1556 {
1557         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1558         struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1559         struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1560
1561         ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1562         rctx->fallback_req.base.flags =
1563                 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1564         rctx->fallback_req.nbytes = req->nbytes;
1565         rctx->fallback_req.src = req->src;
1566
1567         return crypto_ahash_update(&rctx->fallback_req);
1568 }
1569
1570 static int sa_sha_final(struct ahash_request *req)
1571 {
1572         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1573         struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1574         struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1575
1576         ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1577         rctx->fallback_req.base.flags =
1578                 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1579         rctx->fallback_req.result = req->result;
1580
1581         return crypto_ahash_final(&rctx->fallback_req);
1582 }
1583
1584 static int sa_sha_finup(struct ahash_request *req)
1585 {
1586         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1587         struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1588         struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1589
1590         ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1591         rctx->fallback_req.base.flags =
1592                 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1593
1594         rctx->fallback_req.nbytes = req->nbytes;
1595         rctx->fallback_req.src = req->src;
1596         rctx->fallback_req.result = req->result;
1597
1598         return crypto_ahash_finup(&rctx->fallback_req);
1599 }
1600
1601 static int sa_sha_import(struct ahash_request *req, const void *in)
1602 {
1603         struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1604         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1605         struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1606
1607         ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1608         rctx->fallback_req.base.flags = req->base.flags &
1609                 CRYPTO_TFM_REQ_MAY_SLEEP;
1610
1611         return crypto_ahash_import(&rctx->fallback_req, in);
1612 }
1613
1614 static int sa_sha_export(struct ahash_request *req, void *out)
1615 {
1616         struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1617         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1618         struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1619         struct ahash_request *subreq = &rctx->fallback_req;
1620
1621         ahash_request_set_tfm(subreq, ctx->fallback.ahash);
1622         subreq->base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1623
1624         return crypto_ahash_export(subreq, out);
1625 }
1626
static int sa_sha1_cra_init(struct crypto_tfm *tfm)
{
        struct algo_data ad = { 0 };
        struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
        int ret;

        /* Propagate setup failures instead of silently returning 0 */
        ret = sa_sha_cra_init_alg(tfm, "sha1");
        if (ret)
                return ret;

        ad.aalg_id = SA_AALG_ID_SHA1;
        ad.hash_size = SHA1_DIGEST_SIZE;
        ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA1;

        return sa_sha_setup(ctx, &ad);
}
1642
static int sa_sha256_cra_init(struct crypto_tfm *tfm)
{
        struct algo_data ad = { 0 };
        struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
        int ret;

        /* Propagate setup failures instead of silently returning 0 */
        ret = sa_sha_cra_init_alg(tfm, "sha256");
        if (ret)
                return ret;

        ad.aalg_id = SA_AALG_ID_SHA2_256;
        ad.hash_size = SHA256_DIGEST_SIZE;
        ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA256;

        return sa_sha_setup(ctx, &ad);
}
1658
static int sa_sha512_cra_init(struct crypto_tfm *tfm)
{
        struct algo_data ad = { 0 };
        struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
        int ret;

        /* Propagate setup failures instead of silently returning 0 */
        ret = sa_sha_cra_init_alg(tfm, "sha512");
        if (ret)
                return ret;

        ad.aalg_id = SA_AALG_ID_SHA2_512;
        ad.hash_size = SHA512_DIGEST_SIZE;
        ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA512;

        return sa_sha_setup(ctx, &ad);
}
1674
1675 static void sa_sha_cra_exit(struct crypto_tfm *tfm)
1676 {
1677         struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1678         struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
1679
1680         dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
1681                 __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
1682                 ctx->dec.sc_id, &ctx->dec.sc_phys);
1683
1684         if (crypto_tfm_alg_type(tfm) == CRYPTO_ALG_TYPE_AHASH)
1685                 sa_free_ctx_info(&ctx->enc, data);
1686
1687         crypto_free_shash(ctx->shash);
1688         crypto_free_ahash(ctx->fallback.ahash);
1689 }
1690
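/*
 * AEAD completion callback. The computed tag arrives byte-swapped in the
 * descriptor metadata words; for encryption it is copied to the tail of
 * the destination, for decryption it is compared against the tag at the
 * tail of the source and a mismatch completes the request with -EBADMSG.
 */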
1691 static void sa_aead_dma_in_callback(void *data)
1692 {
1693         struct sa_rx_data *rxd = data;
1694         struct aead_request *req;
1695         struct crypto_aead *tfm;
1696         unsigned int start;
1697         unsigned int authsize;
1698         u8 auth_tag[SA_MAX_AUTH_TAG_SZ];
1699         size_t pl, ml;
1700         int i;
1701         int err = 0;
1702         u32 *mdptr;
1703
1704         sa_sync_from_device(rxd);
1705         req = container_of(rxd->req, struct aead_request, base);
1706         tfm = crypto_aead_reqtfm(req);
1707         start = req->assoclen + req->cryptlen;
1708         authsize = crypto_aead_authsize(tfm);
1709
1710         mdptr = (u32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl, &ml);
1711         for (i = 0; i < (authsize / 4); i++)
1712                 mdptr[i + 4] = swab32(mdptr[i + 4]);
1713
1714         if (rxd->enc) {
1715                 scatterwalk_map_and_copy(&mdptr[4], req->dst, start, authsize,
1716                                          1);
1717         } else {
1718                 start -= authsize;
1719                 scatterwalk_map_and_copy(auth_tag, req->src, start, authsize,
1720                                          0);
1721
1722                 err = memcmp(&mdptr[4], auth_tag, authsize) ? -EBADMSG : 0;
1723         }
1724
1725         sa_free_sa_rx_data(rxd);
1726
1727         aead_request_complete(req, err);
1728 }
1729
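/*
 * Common AEAD transform init: allocate a base shash for the underlying
 * hash, a software fallback AEAD for sizes the engine cannot handle, and
 * separate encryption and decryption security-context slots.
 */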
1730 static int sa_cra_init_aead(struct crypto_aead *tfm, const char *hash,
1731                             const char *fallback)
1732 {
1733         struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
1734         struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
1735         int ret;
1736
1737         memzero_explicit(ctx, sizeof(*ctx));
1738         ctx->dev_data = data;
1739
1740         ctx->shash = crypto_alloc_shash(hash, 0, CRYPTO_ALG_NEED_FALLBACK);
1741         if (IS_ERR(ctx->shash)) {
1742                 dev_err(sa_k3_dev, "base driver %s couldn't be loaded\n", hash);
1743                 return PTR_ERR(ctx->shash);
1744         }
1745
1746         ctx->fallback.aead = crypto_alloc_aead(fallback, 0,
1747                                                CRYPTO_ALG_NEED_FALLBACK);
1748
        if (IS_ERR(ctx->fallback.aead)) {
                dev_err(sa_k3_dev, "fallback driver %s couldn't be loaded\n",
                        fallback);
                /* Don't leak the shash allocated just above */
                crypto_free_shash(ctx->shash);
                return PTR_ERR(ctx->fallback.aead);
        }
1754
1755         crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) +
1756                                 crypto_aead_reqsize(ctx->fallback.aead));
1757
1758         ret = sa_init_ctx_info(&ctx->enc, data);
1759         if (ret)
1760                 return ret;
1761
1762         ret = sa_init_ctx_info(&ctx->dec, data);
1763         if (ret) {
1764                 sa_free_ctx_info(&ctx->enc, data);
1765                 return ret;
1766         }
1767
1768         dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
1769                 __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
1770                 ctx->dec.sc_id, &ctx->dec.sc_phys);
1771
1772         return ret;
1773 }
1774
1775 static int sa_cra_init_aead_sha1(struct crypto_aead *tfm)
1776 {
1777         return sa_cra_init_aead(tfm, "sha1",
1778                                 "authenc(hmac(sha1-ce),cbc(aes-ce))");
1779 }
1780
1781 static int sa_cra_init_aead_sha256(struct crypto_aead *tfm)
1782 {
1783         return sa_cra_init_aead(tfm, "sha256",
1784                                 "authenc(hmac(sha256-ce),cbc(aes-ce))");
1785 }
1786
1787 static void sa_exit_tfm_aead(struct crypto_aead *tfm)
1788 {
1789         struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
1790         struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
1791
1792         crypto_free_shash(ctx->shash);
1793         crypto_free_aead(ctx->fallback.aead);
1794
1795         sa_free_ctx_info(&ctx->enc, data);
1796         sa_free_ctx_info(&ctx->dec, data);
1797 }
1798
1799 /* AEAD algorithm configuration interface function */
1800 static int sa_aead_setkey(struct crypto_aead *authenc,
1801                           const u8 *key, unsigned int keylen,
1802                           struct algo_data *ad)
1803 {
1804         struct sa_tfm_ctx *ctx = crypto_aead_ctx(authenc);
1805         struct crypto_authenc_keys keys;
1806         int cmdl_len;
1807         struct sa_cmdl_cfg cfg;
1808         int key_idx;
1809
1810         if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
1811                 return -EINVAL;
1812
1813         /* Convert the key size (16/24/32) to the key size index (0/1/2) */
1814         key_idx = (keys.enckeylen >> 3) - 2;
        if (key_idx < 0 || key_idx >= 3)
                return -EINVAL;
1817
1818         ad->ctx = ctx;
1819         ad->enc_eng.eng_id = SA_ENG_ID_EM1;
1820         ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
1821         ad->auth_eng.eng_id = SA_ENG_ID_AM1;
1822         ad->auth_eng.sc_size = SA_CTX_AUTH_TYPE2_SZ;
1823         ad->mci_enc = mci_cbc_enc_no_iv_array[key_idx];
1824         ad->mci_dec = mci_cbc_dec_no_iv_array[key_idx];
1825         ad->inv_key = true;
1826         ad->keyed_mac = true;
1827         ad->ealg_id = SA_EALG_ID_AES_CBC;
1828         ad->prep_iopad = sa_prepare_iopads;
1829
1830         memset(&cfg, 0, sizeof(cfg));
1831         cfg.enc = true;
1832         cfg.aalg = ad->aalg_id;
1833         cfg.enc_eng_id = ad->enc_eng.eng_id;
1834         cfg.auth_eng_id = ad->auth_eng.eng_id;
1835         cfg.iv_size = crypto_aead_ivsize(authenc);
1836         cfg.akey = keys.authkey;
1837         cfg.akey_len = keys.authkeylen;
1838
1839         /* Setup Encryption Security Context & Command label template */
1840         if (sa_init_sc(&ctx->enc, ctx->dev_data->match_data, keys.enckey,
1841                        keys.enckeylen, keys.authkey, keys.authkeylen,
1842                        ad, 1, &ctx->enc.epib[1]))
1843                 return -EINVAL;
1844
1845         cmdl_len = sa_format_cmdl_gen(&cfg,
1846                                       (u8 *)ctx->enc.cmdl,
1847                                       &ctx->enc.cmdl_upd_info);
1848         if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
1849                 return -EINVAL;
1850
1851         ctx->enc.cmdl_size = cmdl_len;
1852
1853         /* Setup Decryption Security Context & Command label template */
1854         if (sa_init_sc(&ctx->dec, ctx->dev_data->match_data, keys.enckey,
1855                        keys.enckeylen, keys.authkey, keys.authkeylen,
1856                        ad, 0, &ctx->dec.epib[1]))
1857                 return -EINVAL;
1858
1859         cfg.enc = false;
1860         cmdl_len = sa_format_cmdl_gen(&cfg, (u8 *)ctx->dec.cmdl,
1861                                       &ctx->dec.cmdl_upd_info);
1862
1863         if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
1864                 return -EINVAL;
1865
1866         ctx->dec.cmdl_size = cmdl_len;
1867
1868         crypto_aead_clear_flags(ctx->fallback.aead, CRYPTO_TFM_REQ_MASK);
1869         crypto_aead_set_flags(ctx->fallback.aead,
1870                               crypto_aead_get_flags(authenc) &
1871                               CRYPTO_TFM_REQ_MASK);
        return crypto_aead_setkey(ctx->fallback.aead, key, keylen);
1875 }
1876
1877 static int sa_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
1878 {
1879         struct sa_tfm_ctx *ctx = crypto_tfm_ctx(crypto_aead_tfm(tfm));
1880
1881         return crypto_aead_setauthsize(ctx->fallback.aead, authsize);
1882 }
1883
1884 static int sa_aead_cbc_sha1_setkey(struct crypto_aead *authenc,
1885                                    const u8 *key, unsigned int keylen)
1886 {
1887         struct algo_data ad = { 0 };
1888
1889         ad.ealg_id = SA_EALG_ID_AES_CBC;
1890         ad.aalg_id = SA_AALG_ID_HMAC_SHA1;
1891         ad.hash_size = SHA1_DIGEST_SIZE;
1892         ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA1;
1893
1894         return sa_aead_setkey(authenc, key, keylen, &ad);
1895 }
1896
1897 static int sa_aead_cbc_sha256_setkey(struct crypto_aead *authenc,
1898                                      const u8 *key, unsigned int keylen)
1899 {
1900         struct algo_data ad = { 0 };
1901
1902         ad.ealg_id = SA_EALG_ID_AES_CBC;
1903         ad.aalg_id = SA_AALG_ID_HMAC_SHA2_256;
1904         ad.hash_size = SHA256_DIGEST_SIZE;
1905         ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA256;
1906
1907         return sa_aead_setkey(authenc, key, keylen, &ad);
1908 }
1909
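/*
 * Dispatch an AEAD request. As in the hash path, sizes outside the
 * engine's limits (above SA_MAX_DATA_SZ or inside the unsafe window) are
 * handed to the fallback AEAD; otherwise the request is encoded into a
 * sa_req and run through the DMA engine.
 */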
1910 static int sa_aead_run(struct aead_request *req, u8 *iv, int enc)
1911 {
1912         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1913         struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
1914         struct sa_req sa_req = { 0 };
1915         size_t auth_size, enc_size;
1916
1917         enc_size = req->cryptlen;
1918         auth_size = req->assoclen + req->cryptlen;
1919
1920         if (!enc) {
1921                 enc_size -= crypto_aead_authsize(tfm);
1922                 auth_size -= crypto_aead_authsize(tfm);
1923         }
1924
1925         if (auth_size > SA_MAX_DATA_SZ ||
1926             (auth_size >= SA_UNSAFE_DATA_SZ_MIN &&
1927              auth_size <= SA_UNSAFE_DATA_SZ_MAX)) {
1928                 struct aead_request *subreq = aead_request_ctx(req);
1929                 int ret;
1930
1931                 aead_request_set_tfm(subreq, ctx->fallback.aead);
1932                 aead_request_set_callback(subreq, req->base.flags,
1933                                           req->base.complete, req->base.data);
1934                 aead_request_set_crypt(subreq, req->src, req->dst,
1935                                        req->cryptlen, req->iv);
1936                 aead_request_set_ad(subreq, req->assoclen);
1937
1938                 ret = enc ? crypto_aead_encrypt(subreq) :
1939                         crypto_aead_decrypt(subreq);
1940                 return ret;
1941         }
1942
1943         sa_req.enc_offset = req->assoclen;
1944         sa_req.enc_size = enc_size;
1945         sa_req.auth_size = auth_size;
1946         sa_req.size = auth_size;
1947         sa_req.enc_iv = iv;
1948         sa_req.type = CRYPTO_ALG_TYPE_AEAD;
1949         sa_req.enc = enc;
1950         sa_req.callback = sa_aead_dma_in_callback;
1951         sa_req.mdata_size = 52;
1952         sa_req.base = &req->base;
1953         sa_req.ctx = ctx;
1954         sa_req.src = req->src;
1955         sa_req.dst = req->dst;
1956
1957         return sa_run(&sa_req);
1958 }
1959
1960 /* AEAD algorithm encrypt interface function */
1961 static int sa_aead_encrypt(struct aead_request *req)
1962 {
1963         return sa_aead_run(req, req->iv, 1);
1964 }
1965
1966 /* AEAD algorithm decrypt interface function */
1967 static int sa_aead_decrypt(struct aead_request *req)
1968 {
1969         return sa_aead_run(req, req->iv, 0);
1970 }
1971
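/*
 * Algorithm template table, indexed by enum sa_algo_id so that a SoC's
 * sa_match_data::supported_algos bitmask selects entries by bit
 * position. Users reach these through the generic crypto API; as a
 * purely illustrative sketch, a kernel consumer would do something like
 *
 *	struct crypto_skcipher *tfm;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *
 * which resolves to "cbc-aes-sa2ul" when this driver is the
 * highest-priority cbc(aes) provider registered at runtime.
 */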
1972 static struct sa_alg_tmpl sa_algs[] = {
1973         [SA_ALG_CBC_AES] = {
1974                 .type = CRYPTO_ALG_TYPE_SKCIPHER,
1975                 .alg.skcipher = {
1976                         .base.cra_name          = "cbc(aes)",
1977                         .base.cra_driver_name   = "cbc-aes-sa2ul",
1978                         .base.cra_priority      = 30000,
1979                         .base.cra_flags         = CRYPTO_ALG_TYPE_SKCIPHER |
1980                                                   CRYPTO_ALG_KERN_DRIVER_ONLY |
1981                                                   CRYPTO_ALG_ASYNC |
1982                                                   CRYPTO_ALG_NEED_FALLBACK,
1983                         .base.cra_blocksize     = AES_BLOCK_SIZE,
1984                         .base.cra_ctxsize       = sizeof(struct sa_tfm_ctx),
1985                         .base.cra_module        = THIS_MODULE,
1986                         .init                   = sa_cipher_cra_init,
1987                         .exit                   = sa_cipher_cra_exit,
1988                         .min_keysize            = AES_MIN_KEY_SIZE,
1989                         .max_keysize            = AES_MAX_KEY_SIZE,
1990                         .ivsize                 = AES_BLOCK_SIZE,
1991                         .setkey                 = sa_aes_cbc_setkey,
1992                         .encrypt                = sa_encrypt,
1993                         .decrypt                = sa_decrypt,
1994                 }
1995         },
1996         [SA_ALG_EBC_AES] = {
1997                 .type = CRYPTO_ALG_TYPE_SKCIPHER,
1998                 .alg.skcipher = {
1999                         .base.cra_name          = "ecb(aes)",
2000                         .base.cra_driver_name   = "ecb-aes-sa2ul",
2001                         .base.cra_priority      = 30000,
2002                         .base.cra_flags         = CRYPTO_ALG_TYPE_SKCIPHER |
2003                                                   CRYPTO_ALG_KERN_DRIVER_ONLY |
2004                                                   CRYPTO_ALG_ASYNC |
2005                                                   CRYPTO_ALG_NEED_FALLBACK,
2006                         .base.cra_blocksize     = AES_BLOCK_SIZE,
2007                         .base.cra_ctxsize       = sizeof(struct sa_tfm_ctx),
2008                         .base.cra_module        = THIS_MODULE,
2009                         .init                   = sa_cipher_cra_init,
2010                         .exit                   = sa_cipher_cra_exit,
2011                         .min_keysize            = AES_MIN_KEY_SIZE,
2012                         .max_keysize            = AES_MAX_KEY_SIZE,
2013                         .setkey                 = sa_aes_ecb_setkey,
2014                         .encrypt                = sa_encrypt,
2015                         .decrypt                = sa_decrypt,
2016                 }
2017         },
2018         [SA_ALG_CBC_DES3] = {
2019                 .type = CRYPTO_ALG_TYPE_SKCIPHER,
2020                 .alg.skcipher = {
2021                         .base.cra_name          = "cbc(des3_ede)",
2022                         .base.cra_driver_name   = "cbc-des3-sa2ul",
2023                         .base.cra_priority      = 30000,
2024                         .base.cra_flags         = CRYPTO_ALG_TYPE_SKCIPHER |
2025                                                   CRYPTO_ALG_KERN_DRIVER_ONLY |
2026                                                   CRYPTO_ALG_ASYNC |
2027                                                   CRYPTO_ALG_NEED_FALLBACK,
2028                         .base.cra_blocksize     = DES_BLOCK_SIZE,
2029                         .base.cra_ctxsize       = sizeof(struct sa_tfm_ctx),
2030                         .base.cra_module        = THIS_MODULE,
2031                         .init                   = sa_cipher_cra_init,
2032                         .exit                   = sa_cipher_cra_exit,
2033                         .min_keysize            = 3 * DES_KEY_SIZE,
2034                         .max_keysize            = 3 * DES_KEY_SIZE,
2035                         .ivsize                 = DES_BLOCK_SIZE,
2036                         .setkey                 = sa_3des_cbc_setkey,
2037                         .encrypt                = sa_encrypt,
2038                         .decrypt                = sa_decrypt,
2039                 }
2040         },
2041         [SA_ALG_ECB_DES3] = {
2042                 .type = CRYPTO_ALG_TYPE_SKCIPHER,
2043                 .alg.skcipher = {
2044                         .base.cra_name          = "ecb(des3_ede)",
2045                         .base.cra_driver_name   = "ecb-des3-sa2ul",
2046                         .base.cra_priority      = 30000,
2047                         .base.cra_flags         = CRYPTO_ALG_TYPE_SKCIPHER |
2048                                                   CRYPTO_ALG_KERN_DRIVER_ONLY |
2049                                                   CRYPTO_ALG_ASYNC |
2050                                                   CRYPTO_ALG_NEED_FALLBACK,
2051                         .base.cra_blocksize     = DES_BLOCK_SIZE,
2052                         .base.cra_ctxsize       = sizeof(struct sa_tfm_ctx),
2053                         .base.cra_module        = THIS_MODULE,
2054                         .init                   = sa_cipher_cra_init,
2055                         .exit                   = sa_cipher_cra_exit,
2056                         .min_keysize            = 3 * DES_KEY_SIZE,
2057                         .max_keysize            = 3 * DES_KEY_SIZE,
2058                         .setkey                 = sa_3des_ecb_setkey,
2059                         .encrypt                = sa_encrypt,
2060                         .decrypt                = sa_decrypt,
2061                 }
2062         },
2063         [SA_ALG_SHA1] = {
2064                 .type = CRYPTO_ALG_TYPE_AHASH,
2065                 .alg.ahash = {
2066                         .halg.base = {
2067                                 .cra_name       = "sha1",
2068                                 .cra_driver_name        = "sha1-sa2ul",
2069                                 .cra_priority   = 400,
2070                                 .cra_flags      = CRYPTO_ALG_TYPE_AHASH |
2071                                                   CRYPTO_ALG_ASYNC |
2072                                                   CRYPTO_ALG_KERN_DRIVER_ONLY |
2073                                                   CRYPTO_ALG_NEED_FALLBACK,
2074                                 .cra_blocksize  = SHA1_BLOCK_SIZE,
2075                                 .cra_ctxsize    = sizeof(struct sa_tfm_ctx),
2076                                 .cra_module     = THIS_MODULE,
2077                                 .cra_init       = sa_sha1_cra_init,
2078                                 .cra_exit       = sa_sha_cra_exit,
2079                         },
2080                         .halg.digestsize        = SHA1_DIGEST_SIZE,
2081                         .halg.statesize         = sizeof(struct sa_sha_req_ctx) +
2082                                                   sizeof(struct sha1_state),
2083                         .init                   = sa_sha_init,
2084                         .update                 = sa_sha_update,
2085                         .final                  = sa_sha_final,
2086                         .finup                  = sa_sha_finup,
2087                         .digest                 = sa_sha_digest,
2088                         .export                 = sa_sha_export,
2089                         .import                 = sa_sha_import,
2090                 },
2091         },
2092         [SA_ALG_SHA256] = {
2093                 .type = CRYPTO_ALG_TYPE_AHASH,
2094                 .alg.ahash = {
2095                         .halg.base = {
2096                                 .cra_name       = "sha256",
2097                                 .cra_driver_name        = "sha256-sa2ul",
2098                                 .cra_priority   = 400,
2099                                 .cra_flags      = CRYPTO_ALG_TYPE_AHASH |
2100                                                   CRYPTO_ALG_ASYNC |
2101                                                   CRYPTO_ALG_KERN_DRIVER_ONLY |
2102                                                   CRYPTO_ALG_NEED_FALLBACK,
2103                                 .cra_blocksize  = SHA256_BLOCK_SIZE,
2104                                 .cra_ctxsize    = sizeof(struct sa_tfm_ctx),
2105                                 .cra_module     = THIS_MODULE,
2106                                 .cra_init       = sa_sha256_cra_init,
2107                                 .cra_exit       = sa_sha_cra_exit,
2108                         },
2109                         .halg.digestsize        = SHA256_DIGEST_SIZE,
2110                         .halg.statesize         = sizeof(struct sa_sha_req_ctx) +
2111                                                   sizeof(struct sha256_state),
2112                         .init                   = sa_sha_init,
2113                         .update                 = sa_sha_update,
2114                         .final                  = sa_sha_final,
2115                         .finup                  = sa_sha_finup,
2116                         .digest                 = sa_sha_digest,
2117                         .export                 = sa_sha_export,
2118                         .import                 = sa_sha_import,
2119                 },
2120         },
2121         [SA_ALG_SHA512] = {
2122                 .type = CRYPTO_ALG_TYPE_AHASH,
2123                 .alg.ahash = {
2124                         .halg.base = {
2125                                 .cra_name       = "sha512",
2126                                 .cra_driver_name        = "sha512-sa2ul",
2127                                 .cra_priority   = 400,
2128                                 .cra_flags      = CRYPTO_ALG_TYPE_AHASH |
2129                                                   CRYPTO_ALG_ASYNC |
2130                                                   CRYPTO_ALG_KERN_DRIVER_ONLY |
2131                                                   CRYPTO_ALG_NEED_FALLBACK,
2132                                 .cra_blocksize  = SHA512_BLOCK_SIZE,
2133                                 .cra_ctxsize    = sizeof(struct sa_tfm_ctx),
2134                                 .cra_module     = THIS_MODULE,
2135                                 .cra_init       = sa_sha512_cra_init,
2136                                 .cra_exit       = sa_sha_cra_exit,
2137                         },
2138                         .halg.digestsize        = SHA512_DIGEST_SIZE,
2139                         .halg.statesize         = sizeof(struct sa_sha_req_ctx) +
2140                                                   sizeof(struct sha512_state),
2141                         .init                   = sa_sha_init,
2142                         .update                 = sa_sha_update,
2143                         .final                  = sa_sha_final,
2144                         .finup                  = sa_sha_finup,
2145                         .digest                 = sa_sha_digest,
2146                         .export                 = sa_sha_export,
2147                         .import                 = sa_sha_import,
2148                 },
2149         },
2150         [SA_ALG_AUTHENC_SHA1_AES] = {
2151                 .type   = CRYPTO_ALG_TYPE_AEAD,
2152                 .alg.aead = {
2153                         .base = {
2154                                 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2155                                 .cra_driver_name =
2156                                         "authenc(hmac(sha1),cbc(aes))-sa2ul",
2157                                 .cra_blocksize = AES_BLOCK_SIZE,
2158                                 .cra_flags = CRYPTO_ALG_TYPE_AEAD |
2159                                         CRYPTO_ALG_KERN_DRIVER_ONLY |
2160                                         CRYPTO_ALG_ASYNC |
2161                                         CRYPTO_ALG_NEED_FALLBACK,
2162                                 .cra_ctxsize = sizeof(struct sa_tfm_ctx),
2163                                 .cra_module = THIS_MODULE,
2164                                 .cra_priority = 3000,
2165                         },
2166                         .ivsize = AES_BLOCK_SIZE,
2167                         .maxauthsize = SHA1_DIGEST_SIZE,
2168
2169                         .init = sa_cra_init_aead_sha1,
2170                         .exit = sa_exit_tfm_aead,
2171                         .setkey = sa_aead_cbc_sha1_setkey,
2172                         .setauthsize = sa_aead_setauthsize,
2173                         .encrypt = sa_aead_encrypt,
2174                         .decrypt = sa_aead_decrypt,
2175                 },
2176         },
2177         [SA_ALG_AUTHENC_SHA256_AES] = {
2178                 .type   = CRYPTO_ALG_TYPE_AEAD,
2179                 .alg.aead = {
2180                         .base = {
2181                                 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2182                                 .cra_driver_name =
2183                                         "authenc(hmac(sha256),cbc(aes))-sa2ul",
2184                                 .cra_blocksize = AES_BLOCK_SIZE,
2185                                 .cra_flags = CRYPTO_ALG_TYPE_AEAD |
2186                                         CRYPTO_ALG_KERN_DRIVER_ONLY |
2187                                         CRYPTO_ALG_ASYNC |
2188                                         CRYPTO_ALG_NEED_FALLBACK,
2189                                 .cra_ctxsize = sizeof(struct sa_tfm_ctx),
2190                                 .cra_module = THIS_MODULE,
2192                                 .cra_priority = 3000,
2193                         },
2194                         .ivsize = AES_BLOCK_SIZE,
2195                         .maxauthsize = SHA256_DIGEST_SIZE,
2196
2197                         .init = sa_cra_init_aead_sha256,
2198                         .exit = sa_exit_tfm_aead,
2199                         .setkey = sa_aead_cbc_sha256_setkey,
2200                         .setauthsize = sa_aead_setauthsize,
2201                         .encrypt = sa_aead_encrypt,
2202                         .decrypt = sa_aead_decrypt,
2203                 },
2204         },
2205 };
2206
2207 /* Register the algorithms in crypto framework */
2208 static void sa_register_algos(struct sa_crypto_data *dev_data)
2209 {
2210         const struct sa_match_data *match_data = dev_data->match_data;
2211         struct device *dev = dev_data->dev;
2212         char *alg_name;
2213         u32 type;
2214         int i, err;
2215
2216         for (i = 0; i < ARRAY_SIZE(sa_algs); i++) {
2217                 /* Skip unsupported algos */
2218                 if (!(match_data->supported_algos & BIT(i)))
2219                         continue;
2220
2221                 type = sa_algs[i].type;
2222                 if (type == CRYPTO_ALG_TYPE_SKCIPHER) {
2223                         alg_name = sa_algs[i].alg.skcipher.base.cra_name;
2224                         err = crypto_register_skcipher(&sa_algs[i].alg.skcipher);
2225                 } else if (type == CRYPTO_ALG_TYPE_AHASH) {
2226                         alg_name = sa_algs[i].alg.ahash.halg.base.cra_name;
2227                         err = crypto_register_ahash(&sa_algs[i].alg.ahash);
2228                 } else if (type == CRYPTO_ALG_TYPE_AEAD) {
2229                         alg_name = sa_algs[i].alg.aead.base.cra_name;
2230                         err = crypto_register_aead(&sa_algs[i].alg.aead);
2231                 } else {
                        dev_err(dev, "unsupported crypto algorithm (%d)\n",
                                sa_algs[i].type);
2235                         continue;
2236                 }
2237
2238                 if (err)
2239                         dev_err(dev, "Failed to register '%s'\n", alg_name);
2240                 else
2241                         sa_algs[i].registered = true;
2242         }
2243 }
2244
2245 /* Unregister the algorithms in crypto framework */
2246 static void sa_unregister_algos(const struct device *dev)
2247 {
2248         u32 type;
2249         int i;
2250
2251         for (i = 0; i < ARRAY_SIZE(sa_algs); i++) {
2252                 type = sa_algs[i].type;
2253                 if (!sa_algs[i].registered)
2254                         continue;
2255                 if (type == CRYPTO_ALG_TYPE_SKCIPHER)
2256                         crypto_unregister_skcipher(&sa_algs[i].alg.skcipher);
2257                 else if (type == CRYPTO_ALG_TYPE_AHASH)
2258                         crypto_unregister_ahash(&sa_algs[i].alg.ahash);
2259                 else if (type == CRYPTO_ALG_TYPE_AEAD)
2260                         crypto_unregister_aead(&sa_algs[i].alg.aead);
2261
2262                 sa_algs[i].registered = false;
2263         }
2264 }
2265
2266 static int sa_init_mem(struct sa_crypto_data *dev_data)
2267 {
2268         struct device *dev = &dev_data->pdev->dev;
2269         /* Setup dma pool for security context buffers */
2270         dev_data->sc_pool = dma_pool_create("keystone-sc", dev,
2271                                             SA_CTX_MAX_SZ, 64, 0);
2272         if (!dev_data->sc_pool) {
                dev_err(dev, "Failed to create dma pool\n");
2274                 return -ENOMEM;
2275         }
2276
2277         return 0;
2278 }
2279
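/*
 * Acquire and configure the rx1/rx2/tx DMA channels. A 48-bit DMA mask
 * is set before the channels are requested; channels acquired before a
 * failure are released in reverse order on the error path.
 */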
2280 static int sa_dma_init(struct sa_crypto_data *dd)
2281 {
2282         int ret;
2283         struct dma_slave_config cfg;
2284
2285         dd->dma_rx1 = NULL;
2286         dd->dma_tx = NULL;
2287         dd->dma_rx2 = NULL;
2288
2289         ret = dma_coerce_mask_and_coherent(dd->dev, DMA_BIT_MASK(48));
2290         if (ret)
2291                 return ret;
2292
2293         dd->dma_rx1 = dma_request_chan(dd->dev, "rx1");
2294         if (IS_ERR(dd->dma_rx1))
2295                 return dev_err_probe(dd->dev, PTR_ERR(dd->dma_rx1),
2296                                      "Unable to request rx1 DMA channel\n");
2297
2298         dd->dma_rx2 = dma_request_chan(dd->dev, "rx2");
2299         if (IS_ERR(dd->dma_rx2)) {
2300                 ret = dev_err_probe(dd->dev, PTR_ERR(dd->dma_rx2),
2301                                     "Unable to request rx2 DMA channel\n");
2302                 goto err_dma_rx2;
2303         }
2304
2305         dd->dma_tx = dma_request_chan(dd->dev, "tx");
2306         if (IS_ERR(dd->dma_tx)) {
2307                 ret = dev_err_probe(dd->dev, PTR_ERR(dd->dma_tx),
2308                                     "Unable to request tx DMA channel\n");
2309                 goto err_dma_tx;
2310         }
2311
2312         memzero_explicit(&cfg, sizeof(cfg));
2313
2314         cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2315         cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2316         cfg.src_maxburst = 4;
2317         cfg.dst_maxburst = 4;
2318
2319         ret = dmaengine_slave_config(dd->dma_rx1, &cfg);
2320         if (ret) {
2321                 dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
2322                         ret);
2323                 goto err_dma_config;
2324         }
2325
2326         ret = dmaengine_slave_config(dd->dma_rx2, &cfg);
2327         if (ret) {
2328                 dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
2329                         ret);
2330                 goto err_dma_config;
2331         }
2332
2333         ret = dmaengine_slave_config(dd->dma_tx, &cfg);
2334         if (ret) {
2335                 dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
2336                         ret);
2337                 goto err_dma_config;
2338         }
2339
2340         return 0;
2341
2342 err_dma_config:
2343         dma_release_channel(dd->dma_tx);
2344 err_dma_tx:
2345         dma_release_channel(dd->dma_rx2);
2346 err_dma_rx2:
2347         dma_release_channel(dd->dma_rx1);
2348
2349         return ret;
2350 }
2351
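/*
 * device_for_each_child() callback: make each child populated from the
 * device tree a consumer of the SA2UL parent so that the children are
 * only probed once the parent is bound.
 */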
2352 static int sa_link_child(struct device *dev, void *data)
2353 {
2354         struct device *parent = data;
2355
2356         device_link_add(dev, parent, DL_FLAG_AUTOPROBE_CONSUMER);
2357
2358         return 0;
2359 }
2360
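/*
 * Per-SoC match data: priv/priv_id feed the security-context setup, and
 * supported_algos masks the sa_algs[] table by enum sa_algo_id bit
 * position (the AM64/AM62 variant exposes a reduced algorithm set).
 */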
2361 static struct sa_match_data am654_match_data = {
2362         .priv = 1,
2363         .priv_id = 1,
2364         .supported_algos = BIT(SA_ALG_CBC_AES) |
2365                            BIT(SA_ALG_EBC_AES) |
2366                            BIT(SA_ALG_CBC_DES3) |
2367                            BIT(SA_ALG_ECB_DES3) |
2368                            BIT(SA_ALG_SHA1) |
2369                            BIT(SA_ALG_SHA256) |
2370                            BIT(SA_ALG_SHA512) |
2371                            BIT(SA_ALG_AUTHENC_SHA1_AES) |
2372                            BIT(SA_ALG_AUTHENC_SHA256_AES),
2373 };
2374
2375 static struct sa_match_data am64_match_data = {
2376         .priv = 0,
2377         .priv_id = 0,
2378         .supported_algos = BIT(SA_ALG_CBC_AES) |
2379                            BIT(SA_ALG_EBC_AES) |
2380                            BIT(SA_ALG_SHA256) |
2381                            BIT(SA_ALG_SHA512) |
2382                            BIT(SA_ALG_AUTHENC_SHA256_AES),
2383 };
2384
2385 static const struct of_device_id of_match[] = {
2386         { .compatible = "ti,j721e-sa2ul", .data = &am654_match_data, },
2387         { .compatible = "ti,am654-sa2ul", .data = &am654_match_data, },
2388         { .compatible = "ti,am64-sa2ul", .data = &am64_match_data, },
2389         { .compatible = "ti,am62-sa3ul", .data = &am64_match_data, },
2390         {},
2391 };
2392 MODULE_DEVICE_TABLE(of, of_match);
2393
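/*
 * Probe: map the registers, resume the device, create the
 * security-context pool and DMA channels, enable any engine blocks not
 * already running, register the supported algorithms, and populate and
 * link the child devices described in the device tree.
 */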
2394 static int sa_ul_probe(struct platform_device *pdev)
2395 {
2396         struct device *dev = &pdev->dev;
2397         struct device_node *node = dev->of_node;
        void __iomem *saul_base;
2399         struct sa_crypto_data *dev_data;
2400         u32 status, val;
2401         int ret;
2402
2403         dev_data = devm_kzalloc(dev, sizeof(*dev_data), GFP_KERNEL);
2404         if (!dev_data)
2405                 return -ENOMEM;
2406
2407         dev_data->match_data = of_device_get_match_data(dev);
2408         if (!dev_data->match_data)
2409                 return -ENODEV;
2410
2411         saul_base = devm_platform_ioremap_resource(pdev, 0);
2412         if (IS_ERR(saul_base))
2413                 return PTR_ERR(saul_base);
2414
2415         sa_k3_dev = dev;
2416         dev_data->dev = dev;
2417         dev_data->pdev = pdev;
2418         dev_data->base = saul_base;
2419         platform_set_drvdata(pdev, dev_data);
2421
2422         pm_runtime_enable(dev);
2423         ret = pm_runtime_resume_and_get(dev);
2424         if (ret < 0) {
2425                 dev_err(dev, "%s: failed to get sync: %d\n", __func__, ret);
2426                 pm_runtime_disable(dev);
2427                 return ret;
2428         }
2429
        /* dma_pool_destroy() is a no-op on NULL, so the shared error path is safe */
        ret = sa_init_mem(dev_data);
        if (ret)
                goto destroy_dma_pool;

        ret = sa_dma_init(dev_data);
        if (ret)
                goto destroy_dma_pool;
2434
2435         spin_lock_init(&dev_data->scid_lock);
2436
2437         val = SA_EEC_ENCSS_EN | SA_EEC_AUTHSS_EN | SA_EEC_CTXCACH_EN |
2438               SA_EEC_CPPI_PORT_IN_EN | SA_EEC_CPPI_PORT_OUT_EN |
2439               SA_EEC_TRNG_EN;
2440         status = readl_relaxed(saul_base + SA_ENGINE_STATUS);
        /*
         * Write the enable-control register only if some requested engine
         * is not already enabled.
         */
2442         if (val & ~status)
2443                 writel_relaxed(val, saul_base + SA_ENGINE_ENABLE_CONTROL);
2444
2445         sa_register_algos(dev_data);
2446
2447         ret = of_platform_populate(node, NULL, NULL, dev);
2448         if (ret)
2449                 goto release_dma;
2450
2451         device_for_each_child(dev, dev, sa_link_child);
2452
2453         return 0;
2454
2455 release_dma:
2456         sa_unregister_algos(dev);
2457
2458         dma_release_channel(dev_data->dma_rx2);
2459         dma_release_channel(dev_data->dma_rx1);
2460         dma_release_channel(dev_data->dma_tx);
2461
2462 destroy_dma_pool:
2463         dma_pool_destroy(dev_data->sc_pool);
2464
2465         pm_runtime_put_sync(dev);
2466         pm_runtime_disable(dev);
2467
2468         return ret;
2469 }
2470
2471 static int sa_ul_remove(struct platform_device *pdev)
2472 {
2473         struct sa_crypto_data *dev_data = platform_get_drvdata(pdev);
2474
2475         of_platform_depopulate(&pdev->dev);
2476
2477         sa_unregister_algos(&pdev->dev);
2478
2479         dma_release_channel(dev_data->dma_rx2);
2480         dma_release_channel(dev_data->dma_rx1);
2481         dma_release_channel(dev_data->dma_tx);
2482
2483         dma_pool_destroy(dev_data->sc_pool);
2484
2485         platform_set_drvdata(pdev, NULL);
2486
2487         pm_runtime_put_sync(&pdev->dev);
2488         pm_runtime_disable(&pdev->dev);
2489
2490         return 0;
2491 }
2492
2493 static struct platform_driver sa_ul_driver = {
2494         .probe = sa_ul_probe,
2495         .remove = sa_ul_remove,
2496         .driver = {
2497                    .name = "saul-crypto",
2498                    .of_match_table = of_match,
2499                    },
2500 };
2501 module_platform_driver(sa_ul_driver);
2502 MODULE_LICENSE("GPL v2");
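/* A module description is expected by modpost on newer kernels */
MODULE_DESCRIPTION("TI K3 SA2UL crypto accelerator driver");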