1 /*
2 This file is provided under a dual BSD/GPLv2 license. When using or
3 redistributing this file, you may do so under either license.
6 Copyright(c) 2014 Intel Corporation.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of version 2 of the GNU General Public License as
9 published by the Free Software Foundation.
11 This program is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 General Public License for more details.
20 Copyright(c) 2014 Intel Corporation.
21 Redistribution and use in source and binary forms, with or without
22 modification, are permitted provided that the following conditions
25 * Redistributions of source code must retain the above copyright
26 notice, this list of conditions and the following disclaimer.
27 * Redistributions in binary form must reproduce the above copyright
28 notice, this list of conditions and the following disclaimer in
29 the documentation and/or other materials provided with the
31 * Neither the name of Intel Corporation nor the names of its
32 contributors may be used to endorse or promote products derived
33 from this software without specific prior written permission.
35 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
46 */
47 #include <linux/module.h>
48 #include <linux/slab.h>
49 #include <linux/crypto.h>
50 #include <crypto/internal/aead.h>
51 #include <crypto/aes.h>
52 #include <crypto/sha.h>
53 #include <crypto/hash.h>
54 #include <crypto/algapi.h>
55 #include <crypto/authenc.h>
56 #include <linux/dma-mapping.h>
57 #include "adf_accel_devices.h"
58 #include "adf_transport.h"
59 #include "adf_common_drv.h"
60 #include "qat_crypto.h"
61 #include "icp_qat_hw.h"
62 #include "icp_qat_fw.h"
63 #include "icp_qat_fw_la.h"
65 #define QAT_AES_HW_CONFIG_ENC(alg, mode) \
66 ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
67 ICP_QAT_HW_CIPHER_NO_CONVERT, \
68 ICP_QAT_HW_CIPHER_ENCRYPT)
70 #define QAT_AES_HW_CONFIG_DEC(alg, mode) \
71 ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
72 ICP_QAT_HW_CIPHER_KEY_CONVERT, \
73 ICP_QAT_HW_CIPHER_DECRYPT)
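/*
 * QAT_AES_HW_CONFIG_ENC/_DEC build the 32-bit cipher-config word that sits
 * at the start of the cipher block in a content descriptor: block mode, AES
 * key size, key-convert flag and direction.  The decrypt variant asks the
 * hardware to convert the key (derive the AES decryption key schedule); the
 * encrypt variant uses the key as supplied.  Typical use (illustrative only,
 * the exact bit layout comes from ICP_QAT_HW_CIPHER_CONFIG_BUILD):
 *
 *	cipher->aes.cipher_config.val =
 *		QAT_AES_HW_CONFIG_ENC(ICP_QAT_HW_CIPHER_ALGO_AES128,
 *				      ICP_QAT_HW_CIPHER_CBC_MODE);
 */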
75 static DEFINE_MUTEX(algs_lock);
76 static unsigned int active_devs;
84 struct qat_alg_buf_list {
87 uint32_t num_mapped_bufs;
88 struct qat_alg_buf bufers[];
89 } __packed __aligned(64);
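/*
 * Scatter/gather list as consumed by the firmware: a small header followed
 * by a flexible array of {addr, len} entries, packed and 64-byte aligned so
 * the whole structure can be handed to the device with a single
 * dma_map_single().  num_mapped_bufs records how many entries of the output
 * list were actually mapped, which the free path uses for out-of-place
 * requests.
 */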
91 /* Common content descriptor */
94 struct qat_enc { /* Encrypt content desc */
95 struct icp_qat_hw_cipher_algo_blk cipher;
96 struct icp_qat_hw_auth_algo_blk hash;
98 struct qat_dec { /* Decrypt content desc */
99 struct icp_qat_hw_auth_algo_blk hash;
100 struct icp_qat_hw_cipher_algo_blk cipher;
105 struct qat_alg_aead_ctx {
106 struct qat_alg_cd *enc_cd;
107 struct qat_alg_cd *dec_cd;
108 dma_addr_t enc_cd_paddr;
109 dma_addr_t dec_cd_paddr;
110 struct icp_qat_fw_la_bulk_req enc_fw_req;
111 struct icp_qat_fw_la_bulk_req dec_fw_req;
112 struct crypto_shash *hash_tfm;
113 enum icp_qat_hw_auth_algo qat_hash_alg;
114 struct qat_crypto_instance *inst;
117 struct qat_alg_ablkcipher_ctx {
118 struct icp_qat_hw_cipher_algo_blk *enc_cd;
119 struct icp_qat_hw_cipher_algo_blk *dec_cd;
120 dma_addr_t enc_cd_paddr;
121 dma_addr_t dec_cd_paddr;
122 struct icp_qat_fw_la_bulk_req enc_fw_req;
123 struct icp_qat_fw_la_bulk_req dec_fw_req;
124 struct qat_crypto_instance *inst;
125 struct crypto_tfm *tfm;
126 spinlock_t lock; /* protects qat_alg_ablkcipher_ctx struct */
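/*
 * Both transform contexts follow the same pattern: the encrypt and decrypt
 * content descriptors live in DMA-coherent memory (enc_cd/dec_cd plus their
 * bus addresses), while enc_fw_req/dec_fw_req are fully pre-built firmware
 * request templates that each request copies and then patches with the
 * per-request buffer addresses and lengths.  The ablkcipher context also
 * carries a spinlock to serialize setkey against the lazy instance and
 * descriptor allocation.
 */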
129 static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
131 switch (qat_hash_alg) {
132 case ICP_QAT_HW_AUTH_ALGO_SHA1:
133 return ICP_QAT_HW_SHA1_STATE1_SZ;
134 case ICP_QAT_HW_AUTH_ALGO_SHA256:
135 return ICP_QAT_HW_SHA256_STATE1_SZ;
136 case ICP_QAT_HW_AUTH_ALGO_SHA512:
137 return ICP_QAT_HW_SHA512_STATE1_SZ;
144 static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
145 struct qat_alg_aead_ctx *ctx,
146 const uint8_t *auth_key,
147 unsigned int auth_keylen)
149 SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
150 struct sha1_state sha1;
151 struct sha256_state sha256;
152 struct sha512_state sha512;
153 int block_size = crypto_shash_blocksize(ctx->hash_tfm);
154 int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
155 char ipad[block_size];
156 char opad[block_size];
157 __be32 *hash_state_out;
158 __be64 *hash512_state_out;
161 memset(ipad, 0, block_size);
162 memset(opad, 0, block_size);
163 shash->tfm = ctx->hash_tfm;
166 if (auth_keylen > block_size) {
167 int ret = crypto_shash_digest(shash, auth_key,
172 memcpy(opad, ipad, digest_size);
174 memcpy(ipad, auth_key, auth_keylen);
175 memcpy(opad, auth_key, auth_keylen);
178 for (i = 0; i < block_size; i++) {
179 char *ipad_ptr = ipad + i;
180 char *opad_ptr = opad + i;
185 if (crypto_shash_init(shash))
188 if (crypto_shash_update(shash, ipad, block_size))
191 hash_state_out = (__be32 *)hash->sha.state1;
192 hash512_state_out = (__be64 *)hash_state_out;
194 switch (ctx->qat_hash_alg) {
195 case ICP_QAT_HW_AUTH_ALGO_SHA1:
196 if (crypto_shash_export(shash, &sha1))
198 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
199 *hash_state_out = cpu_to_be32(*(sha1.state + i));
201 case ICP_QAT_HW_AUTH_ALGO_SHA256:
202 if (crypto_shash_export(shash, &sha256))
204 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
205 *hash_state_out = cpu_to_be32(*(sha256.state + i));
207 case ICP_QAT_HW_AUTH_ALGO_SHA512:
208 if (crypto_shash_export(shash, &sha512))
210 for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
211 *hash512_state_out = cpu_to_be64(*(sha512.state + i));
217 if (crypto_shash_init(shash))
220 if (crypto_shash_update(shash, opad, block_size))
223 offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
224 hash_state_out = (__be32 *)(hash->sha.state1 + offset);
225 hash512_state_out = (__be64 *)hash_state_out;
227 switch (ctx->qat_hash_alg) {
228 case ICP_QAT_HW_AUTH_ALGO_SHA1:
229 if (crypto_shash_export(shash, &sha1))
231 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
232 *hash_state_out = cpu_to_be32(*(sha1.state + i));
234 case ICP_QAT_HW_AUTH_ALGO_SHA256:
235 if (crypto_shash_export(shash, &sha256))
237 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
238 *hash_state_out = cpu_to_be32(*(sha256.state + i));
240 case ICP_QAT_HW_AUTH_ALGO_SHA512:
241 if (crypto_shash_export(shash, &sha512))
243 for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
244 *hash512_state_out = cpu_to_be64(*(sha512.state + i));
249 memzero_explicit(ipad, block_size);
250 memzero_explicit(opad, block_size);
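/*
 * The precompute above implements the standard HMAC trick: hash the key
 * XORed with the 0x36 (ipad) and 0x5c (opad) constants, export the partial
 * digests with crypto_shash_export(), and store them big-endian in the
 * hardware auth setup block.  The device can then resume both hashes per
 * request instead of re-hashing the key every time.  Roughly:
 *
 *	H(key ^ ipad)  ->  hash->sha.state1
 *	H(key ^ opad)  ->  hash->sha.state1 + round_up(state1_sz, 8)
 */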
254 static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
256 header->hdr_flags =
257 ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
258 header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
259 header->comn_req_flags =
260 ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
261 QAT_COMN_PTR_TYPE_SGL);
262 ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
263 ICP_QAT_FW_LA_PARTIAL_NONE);
264 ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
265 ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
266 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
267 ICP_QAT_FW_LA_NO_PROTO);
268 ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
269 ICP_QAT_FW_LA_NO_UPDATE_STATE);
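/*
 * Every request template shares this header: lookaside (LA) crypto service,
 * 64-bit content-descriptor pointer, scatter/gather buffer lists, no partial
 * packets and no state carried between requests.  The AEAD and ablkcipher
 * session-init code below only adjusts the service command id and the
 * digest/auth flags on top of it.
 */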
272 static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
274 struct crypto_authenc_keys *keys,
277 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
278 unsigned int digestsize = crypto_aead_authsize(aead_tfm);
279 struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
280 struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
281 struct icp_qat_hw_auth_algo_blk *hash =
282 (struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
283 sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
284 struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
285 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
286 struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
287 void *ptr = &req_tmpl->cd_ctrl;
288 struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
289 struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
292 cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
293 memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
294 hash->sha.inner_setup.auth_config.config =
295 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
296 ctx->qat_hash_alg, digestsize);
297 hash->sha.inner_setup.auth_counter.counter =
298 cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
300 if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
304 qat_alg_init_common_hdr(header);
305 header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
306 ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
307 ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
308 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
309 ICP_QAT_FW_LA_RET_AUTH_RES);
310 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
311 ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
312 cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
313 cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
315 /* Cipher CD config setup */
316 cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
317 cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
318 cipher_cd_ctrl->cipher_cfg_offset = 0;
319 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
320 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
321 /* Auth CD config setup */
322 hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
323 hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
324 hash_cd_ctrl->inner_res_sz = digestsize;
325 hash_cd_ctrl->final_sz = digestsize;
327 switch (ctx->qat_hash_alg) {
328 case ICP_QAT_HW_AUTH_ALGO_SHA1:
329 hash_cd_ctrl->inner_state1_sz =
330 round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
331 hash_cd_ctrl->inner_state2_sz =
332 round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
334 case ICP_QAT_HW_AUTH_ALGO_SHA256:
335 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
336 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
338 case ICP_QAT_HW_AUTH_ALGO_SHA512:
339 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
340 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
345 hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
346 ((sizeof(struct icp_qat_hw_auth_setup) +
347 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
348 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
349 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
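/*
 * Encrypt-side AEAD content descriptor: the AES cipher block comes first and
 * the pre-computed hash setup follows it, so hash_cfg_offset is simply the
 * distance between the two in 8-byte words.  The slice chain is
 * CIPHER -> AUTH -> DRAM_WR: encrypt first, then MAC the associated data
 * plus ciphertext, with the digest written into the output buffer
 * (DIGEST_IN_BUFFER | RET_AUTH_RES).
 */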
353 static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
355 struct crypto_authenc_keys *keys,
358 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
359 unsigned int digestsize = crypto_aead_authsize(aead_tfm);
360 struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
361 struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
362 struct icp_qat_hw_cipher_algo_blk *cipher =
363 (struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
364 sizeof(struct icp_qat_hw_auth_setup) +
365 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
366 struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
367 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
368 struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
369 void *ptr = &req_tmpl->cd_ctrl;
370 struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
371 struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
372 struct icp_qat_fw_la_auth_req_params *auth_param =
373 (struct icp_qat_fw_la_auth_req_params *)
374 ((char *)&req_tmpl->serv_specif_rqpars +
375 sizeof(struct icp_qat_fw_la_cipher_req_params));
378 cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg, mode);
379 memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
380 hash->sha.inner_setup.auth_config.config =
381 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
384 hash->sha.inner_setup.auth_counter.counter =
385 cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
387 if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
391 qat_alg_init_common_hdr(header);
392 header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
393 ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
394 ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
395 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
396 ICP_QAT_FW_LA_NO_RET_AUTH_RES);
397 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
398 ICP_QAT_FW_LA_CMP_AUTH_RES);
399 cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
400 cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
402 /* Cipher CD config setup */
403 cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
404 cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
405 cipher_cd_ctrl->cipher_cfg_offset =
406 (sizeof(struct icp_qat_hw_auth_setup) +
407 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
408 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
409 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
411 /* Auth CD config setup */
412 hash_cd_ctrl->hash_cfg_offset = 0;
413 hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
414 hash_cd_ctrl->inner_res_sz = digestsize;
415 hash_cd_ctrl->final_sz = digestsize;
417 switch (ctx->qat_hash_alg) {
418 case ICP_QAT_HW_AUTH_ALGO_SHA1:
419 hash_cd_ctrl->inner_state1_sz =
420 round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
421 hash_cd_ctrl->inner_state2_sz =
422 round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
424 case ICP_QAT_HW_AUTH_ALGO_SHA256:
425 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
426 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
428 case ICP_QAT_HW_AUTH_ALGO_SHA512:
429 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
430 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
436 hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
437 ((sizeof(struct icp_qat_hw_auth_setup) +
438 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
439 auth_param->auth_res_sz = digestsize;
440 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
441 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
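/*
 * Decrypt-side content descriptor mirrors the encrypt one but with the hash
 * setup first and the cipher config placed after the two rounded-up digest
 * states, and with the slice chain reversed: AUTH -> CIPHER -> DRAM_WR.  The
 * hardware compares the trailing digest itself (CMP_AUTH_RES), so an
 * authentication failure comes back as an error status in the firmware
 * response rather than as returned digest data.
 */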
445 static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx,
446 struct icp_qat_fw_la_bulk_req *req,
447 struct icp_qat_hw_cipher_algo_blk *cd,
448 const uint8_t *key, unsigned int keylen)
450 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
451 struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
452 struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
454 memcpy(cd->aes.key, key, keylen);
455 qat_alg_init_common_hdr(header);
456 header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
457 cd_pars->u.s.content_desc_params_sz =
458 sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
459 /* Cipher CD config setup */
460 cd_ctrl->cipher_key_sz = keylen >> 3;
461 cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
462 cd_ctrl->cipher_cfg_offset = 0;
463 ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
464 ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
467 static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx,
468 int alg, const uint8_t *key,
469 unsigned int keylen, int mode)
471 struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
472 struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
473 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
475 qat_alg_ablkcipher_init_com(ctx, req, enc_cd, key, keylen);
476 cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
477 enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
480 static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx,
481 int alg, const uint8_t *key,
482 unsigned int keylen, int mode)
484 struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
485 struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
486 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
488 qat_alg_ablkcipher_init_com(ctx, req, dec_cd, key, keylen);
489 cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
491 if (mode != ICP_QAT_HW_CIPHER_CTR_MODE)
492 dec_cd->aes.cipher_config.val =
493 QAT_AES_HW_CONFIG_DEC(alg, mode);
495 dec_cd->aes.cipher_config.val =
496 QAT_AES_HW_CONFIG_ENC(alg, mode);
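/*
 * Note the CTR special case above: counter mode only ever runs the AES block
 * cipher in the encrypt direction (the keystream is XORed with the data), so
 * the "decrypt" descriptor still uses the encrypt configuration and no key
 * conversion is needed.
 */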
499 static int qat_alg_validate_key(int key_len, int *alg, int mode)
501 if (mode != ICP_QAT_HW_CIPHER_XTS_MODE) {
503 case AES_KEYSIZE_128:
504 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
506 case AES_KEYSIZE_192:
507 *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
509 case AES_KEYSIZE_256:
510 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
517 case AES_KEYSIZE_128 << 1:
518 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
520 case AES_KEYSIZE_256 << 1:
521 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
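/*
 * XTS keys carry two full AES keys (data-unit key plus tweak key), which is
 * why the XTS cases above match on doubled key sizes while still selecting
 * the single-key AES128/AES256 algorithm ids.
 */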
530 static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
531 unsigned int keylen, int mode)
533 struct crypto_authenc_keys keys;
536 if (crypto_authenc_extractkeys(&keys, key, keylen))
539 if (qat_alg_validate_key(keys.enckeylen, &alg, mode))
542 if (qat_alg_aead_init_enc_session(tfm, alg, &keys, mode))
545 if (qat_alg_aead_init_dec_session(tfm, alg, &keys, mode))
550 crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
556 static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx *ctx,
563 if (qat_alg_validate_key(keylen, &alg, mode))
566 qat_alg_ablkcipher_init_enc(ctx, alg, key, keylen, mode);
567 qat_alg_ablkcipher_init_dec(ctx, alg, key, keylen, mode);
570 crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
574 static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
577 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
582 dev = &GET_DEV(ctx->inst->accel_dev);
583 memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
584 memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
585 memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
586 memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
589 int node = get_current_node();
590 struct qat_crypto_instance *inst =
591 qat_crypto_get_instance_node(node);
596 dev = &GET_DEV(inst->accel_dev);
598 ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
604 ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
611 if (qat_alg_aead_init_sessions(tfm, key, keylen,
612 ICP_QAT_HW_CIPHER_CBC_MODE))
618 memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
619 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
620 ctx->dec_cd, ctx->dec_cd_paddr);
623 memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
624 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
625 ctx->enc_cd, ctx->enc_cd_paddr);
630 static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
631 struct qat_crypto_request *qat_req)
633 struct device *dev = &GET_DEV(inst->accel_dev);
634 struct qat_alg_buf_list *bl = qat_req->buf.bl;
635 struct qat_alg_buf_list *blout = qat_req->buf.blout;
636 dma_addr_t blp = qat_req->buf.blp;
637 dma_addr_t blpout = qat_req->buf.bloutp;
638 size_t sz = qat_req->buf.sz;
639 size_t sz_out = qat_req->buf.sz_out;
642 for (i = 0; i < bl->num_bufs; i++)
643 dma_unmap_single(dev, bl->bufers[i].addr,
644 bl->bufers[i].len, DMA_BIDIRECTIONAL);
646 dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
649 /* For out-of-place operations, dma unmap only the mapped data buffers */
650 int bufless = blout->num_bufs - blout->num_mapped_bufs;
652 for (i = bufless; i < blout->num_bufs; i++) {
653 dma_unmap_single(dev, blout->bufers[i].addr,
654 blout->bufers[i].len,
657 dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
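/*
 * Tear-down is the mirror image of qat_alg_sgl_to_bufl() below: unmap every
 * data buffer (bidirectional), unmap the buffer-list structure itself
 * (to-device only), and for out-of-place requests repeat the same for the
 * separately allocated output list before freeing the kzalloc'd lists.
 */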
662 static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
663 struct scatterlist *sgl,
664 struct scatterlist *sglout,
665 struct qat_crypto_request *qat_req)
667 struct device *dev = &GET_DEV(inst->accel_dev);
669 int n = sg_nents(sgl);
670 struct qat_alg_buf_list *bufl;
671 struct qat_alg_buf_list *buflout = NULL;
673 dma_addr_t bloutp = 0;
674 struct scatterlist *sg;
675 size_t sz_out, sz = sizeof(struct qat_alg_buf_list) +
676 ((1 + n) * sizeof(struct qat_alg_buf));
681 bufl = kzalloc_node(sz, GFP_ATOMIC,
682 dev_to_node(&GET_DEV(inst->accel_dev)));
686 blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
687 if (unlikely(dma_mapping_error(dev, blp)))
690 for_each_sg(sgl, sg, n, i) {
696 bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
699 bufl->bufers[y].len = sg->length;
700 if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
704 bufl->num_bufs = sg_nctr;
705 qat_req->buf.bl = bufl;
706 qat_req->buf.blp = blp;
707 qat_req->buf.sz = sz;
708 /* Handle out of place operation */
710 struct qat_alg_buf *bufers;
712 n = sg_nents(sglout);
713 sz_out = sizeof(struct qat_alg_buf_list) +
714 ((1 + n) * sizeof(struct qat_alg_buf));
716 buflout = kzalloc_node(sz_out, GFP_ATOMIC,
717 dev_to_node(&GET_DEV(inst->accel_dev)));
718 if (unlikely(!buflout))
720 bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
721 if (unlikely(dma_mapping_error(dev, bloutp)))
723 bufers = buflout->bufers;
724 for_each_sg(sglout, sg, n, i) {
730 bufers[y].addr = dma_map_single(dev, sg_virt(sg),
733 if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
735 bufers[y].len = sg->length;
738 buflout->num_bufs = sg_nctr;
739 buflout->num_mapped_bufs = sg_nctr;
740 qat_req->buf.blout = buflout;
741 qat_req->buf.bloutp = bloutp;
742 qat_req->buf.sz_out = sz_out;
744 /* Otherwise set the src and dst to the same address */
745 qat_req->buf.bloutp = qat_req->buf.blp;
746 qat_req->buf.sz_out = 0;
750 dev_err(dev, "Failed to map buf for dma\n");
752 for (i = 0; i < n; i++)
753 if (!dma_mapping_error(dev, bufl->bufers[i].addr))
754 dma_unmap_single(dev, bufl->bufers[i].addr,
758 if (!dma_mapping_error(dev, blp))
759 dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
761 if (sgl != sglout && buflout) {
762 n = sg_nents(sglout);
763 for (i = 0; i < n; i++)
764 if (!dma_mapping_error(dev, buflout->bufers[i].addr))
765 dma_unmap_single(dev, buflout->bufers[i].addr,
766 buflout->bufers[i].len,
768 if (!dma_mapping_error(dev, bloutp))
769 dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
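/*
 * qat_alg_sgl_to_bufl() builds the firmware-visible buffer lists: the list
 * metadata is allocated with GFP_ATOMIC on the device's NUMA node and mapped
 * to-device, each scatterlist entry is mapped bidirectionally, and a second
 * list is only built when src != dst (out of place).  The error path above
 * walks back whatever was mapped so far, checking each handle with
 * dma_mapping_error() before unmapping it.
 */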
775 static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
776 struct qat_crypto_request *qat_req)
778 struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
779 struct qat_crypto_instance *inst = ctx->inst;
780 struct aead_request *areq = qat_req->aead_req;
781 uint8_t stat_field = qat_resp->comn_resp.comn_status;
782 int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
784 qat_alg_free_bufl(inst, qat_req);
785 if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
786 res = -EBADMSG;
787 areq->base.complete(&areq->base, res);
790 static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
791 struct qat_crypto_request *qat_req)
793 struct qat_alg_ablkcipher_ctx *ctx = qat_req->ablkcipher_ctx;
794 struct qat_crypto_instance *inst = ctx->inst;
795 struct ablkcipher_request *areq = qat_req->ablkcipher_req;
796 uint8_t stat_field = qat_resp->comn_resp.comn_status;
797 int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
799 qat_alg_free_bufl(inst, qat_req);
800 if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
801 res = -EINVAL;
802 areq->base.complete(&areq->base, res);
805 void qat_alg_callback(void *resp)
807 struct icp_qat_fw_la_resp *qat_resp = resp;
808 struct qat_crypto_request *qat_req =
809 (void *)(__force long)qat_resp->opaque_data;
811 qat_req->cb(qat_resp, qat_req);
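/*
 * qat_alg_callback() runs from the transport's response-ring handler.  The
 * request pointer makes a round trip through the 64-bit opaque_data field of
 * the firmware message, e.g. on submit:
 *
 *	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
 *
 * and is recovered here to dispatch to the AEAD or ablkcipher completion
 * handler stored in qat_req->cb.
 */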
814 static int qat_alg_aead_dec(struct aead_request *areq)
816 struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
817 struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
818 struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
819 struct qat_crypto_request *qat_req = aead_request_ctx(areq);
820 struct icp_qat_fw_la_cipher_req_params *cipher_param;
821 struct icp_qat_fw_la_auth_req_params *auth_param;
822 struct icp_qat_fw_la_bulk_req *msg;
823 int digest_size = crypto_aead_authsize(aead_tfm);
826 ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
831 *msg = ctx->dec_fw_req;
832 qat_req->aead_ctx = ctx;
833 qat_req->aead_req = areq;
834 qat_req->cb = qat_aead_alg_callback;
835 qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
836 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
837 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
838 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
839 cipher_param->cipher_length = areq->cryptlen - digest_size;
840 cipher_param->cipher_offset = areq->assoclen;
841 memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
842 auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
843 auth_param->auth_off = 0;
844 auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
846 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
847 } while (ret == -EAGAIN && ctr++ < 10);
849 if (ret == -EAGAIN) {
850 qat_alg_free_bufl(ctx->inst, qat_req);
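/*
 * Submission is best effort: adf_send_message() may return -EAGAIN when the
 * ring is full, so the request is retried up to 10 times before the mapped
 * buffers are released and an error (-EBUSY) is returned to the caller;
 * otherwise the call returns -EINPROGRESS and completion is signalled from
 * the callbacks above.
 */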
856 static int qat_alg_aead_enc(struct aead_request *areq)
858 struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
859 struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
860 struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
861 struct qat_crypto_request *qat_req = aead_request_ctx(areq);
862 struct icp_qat_fw_la_cipher_req_params *cipher_param;
863 struct icp_qat_fw_la_auth_req_params *auth_param;
864 struct icp_qat_fw_la_bulk_req *msg;
865 uint8_t *iv = areq->iv;
868 ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
873 *msg = ctx->enc_fw_req;
874 qat_req->aead_ctx = ctx;
875 qat_req->aead_req = areq;
876 qat_req->cb = qat_aead_alg_callback;
877 qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
878 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
879 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
880 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
881 auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
883 memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
884 cipher_param->cipher_length = areq->cryptlen;
885 cipher_param->cipher_offset = areq->assoclen;
887 auth_param->auth_off = 0;
888 auth_param->auth_len = areq->assoclen + areq->cryptlen;
891 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
892 } while (ret == -EAGAIN && ctr++ < 10);
894 if (ret == -EAGAIN) {
895 qat_alg_free_bufl(ctx->inst, qat_req);
901 static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
902 const u8 *key, unsigned int keylen,
905 struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
908 spin_lock(&ctx->lock);
911 dev = &GET_DEV(ctx->inst->accel_dev);
912 memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
913 memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
914 memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
915 memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
918 int node = get_current_node();
919 struct qat_crypto_instance *inst =
920 qat_crypto_get_instance_node(node);
922 spin_unlock(&ctx->lock);
926 dev = &GET_DEV(inst->accel_dev);
928 ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
932 spin_unlock(&ctx->lock);
935 ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
939 spin_unlock(&ctx->lock);
943 spin_unlock(&ctx->lock);
944 if (qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode))
950 memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
951 dma_free_coherent(dev, sizeof(*ctx->dec_cd),
952 ctx->dec_cd, ctx->dec_cd_paddr);
955 memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
956 dma_free_coherent(dev, sizeof(*ctx->enc_cd),
957 ctx->enc_cd, ctx->enc_cd_paddr);
962 static int qat_alg_ablkcipher_cbc_setkey(struct crypto_ablkcipher *tfm,
963 const u8 *key, unsigned int keylen)
965 return qat_alg_ablkcipher_setkey(tfm, key, keylen,
966 ICP_QAT_HW_CIPHER_CBC_MODE);
969 static int qat_alg_ablkcipher_ctr_setkey(struct crypto_ablkcipher *tfm,
970 const u8 *key, unsigned int keylen)
972 return qat_alg_ablkcipher_setkey(tfm, key, keylen,
973 ICP_QAT_HW_CIPHER_CTR_MODE);
976 static int qat_alg_ablkcipher_xts_setkey(struct crypto_ablkcipher *tfm,
977 const u8 *key, unsigned int keylen)
979 return qat_alg_ablkcipher_setkey(tfm, key, keylen,
980 ICP_QAT_HW_CIPHER_XTS_MODE);
983 static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
985 struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
986 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
987 struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
988 struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
989 struct icp_qat_fw_la_cipher_req_params *cipher_param;
990 struct icp_qat_fw_la_bulk_req *msg;
993 ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
998 *msg = ctx->enc_fw_req;
999 qat_req->ablkcipher_ctx = ctx;
1000 qat_req->ablkcipher_req = req;
1001 qat_req->cb = qat_ablkcipher_alg_callback;
1002 qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
1003 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
1004 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
1005 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
1006 cipher_param->cipher_length = req->nbytes;
1007 cipher_param->cipher_offset = 0;
1008 memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
1010 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
1011 } while (ret == -EAGAIN && ctr++ < 10);
1013 if (ret == -EAGAIN) {
1014 qat_alg_free_bufl(ctx->inst, qat_req);
1017 return -EINPROGRESS;
1020 static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
1022 struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
1023 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
1024 struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1025 struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
1026 struct icp_qat_fw_la_cipher_req_params *cipher_param;
1027 struct icp_qat_fw_la_bulk_req *msg;
1030 ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
1034 msg = &qat_req->req;
1035 *msg = ctx->dec_fw_req;
1036 qat_req->ablkcipher_ctx = ctx;
1037 qat_req->ablkcipher_req = req;
1038 qat_req->cb = qat_ablkcipher_alg_callback;
1039 qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
1040 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
1041 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
1042 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
1043 cipher_param->cipher_length = req->nbytes;
1044 cipher_param->cipher_offset = 0;
1045 memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
1047 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
1048 } while (ret == -EAGAIN && ctr++ < 10);
1050 if (ret == -EAGAIN) {
1051 qat_alg_free_bufl(ctx->inst, qat_req);
1054 return -EINPROGRESS;
1057 static int qat_alg_aead_init(struct crypto_aead *tfm,
1058 enum icp_qat_hw_auth_algo hash,
1059 const char *hash_name)
1061 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
1063 ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
1064 if (IS_ERR(ctx->hash_tfm))
1065 return PTR_ERR(ctx->hash_tfm);
1066 ctx->qat_hash_alg = hash;
1067 crypto_aead_set_reqsize(tfm, sizeof(struct qat_crypto_request));
1071 static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
1073 return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
1076 static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
1078 return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
1081 static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
1083 return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
1086 static void qat_alg_aead_exit(struct crypto_aead *tfm)
1088 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
1089 struct qat_crypto_instance *inst = ctx->inst;
1092 crypto_free_shash(ctx->hash_tfm);
1097 dev = &GET_DEV(inst->accel_dev);
1099 memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
1100 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
1101 ctx->enc_cd, ctx->enc_cd_paddr);
1104 memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
1105 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
1106 ctx->dec_cd, ctx->dec_cd_paddr);
1108 qat_crypto_put_instance(inst);
1111 static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm)
1113 struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1115 spin_lock_init(&ctx->lock);
1116 tfm->crt_ablkcipher.reqsize = sizeof(struct qat_crypto_request);
1121 static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm)
1123 struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1124 struct qat_crypto_instance *inst = ctx->inst;
1130 dev = &GET_DEV(inst->accel_dev);
1132 memset(ctx->enc_cd, 0,
1133 sizeof(struct icp_qat_hw_cipher_algo_blk));
1134 dma_free_coherent(dev,
1135 sizeof(struct icp_qat_hw_cipher_algo_blk),
1136 ctx->enc_cd, ctx->enc_cd_paddr);
1139 memset(ctx->dec_cd, 0,
1140 sizeof(struct icp_qat_hw_cipher_algo_blk));
1141 dma_free_coherent(dev,
1142 sizeof(struct icp_qat_hw_cipher_algo_blk),
1143 ctx->dec_cd, ctx->dec_cd_paddr);
1145 qat_crypto_put_instance(inst);
1149 static struct aead_alg qat_aeads[] = { {
1151 .cra_name = "authenc(hmac(sha1),cbc(aes))",
1152 .cra_driver_name = "qat_aes_cbc_hmac_sha1",
1153 .cra_priority = 4001,
1154 .cra_flags = CRYPTO_ALG_ASYNC,
1155 .cra_blocksize = AES_BLOCK_SIZE,
1156 .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1157 .cra_module = THIS_MODULE,
1159 .init = qat_alg_aead_sha1_init,
1160 .exit = qat_alg_aead_exit,
1161 .setkey = qat_alg_aead_setkey,
1162 .decrypt = qat_alg_aead_dec,
1163 .encrypt = qat_alg_aead_enc,
1164 .ivsize = AES_BLOCK_SIZE,
1165 .maxauthsize = SHA1_DIGEST_SIZE,
1168 .cra_name = "authenc(hmac(sha256),cbc(aes))",
1169 .cra_driver_name = "qat_aes_cbc_hmac_sha256",
1170 .cra_priority = 4001,
1171 .cra_flags = CRYPTO_ALG_ASYNC,
1172 .cra_blocksize = AES_BLOCK_SIZE,
1173 .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1174 .cra_module = THIS_MODULE,
1176 .init = qat_alg_aead_sha256_init,
1177 .exit = qat_alg_aead_exit,
1178 .setkey = qat_alg_aead_setkey,
1179 .decrypt = qat_alg_aead_dec,
1180 .encrypt = qat_alg_aead_enc,
1181 .ivsize = AES_BLOCK_SIZE,
1182 .maxauthsize = SHA256_DIGEST_SIZE,
1185 .cra_name = "authenc(hmac(sha512),cbc(aes))",
1186 .cra_driver_name = "qat_aes_cbc_hmac_sha512",
1187 .cra_priority = 4001,
1188 .cra_flags = CRYPTO_ALG_ASYNC,
1189 .cra_blocksize = AES_BLOCK_SIZE,
1190 .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1191 .cra_module = THIS_MODULE,
1193 .init = qat_alg_aead_sha512_init,
1194 .exit = qat_alg_aead_exit,
1195 .setkey = qat_alg_aead_setkey,
1196 .decrypt = qat_alg_aead_dec,
1197 .encrypt = qat_alg_aead_enc,
1198 .ivsize = AES_BLOCK_SIZE,
1199 .maxauthsize = SHA512_DIGEST_SIZE,
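/*
 * The three AEAD algorithms above are reached through the normal kernel
 * crypto API.  A minimal usage sketch (illustrative only, error handling
 * omitted, key blob in the usual authenc() format):
 *
 *	struct crypto_aead *tfm;
 *
 *	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
 *	crypto_aead_setkey(tfm, authenc_key, authenc_keylen);
 *	crypto_aead_setauthsize(tfm, SHA1_DIGEST_SIZE);
 *	...
 *	crypto_free_aead(tfm);
 *
 * With cra_priority 4001 these implementations win over the generic software
 * authenc() templates whenever a QAT device is present.
 */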
1202 static struct crypto_alg qat_algs[] = { {
1203 .cra_name = "cbc(aes)",
1204 .cra_driver_name = "qat_aes_cbc",
1205 .cra_priority = 4001,
1206 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1207 .cra_blocksize = AES_BLOCK_SIZE,
1208 .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
1210 .cra_type = &crypto_ablkcipher_type,
1211 .cra_module = THIS_MODULE,
1212 .cra_init = qat_alg_ablkcipher_init,
1213 .cra_exit = qat_alg_ablkcipher_exit,
1216 .setkey = qat_alg_ablkcipher_cbc_setkey,
1217 .decrypt = qat_alg_ablkcipher_decrypt,
1218 .encrypt = qat_alg_ablkcipher_encrypt,
1219 .min_keysize = AES_MIN_KEY_SIZE,
1220 .max_keysize = AES_MAX_KEY_SIZE,
1221 .ivsize = AES_BLOCK_SIZE,
1225 .cra_name = "ctr(aes)",
1226 .cra_driver_name = "qat_aes_ctr",
1227 .cra_priority = 4001,
1228 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1229 .cra_blocksize = AES_BLOCK_SIZE,
1230 .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
1232 .cra_type = &crypto_ablkcipher_type,
1233 .cra_module = THIS_MODULE,
1234 .cra_init = qat_alg_ablkcipher_init,
1235 .cra_exit = qat_alg_ablkcipher_exit,
1238 .setkey = qat_alg_ablkcipher_ctr_setkey,
1239 .decrypt = qat_alg_ablkcipher_decrypt,
1240 .encrypt = qat_alg_ablkcipher_encrypt,
1241 .min_keysize = AES_MIN_KEY_SIZE,
1242 .max_keysize = AES_MAX_KEY_SIZE,
1243 .ivsize = AES_BLOCK_SIZE,
1247 .cra_name = "xts(aes)",
1248 .cra_driver_name = "qat_aes_xts",
1249 .cra_priority = 4001,
1250 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1251 .cra_blocksize = AES_BLOCK_SIZE,
1252 .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
1254 .cra_type = &crypto_ablkcipher_type,
1255 .cra_module = THIS_MODULE,
1256 .cra_init = qat_alg_ablkcipher_init,
1257 .cra_exit = qat_alg_ablkcipher_exit,
1260 .setkey = qat_alg_ablkcipher_xts_setkey,
1261 .decrypt = qat_alg_ablkcipher_decrypt,
1262 .encrypt = qat_alg_ablkcipher_encrypt,
1263 .min_keysize = 2 * AES_MIN_KEY_SIZE,
1264 .max_keysize = 2 * AES_MAX_KEY_SIZE,
1265 .ivsize = AES_BLOCK_SIZE,
1270 int qat_algs_register(void)
1274 mutex_lock(&algs_lock);
1275 if (++active_devs != 1)
1278 for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
1279 qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
1281 ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
1285 for (i = 0; i < ARRAY_SIZE(qat_aeads); i++)
1286 qat_aeads[i].base.cra_flags = CRYPTO_ALG_ASYNC;
1288 ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
1293 mutex_unlock(&algs_lock);
1297 crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
1301 void qat_algs_unregister(void)
1303 mutex_lock(&algs_lock);
1304 if (--active_devs != 0)
1307 crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
1308 crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
1311 mutex_unlock(&algs_lock);
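/*
 * Registration is reference counted across accelerator devices: only the
 * first qat_algs_register() call actually registers the ablkcipher and AEAD
 * algorithms with the crypto API, and only the last qat_algs_unregister()
 * removes them, all serialized by algs_lock.
 */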