1 /* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil -*- */
2 /* lib/crypto/nss/enc_provider/enc_gen.c */
4 * Copyright (c) 2010 Red Hat, Inc.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
14 * * Redistributions in binary form must reproduce the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer in the documentation and/or other materials provided
17 * with the distribution.
19 * * Neither the name of Red Hat, Inc., nor the names of its
20 * contributors may be used to endorse or promote products derived
21 * from this software without specific prior written permission.
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
24 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
25 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
26 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
27 * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
28 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
29 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
30 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
31 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
32 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
33 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 #include "crypto_int.h"
43 /* 512 bits is bigger than anything defined to date */
44 #define MAX_KEY_LENGTH 64
45 #define MAX_BLOCK_SIZE 64
/* Library-wide NSS initialization context shared by all krb5 crypto calls. */
47 static NSSInitContext *k5_nss_ctx = NULL;
/* Process that created k5_nss_ctx; used to detect use across fork(). */
48 static pid_t k5_nss_pid = 0;
/* Guards k5_nss_ctx and k5_nss_pid during init and fork recovery. */
49 static k5_mutex_t k5_nss_lock = K5_MUTEX_PARTIAL_INITIALIZER;
/* NOTE(review): this chunk appears truncated; the enclosing struct
 * declarations (apparently "struct stream_state" for stream-cipher state
 * and a cached-key struct used by k5_nss_gen_import) are not visible
 * here — confirm against the full file. */
52 struct stream_state *loopback; /* To detect copying */
53 pid_t pid; /* To detect use across fork */
/* Field of the cached-key structure (see k5_nss_gen_import/cleanup). */
58 pid_t pid; /* To detect use across fork */
/* Map an NSS/NSPR error code to a krb5 error code. */
63 k5_nss_map_error(int nss_error)
65 /* Currently KRB5 does not define a full set of CRYPTO failures.
66 * for now just use KRB5_CRYPTO_INTERNAL. We really should return
67 * errors for Not logged in, and maybe a few others. */
68 return KRB5_CRYPTO_INTERNAL;
/* Map the most recent NSPR error (PORT_GetError()) to a krb5 error code. */
72 k5_nss_map_last_error(void)
74 return k5_nss_map_error(PORT_GetError());
/* One-time crypto-module setup: finish initializing the global NSS lock. */
78 krb5int_crypto_impl_init(void)
80 return k5_mutex_finish_init(&k5_nss_lock);
/* Crypto-module teardown: destroy the global NSS lock. */
84 krb5int_crypto_impl_cleanup(void)
86 k5_mutex_destroy(&k5_nss_lock);
90 * krb5 doesn't have a call into the crypto engine to initialize it, so we do
91 * it here. This code will try to piggyback on any application initialization
92 * done to NSS. Otherwise get our one library init context.
94 #define NSS_KRB5_CONFIGDIR "sql:/etc/pki/nssdb"
/* Read-only init; NSS_INIT_NOROOTINIT avoids loading root CA certs. */
98 PRUint32 flags = NSS_INIT_READONLY | NSS_INIT_NOROOTINIT;
/* Serialize initialization and fork recovery against other threads. */
103 ret = k5_mutex_lock(&k5_nss_lock);
108 if (k5_nss_ctx != NULL) {
109 /* Do nothing if the existing context is still good. */
110 if (k5_nss_pid == pid)
112 /* The caller has forked. Restart the NSS modules. This will
113 * invalidate all of our PKCS11 handles, which we're prepared for. */
114 rv = SECMOD_RestartModules(TRUE);
115 if (rv != SECSuccess) {
116 ret = k5_nss_map_last_error();
/* First try with the system NSS database. */
122 k5_nss_ctx = NSS_InitContext(NSS_KRB5_CONFIGDIR, "", "", "", NULL, flags);
123 if (k5_nss_ctx == NULL) {
124 /* There may be no system database; try again without it. */
125 flags |= NSS_INIT_NOMODDB | NSS_INIT_NOCERTDB;
126 k5_nss_ctx = NSS_InitContext(NULL, "", "", "", NULL, flags);
127 if (k5_nss_ctx == NULL) {
128 ret = k5_nss_map_last_error();
135 k5_mutex_unlock(&k5_nss_lock);
/* Create a PKCS#11 operation context for krb_key's cached symkey, using
 * the given mechanism, operation (CKA_ENCRYPT/CKA_DECRYPT), and
 * mechanism parameter.  Assumes the key was imported by
 * k5_nss_gen_import so krb_key->cache is populated. */
140 k5_nss_create_context(krb5_key krb_key, CK_MECHANISM_TYPE mechanism,
141 CK_ATTRIBUTE_TYPE operation, SECItem *param)
143 struct cached_key *ckey = krb_key->cache;
145 return PK11_CreateContextBySymKey(mechanism, operation, ckey->symkey,
/* XOR size bytes of y into x in place (x ^= y). */
150 xor(unsigned char *x, unsigned char *y, int size)
154 #define ALIGNED(x,type) (!(((size_t)(x))&(sizeof(type)-1)))
/* Fast path: word-at-a-time when both pointers and the size are
 * word-aligned.  NOTE(review): the unsigned long casts technically
 * violate strict aliasing; confirm the build's assumptions (e.g.
 * -fno-strict-aliasing) make this safe. */
155 if (ALIGNED(x,unsigned long) && ALIGNED(y, unsigned long)
156 && ALIGNED(size, unsigned long)) {
157 unsigned long *ux = (unsigned long *)x;
158 unsigned long *uy = (unsigned long *)y;
159 for (i=0; i < (int)(size/sizeof(unsigned long)); i++) {
/* Fallback: byte-at-a-time. */
164 for (i=0; i < size; i++) {
/*
 * Encrypt or decrypt the encryptable iovs in place with a block cipher,
 * one cipher block at a time.  ivec, when present, supplies the IV; on
 * return it is updated with the last ciphertext block (for encryption,
 * copied after the cipher op; for decryption, saved before the final
 * block is decrypted) so chained calls can continue the CBC stream.
 */
170 k5_nss_gen_block_iov(krb5_key krb_key, CK_MECHANISM_TYPE mech,
171 CK_ATTRIBUTE_TYPE operation, const krb5_data *ivec,
172 krb5_crypto_iov *data, size_t num_data)
174 krb5_error_code ret = 0;
175 PK11Context *ctx = NULL;
177 SECItem *param = NULL;
178 struct iov_block_state input_pos, output_pos;
179 unsigned char storage[MAX_BLOCK_SIZE];
180 unsigned char iv0[MAX_BLOCK_SIZE];
181 unsigned char *ptr = NULL,*lastptr = NULL;
188 IOV_BLOCK_STATE_INIT(&input_pos);
189 IOV_BLOCK_STATE_INIT(&output_pos);
191 blocksize = PK11_GetBlockSize(mech, NULL);
192 assert(blocksize <= sizeof(storage));
194 if (ivec && ivec->data) {
195 iv.data = (unsigned char *)ivec->data;
196 iv.len = ivec->length;
197 if (operation == CKA_DECRYPT) {
200 /* Count the blocks so we know which block is last. */
201 for (i = 0, inputlength = 0; i < (int)num_data; i++) {
202 krb5_crypto_iov *iov = &data[i];
204 if (ENCRYPT_IOV(iov))
205 inputlength += iov->data.length;
207 lastblock = (inputlength/blocksize) -1;
/* No caller IV: use an all-zero IV. */
210 memset(iv0, 0, sizeof(iv0));
214 param = PK11_ParamFromIV(mech, &iv);
216 ctx = k5_nss_create_context(krb_key, mech, operation, param);
218 ret = k5_nss_map_last_error();
/* Process one cipher block per iteration until the iovs are exhausted. */
222 for (currentblock = 0;;currentblock++) {
223 if (!krb5int_c_iov_get_block_nocopy(storage, blocksize, data, num_data,
229 /* only set if we are decrypting */
230 if (lastblock == currentblock)
231 memcpy(ivec->data, ptr, blocksize);
/* In-place cipher op: output overwrites the fetched block. */
233 rv = PK11_CipherOp(ctx, ptr, &length, blocksize, ptr, blocksize);
234 if (rv != SECSuccess) {
235 ret = k5_nss_map_last_error();
240 krb5int_c_iov_put_block_nocopy(data, num_data, storage, blocksize,
/* For encryption, the last ciphertext block becomes the next IV. */
244 if (lastptr && ivec && ivec->data && operation == CKA_ENCRYPT) {
245 memcpy(ivec->data, lastptr, blocksize);
250 PK11_DestroyContext(ctx, PR_TRUE);
253 SECITEM_FreeItem(param, PR_TRUE);
/* Allocate an empty stream-cipher state for k5_nss_gen_stream_iov; the
 * PKCS#11 context itself is created lazily on first use (loopback stays
 * NULL until then). */
258 k5_nss_stream_init_state(krb5_data *new_state)
260 struct stream_state *sstate;
262 /* Create a state structure with an uninitialized context. */
263 sstate = calloc(1, sizeof(*sstate));
266 sstate->loopback = NULL;
267 new_state->data = (char *) sstate;
268 new_state->length = sizeof(*sstate);
/* Release a stream-cipher state created by k5_nss_stream_init_state. */
273 k5_nss_stream_free_state(krb5_data *state)
275 struct stream_state *sstate = (struct stream_state *) state->data;
277 /* Clean up the NSS context if it was initialized (loopback points at
 * the state itself only after lazy initialization). */
278 if (sstate && sstate->loopback == sstate) {
279 PK11_Finalize(sstate->ctx);
280 PK11_DestroyContext(sstate->ctx, PR_TRUE);
/*
 * Encrypt or decrypt each encryptable iov in place with a stream cipher.
 * The PKCS#11 context is created lazily and cached in *state (when
 * state is non-NULL) so cipher state carries across calls; the state is
 * rejected if it was copied or is used across a fork.
 */
286 k5_nss_gen_stream_iov(krb5_key krb_key, krb5_data *state,
287 CK_MECHANISM_TYPE mech, CK_ATTRIBUTE_TYPE operation,
288 krb5_crypto_iov *data, size_t num_data)
291 PK11Context *ctx = NULL;
294 krb5_crypto_iov *iov;
295 struct stream_state *sstate = NULL;
301 sstate = (state == NULL) ? NULL : (struct stream_state *) state->data;
302 if (sstate == NULL || sstate->loopback == NULL) {
/* FIX(review): "&param" was garbled to the mojibake "(pilcrow)m"
 * (HTML-entity damage); restored from context. */
303 ctx = k5_nss_create_context(krb_key, mech, operation, &param);
305 ret = k5_nss_map_last_error();
/* Mark the state initialized and owned by this process. */
309 sstate->loopback = sstate;
310 sstate->pid = getpid();
314 /* Cipher state can't be copied or used across a fork. */
315 if (sstate->loopback != sstate || sstate->pid != getpid())
320 for (i=0; i < (int)num_data; i++) {
323 if (iov->data.length <= 0)
326 if (ENCRYPT_IOV(iov)) {
/* Stream ciphers permit in-place operation; output overwrites input. */
327 rv = PK11_CipherOp(ctx, (unsigned char *)iov->data.data,
328 &return_length, iov->data.length,
329 (unsigned char *)iov->data.data,
331 if (rv != SECSuccess) {
332 ret = k5_nss_map_last_error();
335 iov->data.length = return_length;
341 PK11_DestroyContext(ctx, PR_TRUE);
/*
 * Encrypt or decrypt the encryptable iovs using CBC with ciphertext
 * stealing (CTS): bulk CBC over the leading whole blocks, then special
 * handling (final block swap, and xor-based recovery on decrypt) for
 * the last full block plus the trailing partial block.  ivec, when
 * present, supplies the IV and receives a chaining block on completion.
 */
347 k5_nss_gen_cts_iov(krb5_key krb_key, CK_MECHANISM_TYPE mech,
348 CK_ATTRIBUTE_TYPE operation, const krb5_data *ivec,
349 krb5_crypto_iov *data, size_t num_data)
351 krb5_error_code ret = 0;
352 PK11Context *ctx = NULL;
354 SECItem *param = NULL;
355 struct iov_block_state input_pos, output_pos;
356 unsigned char storage[MAX_BLOCK_SIZE];
357 unsigned char recover1[MAX_BLOCK_SIZE];
358 unsigned char recover2[MAX_BLOCK_SIZE];
359 unsigned char block1[MAX_BLOCK_SIZE];
360 unsigned char block2[MAX_BLOCK_SIZE];
361 unsigned char iv0[MAX_BLOCK_SIZE];
362 unsigned char *ptr = NULL;
365 size_t bulk_length, remainder;
366 size_t input_length, lastblock;
370 IOV_BLOCK_STATE_INIT(&input_pos);
371 IOV_BLOCK_STATE_INIT(&output_pos);
373 blocksize = PK11_GetBlockSize(mech, NULL);
374 assert(blocksize <= sizeof(storage));
377 iv.data = (unsigned char *)ivec->data;
378 iv.len = ivec->length;
380 memset(iv0, 0, sizeof(iv0));
384 param = PK11_ParamFromIV(mech, &iv);
/* Total length across all encryptable iovs. */
386 for (i = 0, input_length = 0; i < (int)num_data; i++) {
387 krb5_crypto_iov *iov = &data[i];
389 if (ENCRYPT_IOV(iov))
390 input_length += iov->data.length;
392 /* Must be at least a block or we fail. */
393 if (input_length < blocksize) {
398 bulk_length = (input_length / blocksize)*blocksize;
399 remainder = input_length - bulk_length;
400 /* Do the block swap even if the input data is aligned, only
401 * drop it if we are encrypting exactly one block. */
402 if (remainder == 0 && bulk_length != blocksize) {
403 remainder = blocksize;
404 bulk_length -= blocksize;
407 ctx = k5_nss_create_context(krb_key, mech, operation, param);
409 ret = k5_nss_map_last_error();
413 /* Now we bulk encrypt each block in the loop. We need to know where to
414 * stop to do special processing. For single block operations we stop at
415 * the end. For all others we stop and the last second to last block
416 * (counting partial blocks). For decrypt operations we need to save cn-2
417 * so we stop at the third to last block if it exists, otherwise cn-2 = the
419 lastblock = bulk_length;
421 /* We need to process the last full block and last partitial block
423 lastblock = bulk_length - blocksize;
424 if (operation == CKA_DECRYPT) {
425 if (bulk_length > blocksize) {
426 /* Stop at cn-2 so we can save it before going on. */
427 lastblock = bulk_length - 2*blocksize;
429 /* iv is cn-2, save it now, cn - 2. */
430 memcpy(recover1, iv.data, blocksize);
431 memcpy(recover2, iv.data, blocksize);
/* Bulk CBC pass over everything before the CTS tail. */
435 for (length = 0; length < lastblock; length += blocksize) {
436 if (!krb5int_c_iov_get_block_nocopy(storage, blocksize, data, num_data,
440 rv = PK11_CipherOp(ctx, ptr, &len, blocksize, ptr, blocksize);
441 if (rv != SECSuccess) {
442 ret = k5_nss_map_last_error();
446 krb5int_c_iov_put_block_nocopy(data, num_data, storage, blocksize,
450 if (operation == CKA_DECRYPT) {
451 if (bulk_length > blocksize) {
452 /* we need to save cn-2 */
453 if (!krb5int_c_iov_get_block_nocopy(storage, blocksize, data,
454 num_data, &input_pos,
456 goto done; /* shouldn't happen */
459 memcpy(recover1, ptr, blocksize);
460 memcpy(recover2, ptr, blocksize);
462 /* now process it as normal */
463 rv = PK11_CipherOp(ctx, ptr, &len, blocksize, ptr, blocksize);
464 if (rv != SECSuccess) {
465 ret = k5_nss_map_last_error();
469 krb5int_c_iov_put_block_nocopy(data, num_data, storage,
470 blocksize, &output_pos, ptr);
473 /* fetch the last 2 blocks */
474 memset(block1, 0, blocksize); /* last block, could be partial */
475 krb5int_c_iov_get_block(block2, blocksize, data, num_data, &input_pos);
476 krb5int_c_iov_get_block(block1, remainder, data, num_data, &input_pos);
477 if (operation == CKA_DECRYPT) {
478 /* recover1 and recover2 are xor values to recover the true
479 * underlying data of the last 2 decrypts. This keeps us from
480 * having to try to reset our IV to do the final decryption. */
481 /* Currently: block1 is cn || 0, block2 is cn-1.
482 * recover1 & recover2 is set to cn-2. */
483 /* recover2 recovers pn || c' from p'n-1. The raw decrypted block
484 * will be p'n-1 xor with cn-2 while pn || c' = p'n-1 xor cn || 0.
485 * recover2 is cn-2 xor cn || 0, so we can simple xor recover1
486 * with the raw decrypted block. */
487 /* recover1 recovers pn-1 from the raw decryption of cn || c'.
488 * the raw decrypt of cn || c' = p'n xor cn-1 while
489 * pn-1 = p'n xor cn-2
490 * recover1 is cn-2 xor cn-1, so we can simple xor recover 2 with
491 * the raw decrypt of cn||c' to get pn-1. */
492 xor(recover1, block2, blocksize);
493 xor(recover2, block1, blocksize);
494 if (ivec && ivec->data)
495 memcpy(ivec->data, block2, blocksize);
497 rv = PK11_CipherOp(ctx, block2, &len, blocksize, block2, blocksize);
498 if (rv != SECSuccess) {
499 ret = k5_nss_map_last_error();
502 if (operation == CKA_DECRYPT) {
503 /* block2 now has p'n-1 xor cn-2. */
504 xor(block2, recover2, blocksize);
505 /* block 2 now has pn || c'. */
506 /* copy c' into cn || c'. */
507 memcpy(block1 + remainder, block2 + remainder,
508 blocksize - remainder);
510 rv = PK11_CipherOp(ctx, block1, &len, blocksize, block1, blocksize);
511 if (rv != SECSuccess) {
512 ret = k5_nss_map_last_error();
515 if (operation == CKA_DECRYPT) {
516 /* block1 now has p'n xor cn-1 */
517 xor(block1, recover1, blocksize);
518 /* block 1 now has pn-1 */
520 if (ivec && ivec->data) {
521 memcpy(ivec->data, block1, blocksize);
/* Write the (swapped) final two blocks back to the output iovs. */
524 krb5int_c_iov_put_block(data,num_data, block1, blocksize, &output_pos);
525 krb5int_c_iov_put_block(data,num_data, block2, remainder, &output_pos);
531 PK11_DestroyContext(ctx, PR_TRUE);
534 SECITEM_FreeItem(param, PR_TRUE);
/*
 * Compute a CBC-MAC over the signable iovs: CBC-encrypt the data block
 * by block and copy the final ciphertext block into output.  output
 * must hold at least one cipher block (KRB5_BAD_MSIZE otherwise).
 */
539 k5_nss_gen_cbcmac_iov(krb5_key krb_key, CK_MECHANISM_TYPE mech,
540 const krb5_data *ivec, const krb5_crypto_iov *data,
541 size_t num_data, krb5_data *output)
543 krb5_error_code ret = 0;
544 PK11Context *ctx = NULL;
546 SECItem *param = NULL;
547 struct iov_block_state input_pos, output_pos;
548 unsigned char block[MAX_BLOCK_SIZE], *lastblock;
549 unsigned char iv0[MAX_BLOCK_SIZE];
555 IOV_BLOCK_STATE_INIT(&input_pos);
556 IOV_BLOCK_STATE_INIT(&output_pos);
558 blocksize = PK11_GetBlockSize(mech, NULL);
559 assert(blocksize <= sizeof(block));
560 if (output->length < blocksize)
561 return KRB5_BAD_MSIZE;
563 if (ivec && ivec->data) {
564 iv.data = (unsigned char *)ivec->data;
565 iv.len = ivec->length;
/* No caller IV: use an all-zero IV. */
567 memset(iv0, 0, sizeof(iv0));
571 param = PK11_ParamFromIV(mech, &iv);
/* MAC computation always runs the cipher in the encrypt direction. */
573 ctx = k5_nss_create_context(krb_key, mech, CKA_ENCRYPT, param);
575 ret = k5_nss_map_last_error();
/* Encrypt block by block; the last ciphertext block is the MAC. */
580 for (currentblock = 0;;currentblock++) {
581 if (!krb5int_c_iov_get_block(block, blocksize, data, num_data,
584 rv = PK11_CipherOp(ctx, block, &length, blocksize, block, blocksize);
585 if (rv != SECSuccess) {
586 ret = k5_nss_map_last_error();
591 memcpy(output->data, lastblock, blocksize);
596 PK11_DestroyContext(ctx, PR_TRUE);
599 SECITEM_FreeItem(param, PR_TRUE);
/* Free the cached PKCS#11 symkey container attached to krb_key (the
 * cache populated by k5_nss_gen_import). */
604 k5_nss_gen_cleanup(krb5_key krb_key)
606 struct cached_key *ckey = krb_key->cache;
609 PK11_FreeSymKey(ckey->symkey);
611 krb_key->cache = NULL;
/*
 * Import krb_key's raw key bytes into an NSS slot as a PK11SymKey for
 * the given mechanism/operation, caching the result (with the owning
 * pid, for fork detection) in krb_key->cache.  To stay FIPS-compliant
 * the raw key is first encrypted ("wrapped") with a per-slot wrapping
 * key and then unwrapped into the token, rather than imported directly;
 * a plain PK11_ImportSymKey is the fallback path.
 */
616 k5_nss_gen_import(krb5_key krb_key, CK_MECHANISM_TYPE mech,
617 CK_ATTRIBUTE_TYPE operation)
619 krb5_error_code ret = 0;
620 pid_t pid = getpid();
621 struct cached_key *ckey = krb_key->cache;
623 PK11SlotInfo *slot = NULL;
626 PK11SymKey *wrapping_key = NULL;
627 PK11Context *ctx = NULL;
630 unsigned char wrapped_key_data[MAX_KEY_LENGTH];
631 unsigned char padded_key_data[MAX_KEY_LENGTH];
632 int wrapping_index, series, blocksize;
634 CK_MECHANISM_TYPE mechanism;
/* Cached key from this process is still valid; nothing to do. */
638 if (ckey && ckey->pid == pid)
646 /* Discard the no-longer-valid symkey and steal its container. */
647 PK11_FreeSymKey(ckey->symkey);
649 krb_key->cache = NULL;
651 /* Allocate a new container. */
652 ckey = k5alloc(sizeof(*ckey), &ret);
657 slot = PK11_GetBestSlot(mech, NULL);
659 ret = k5_nss_map_last_error();
662 raw_key.data = krb_key->keyblock.contents;
663 raw_key.len = krb_key->keyblock.length;
666 /* First, fetch a wrapping key. */
667 wrapping_index = PK11_GetCurrentWrapIndex(slot);
668 series = PK11_GetSlotSeries(slot);
669 wrapping_key = PK11_GetWrapKey(slot, wrapping_index,
670 CKM_INVALID_MECHANISM, series, NULL);
671 if (wrapping_key == NULL) {
672 /* One doesn't exist, create one. */
673 mechanism = PK11_GetBestWrapMechanism(slot);
674 keyLength = PK11_GetBestKeyLength(slot, mechanism);
675 wrapping_key = PK11_TokenKeyGenWithFlags(slot, mechanism, NULL,
677 CKF_UNWRAP | CKF_ENCRYPT, 0,
680 ret = k5_nss_map_last_error();
683 PK11_SetWrapKey(slot, wrapping_index, wrapping_key);
686 /* Now encrypt the data with the wrapping key. */
687 mechanism = PK11_GetMechanism(wrapping_key);
/* FIX(review): "&params" was garbled to the mojibake "(pilcrow)ms"
 * (HTML-entity damage); restored from context. */
690 ctx = PK11_CreateContextBySymKey(mechanism, CKA_ENCRYPT,
691 wrapping_key, &params);
693 ret = k5_nss_map_last_error();
697 wrapped_key.data = wrapped_key_data;
698 wrapped_key.len = sizeof(wrapped_key_data);
699 blocksize = PK11_GetBlockSize(mechanism, NULL);
700 keyLength = raw_key.len;
703 * ECB modes need keys in integral multiples of the block size.
704 * If the key isn't an integral multiple, pad it with zero. Unwrap
705 * will use the length parameter to appropriately set the key.
707 if ((raw_key.len % blocksize) != 0) {
708 int keyblocks = (raw_key.len + (blocksize - 1)) / blocksize;
709 keyLength = keyblocks * blocksize;
710 assert(keyLength <= sizeof(padded_key_data));
711 memset(padded_key_data, 0, keyLength);
712 memcpy(padded_key_data, raw_key.data, raw_key.len);
713 raw_key.data = padded_key_data;
715 rv = PK11_CipherOp(ctx, wrapped_key.data, (int *)&wrapped_key.len,
716 sizeof(wrapped_key_data), raw_key.data, keyLength);
717 if (keyLength != raw_key.len) {
718 /* Clear our copy of the key bits. */
719 memset(padded_key_data, 0, keyLength);
721 if (rv != SECSuccess) {
722 ret = k5_nss_map_last_error();
726 PK11_DestroyContext(ctx, PR_TRUE);
729 /* Now that we have a 'wrapped' version of the key, we can import it
730 * into the token without running afoul of FIPS. */
731 symkey = PK11_UnwrapSymKey(wrapping_key, mechanism, &params, &wrapped_key,
732 mech, operation, raw_key.len);
/* Fallback: direct import when the wrap/unwrap path is unavailable. */
734 symkey = PK11_ImportSymKey(slot, mech, PK11_OriginGenerated, operation,
737 if (symkey == NULL) {
738 ret = k5_nss_map_last_error();
742 ckey->symkey = symkey;
743 krb_key->cache = ckey;
753 PK11_DestroyContext(ctx, PR_TRUE);
756 PK11_FreeSymKey(wrapping_key);