2 * Argon2 reference source code package - reference C implementations
5 * Daniel Dinu, Dmitry Khovratovich, Jean-Philippe Aumasson, and Samuel Neves
7 * You may use this work under the terms of a Creative Commons CC0 1.0
8 * License/Waiver or the Apache Public License 2.0, at your option. The terms of
9 * these licenses can be found at:
11 * - CC0 1.0 Universal : https://creativecommons.org/publicdomain/zero/1.0
12 * - Apache 2.0 : https://www.apache.org/licenses/LICENSE-2.0
14 * You should have received a copy of both of these licenses along with this
15 * software. If not, they may be obtained at the above URLs.
21 #include <winbase.h> /* For SecureZeroMemory */
23 #if defined __STDC_LIB_EXT1__
24 #define __STDC_WANT_LIB_EXT1__ 1
26 #define VC_GE_2005(version) (version >= 1400)
34 #include "blake2/blake2.h"
35 #include "blake2/blake2-impl.h"
/*
 * NOT_OPTIMIZED marks functions that must not be optimized away or
 * reordered by the compiler (used for secure memory wiping).  Falls back
 * to an empty definition on compilers without a suitable attribute.
 */
#if defined(__clang__)
#if __has_attribute(optnone)
#define NOT_OPTIMIZED __attribute__((optnone))
#endif
#elif defined(__GNUC__)
#define GCC_VERSION                                                            \
    (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
#if GCC_VERSION >= 40400
#define NOT_OPTIMIZED __attribute__((optimize("O0")))
#endif
#endif
#ifndef NOT_OPTIMIZED
#define NOT_OPTIMIZED
#endif
56 /***************Instance and Position constructors**********/
57 void init_block_value(block *b, uint8_t in) { memset(b->v, in, sizeof(b->v)); }
59 void copy_block(block *dst, const block *src) {
60 memcpy(dst->v, src->v, sizeof(uint64_t) * ARGON2_QWORDS_IN_BLOCK);
63 void xor_block(block *dst, const block *src) {
65 for (i = 0; i < ARGON2_QWORDS_IN_BLOCK; ++i) {
66 dst->v[i] ^= src->v[i];
70 static void load_block(block *dst, const void *input) {
72 for (i = 0; i < ARGON2_QWORDS_IN_BLOCK; ++i) {
73 dst->v[i] = load64((const uint8_t *)input + i * sizeof(dst->v[i]));
77 static void store_block(void *output, const block *src) {
79 for (i = 0; i < ARGON2_QWORDS_IN_BLOCK; ++i) {
80 store64((uint8_t *)output + i * sizeof(src->v[i]), src->v[i]);
84 /***************Memory functions*****************/
86 int allocate_memory(const argon2_context *context, uint8_t **memory,
87 size_t num, size_t size) {
88 size_t memory_size = num*size;
90 return ARGON2_MEMORY_ALLOCATION_ERROR;
93 /* 1. Check for multiplication overflow */
94 if (size != 0 && memory_size / size != num) {
95 return ARGON2_MEMORY_ALLOCATION_ERROR;
98 /* 2. Try to allocate with appropriate allocator */
99 if (context->allocate_cbk) {
100 (context->allocate_cbk)(memory, memory_size);
102 *memory = malloc(memory_size);
105 if (*memory == NULL) {
106 return ARGON2_MEMORY_ALLOCATION_ERROR;
112 void free_memory(const argon2_context *context, uint8_t *memory,
113 size_t num, size_t size) {
114 size_t memory_size = num*size;
115 clear_internal_memory(memory, memory_size);
116 if (context->free_cbk) {
117 (context->free_cbk)(memory, memory_size);
123 void NOT_OPTIMIZED secure_wipe_memory(void *v, size_t n) {
124 #if defined(_MSC_VER) && VC_GE_2005(_MSC_VER)
125 SecureZeroMemory(v, n);
126 #elif defined memset_s
127 memset_s(v, n, 0, n);
128 #elif defined(HAVE_EXPLICIT_BZERO)
129 explicit_bzero(v, n);
131 static void *(*const volatile memset_sec)(void *, int, size_t) = &memset;
/* Memory clear flag defaults to true. */
int FLAG_clear_internal_memory = 1;

/* Wipe n bytes at v, but only when internal clearing is enabled and the
 * pointer is non-NULL. */
void clear_internal_memory(void *v, size_t n) {
    if (FLAG_clear_internal_memory && v) {
        secure_wipe_memory(v, n);
    }
}
144 void finalize(const argon2_context *context, argon2_instance_t *instance) {
145 if (context != NULL && instance != NULL) {
149 copy_block(&blockhash, instance->memory + instance->lane_length - 1);
151 /* XOR the last blocks */
152 for (l = 1; l < instance->lanes; ++l) {
153 uint32_t last_block_in_lane =
154 l * instance->lane_length + (instance->lane_length - 1);
155 xor_block(&blockhash, instance->memory + last_block_in_lane);
158 /* Hash the result */
160 uint8_t blockhash_bytes[ARGON2_BLOCK_SIZE];
161 store_block(blockhash_bytes, &blockhash);
162 blake2b_long(context->out, context->outlen, blockhash_bytes,
164 /* clear blockhash and blockhash_bytes */
165 clear_internal_memory(blockhash.v, ARGON2_BLOCK_SIZE);
166 clear_internal_memory(blockhash_bytes, ARGON2_BLOCK_SIZE);
170 print_tag(context->out, context->outlen);
173 free_memory(context, (uint8_t *)instance->memory,
174 instance->memory_blocks, sizeof(block));
178 uint32_t index_alpha(const argon2_instance_t *instance,
179 const argon2_position_t *position, uint32_t pseudo_rand,
183 * This lane : all already finished segments plus already constructed
184 * blocks in this segment
185 * Other lanes : all already finished segments
187 * This lane : (SYNC_POINTS - 1) last segments plus already constructed
188 * blocks in this segment
189 * Other lanes : (SYNC_POINTS - 1) last segments
191 uint32_t reference_area_size;
192 uint64_t relative_position;
193 uint32_t start_position, absolute_position;
195 if (0 == position->pass) {
197 if (0 == position->slice) {
199 reference_area_size =
200 position->index - 1; /* all but the previous */
203 /* The same lane => add current segment */
204 reference_area_size =
205 position->slice * instance->segment_length +
208 reference_area_size =
209 position->slice * instance->segment_length +
210 ((position->index == 0) ? (-1) : 0);
216 reference_area_size = instance->lane_length -
217 instance->segment_length + position->index -
220 reference_area_size = instance->lane_length -
221 instance->segment_length +
222 ((position->index == 0) ? (-1) : 0);
226 /* 1.2.4. Mapping pseudo_rand to 0..<reference_area_size-1> and produce
227 * relative position */
228 relative_position = pseudo_rand;
229 relative_position = relative_position * relative_position >> 32;
230 relative_position = reference_area_size - 1 -
231 (reference_area_size * relative_position >> 32);
233 /* 1.2.5 Computing starting position */
236 if (0 != position->pass) {
237 start_position = (position->slice == ARGON2_SYNC_POINTS - 1)
239 : (position->slice + 1) * instance->segment_length;
242 /* 1.2.6. Computing absolute position */
243 absolute_position = (start_position + relative_position) %
244 instance->lane_length; /* absolute position */
245 return absolute_position;
248 /* Single-threaded version for p=1 case */
249 static int fill_memory_blocks_st(argon2_instance_t *instance) {
252 for (r = 0; r < instance->passes; ++r) {
253 for (s = 0; s < ARGON2_SYNC_POINTS; ++s) {
254 for (l = 0; l < instance->lanes; ++l) {
255 argon2_position_t position = {r, l, (uint8_t)s, 0};
256 fill_segment(instance, position);
260 internal_kat(instance, r); /* Print all memory blocks */
266 #if !defined(ARGON2_NO_THREADS)
269 static unsigned __stdcall fill_segment_thr(void *thread_data)
271 static void *fill_segment_thr(void *thread_data)
274 argon2_thread_data *my_data = thread_data;
275 fill_segment(my_data->instance_ptr, my_data->pos);
276 argon2_thread_exit();
280 /* Multi-threaded version for p > 1 case */
281 static int fill_memory_blocks_mt(argon2_instance_t *instance) {
283 argon2_thread_handle_t *thread = NULL;
284 argon2_thread_data *thr_data = NULL;
287 /* 1. Allocating space for threads */
288 thread = calloc(instance->lanes, sizeof(argon2_thread_handle_t));
289 if (thread == NULL) {
290 rc = ARGON2_MEMORY_ALLOCATION_ERROR;
294 thr_data = calloc(instance->lanes, sizeof(argon2_thread_data));
295 if (thr_data == NULL) {
296 rc = ARGON2_MEMORY_ALLOCATION_ERROR;
300 for (r = 0; r < instance->passes; ++r) {
301 for (s = 0; s < ARGON2_SYNC_POINTS; ++s) {
304 /* 2. Calling threads */
305 for (l = 0; l < instance->lanes; ++l) {
306 argon2_position_t position;
308 /* 2.1 Join a thread if limit is exceeded */
309 if (l >= instance->threads) {
310 if (argon2_thread_join(thread[l - instance->threads])) {
311 rc = ARGON2_THREAD_FAIL;
316 /* 2.2 Create thread */
319 position.slice = (uint8_t)s;
321 thr_data[l].instance_ptr =
322 instance; /* preparing the thread input */
323 memcpy(&(thr_data[l].pos), &position,
324 sizeof(argon2_position_t));
325 if (argon2_thread_create(&thread[l], &fill_segment_thr,
326 (void *)&thr_data[l])) {
327 /* Wait for already running threads */
328 for (ll = 0; ll < l; ++ll)
329 argon2_thread_join(thread[ll]);
330 rc = ARGON2_THREAD_FAIL;
334 /* fill_segment(instance, position); */
335 /*Non-thread equivalent of the lines above */
338 /* 3. Joining remaining threads */
339 for (l = instance->lanes - instance->threads; l < instance->lanes;
341 if (argon2_thread_join(thread[l])) {
342 rc = ARGON2_THREAD_FAIL;
349 internal_kat(instance, r); /* Print all memory blocks */
354 if (thread != NULL) {
357 if (thr_data != NULL) {
363 #endif /* ARGON2_NO_THREADS */
365 int fill_memory_blocks(argon2_instance_t *instance) {
366 if (instance == NULL || instance->lanes == 0) {
367 return ARGON2_INCORRECT_PARAMETER;
369 #if defined(ARGON2_NO_THREADS)
370 return fill_memory_blocks_st(instance);
372 return instance->threads == 1 ?
373 fill_memory_blocks_st(instance) : fill_memory_blocks_mt(instance);
377 int validate_inputs(const argon2_context *context) {
378 if (NULL == context) {
379 return ARGON2_INCORRECT_PARAMETER;
382 if (NULL == context->out) {
383 return ARGON2_OUTPUT_PTR_NULL;
386 /* Validate output length */
387 if (ARGON2_MIN_OUTLEN > context->outlen) {
388 return ARGON2_OUTPUT_TOO_SHORT;
391 if (ARGON2_MAX_OUTLEN < context->outlen) {
392 return ARGON2_OUTPUT_TOO_LONG;
395 /* Validate password (required param) */
396 if (NULL == context->pwd) {
397 if (0 != context->pwdlen) {
398 return ARGON2_PWD_PTR_MISMATCH;
401 #if ARGON2_MIN_PWD_LENGTH > 0 /* cryptsetup: fix gcc warning */
402 if (ARGON2_MIN_PWD_LENGTH > context->pwdlen) {
403 return ARGON2_PWD_TOO_SHORT;
406 if (ARGON2_MAX_PWD_LENGTH < context->pwdlen) {
407 return ARGON2_PWD_TOO_LONG;
410 /* Validate salt (required param) */
411 if (NULL == context->salt) {
412 if (0 != context->saltlen) {
413 return ARGON2_SALT_PTR_MISMATCH;
417 if (ARGON2_MIN_SALT_LENGTH > context->saltlen) {
418 return ARGON2_SALT_TOO_SHORT;
421 if (ARGON2_MAX_SALT_LENGTH < context->saltlen) {
422 return ARGON2_SALT_TOO_LONG;
425 /* Validate secret (optional param) */
426 if (NULL == context->secret) {
427 if (0 != context->secretlen) {
428 return ARGON2_SECRET_PTR_MISMATCH;
431 #if ARGON2_MIN_SECRET > 0 /* cryptsetup: fix gcc warning */
432 if (ARGON2_MIN_SECRET > context->secretlen) {
433 return ARGON2_SECRET_TOO_SHORT;
436 if (ARGON2_MAX_SECRET < context->secretlen) {
437 return ARGON2_SECRET_TOO_LONG;
441 /* Validate associated data (optional param) */
442 if (NULL == context->ad) {
443 if (0 != context->adlen) {
444 return ARGON2_AD_PTR_MISMATCH;
447 #if ARGON2_MIN_AD_LENGTH > 0 /* cryptsetup: fix gcc warning */
448 if (ARGON2_MIN_AD_LENGTH > context->adlen) {
449 return ARGON2_AD_TOO_SHORT;
452 if (ARGON2_MAX_AD_LENGTH < context->adlen) {
453 return ARGON2_AD_TOO_LONG;
457 /* Validate memory cost */
458 if (ARGON2_MIN_MEMORY > context->m_cost) {
459 return ARGON2_MEMORY_TOO_LITTLE;
461 #if 0 /* UINT32_MAX, cryptsetup: fix gcc warning */
462 if (ARGON2_MAX_MEMORY < context->m_cost) {
463 return ARGON2_MEMORY_TOO_MUCH;
466 if (context->m_cost < 8 * context->lanes) {
467 return ARGON2_MEMORY_TOO_LITTLE;
470 /* Validate time cost */
471 if (ARGON2_MIN_TIME > context->t_cost) {
472 return ARGON2_TIME_TOO_SMALL;
475 if (ARGON2_MAX_TIME < context->t_cost) {
476 return ARGON2_TIME_TOO_LARGE;
480 if (ARGON2_MIN_LANES > context->lanes) {
481 return ARGON2_LANES_TOO_FEW;
484 if (ARGON2_MAX_LANES < context->lanes) {
485 return ARGON2_LANES_TOO_MANY;
488 /* Validate threads */
489 if (ARGON2_MIN_THREADS > context->threads) {
490 return ARGON2_THREADS_TOO_FEW;
493 if (ARGON2_MAX_THREADS < context->threads) {
494 return ARGON2_THREADS_TOO_MANY;
497 if (NULL != context->allocate_cbk && NULL == context->free_cbk) {
498 return ARGON2_FREE_MEMORY_CBK_NULL;
501 if (NULL == context->allocate_cbk && NULL != context->free_cbk) {
502 return ARGON2_ALLOCATE_MEMORY_CBK_NULL;
508 void fill_first_blocks(uint8_t *blockhash, const argon2_instance_t *instance) {
510 /* Make the first and second block in each lane as G(H0||0||i) or
512 uint8_t blockhash_bytes[ARGON2_BLOCK_SIZE];
513 for (l = 0; l < instance->lanes; ++l) {
515 store32(blockhash + ARGON2_PREHASH_DIGEST_LENGTH, 0);
516 store32(blockhash + ARGON2_PREHASH_DIGEST_LENGTH + 4, l);
517 blake2b_long(blockhash_bytes, ARGON2_BLOCK_SIZE, blockhash,
518 ARGON2_PREHASH_SEED_LENGTH);
519 load_block(&instance->memory[l * instance->lane_length + 0],
522 store32(blockhash + ARGON2_PREHASH_DIGEST_LENGTH, 1);
523 blake2b_long(blockhash_bytes, ARGON2_BLOCK_SIZE, blockhash,
524 ARGON2_PREHASH_SEED_LENGTH);
525 load_block(&instance->memory[l * instance->lane_length + 1],
528 clear_internal_memory(blockhash_bytes, ARGON2_BLOCK_SIZE);
531 void initial_hash(uint8_t *blockhash, argon2_context *context,
533 blake2b_state BlakeHash;
534 uint8_t value[sizeof(uint32_t)];
536 if (NULL == context || NULL == blockhash) {
540 blake2b_init(&BlakeHash, ARGON2_PREHASH_DIGEST_LENGTH);
542 store32(&value, context->lanes);
543 blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));
545 store32(&value, context->outlen);
546 blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));
548 store32(&value, context->m_cost);
549 blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));
551 store32(&value, context->t_cost);
552 blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));
554 store32(&value, context->version);
555 blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));
557 store32(&value, (uint32_t)type);
558 blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));
560 store32(&value, context->pwdlen);
561 blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));
563 if (context->pwd != NULL) {
564 blake2b_update(&BlakeHash, (const uint8_t *)context->pwd,
567 if (context->flags & ARGON2_FLAG_CLEAR_PASSWORD) {
568 secure_wipe_memory(context->pwd, context->pwdlen);
573 store32(&value, context->saltlen);
574 blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));
576 if (context->salt != NULL) {
577 blake2b_update(&BlakeHash, (const uint8_t *)context->salt,
581 store32(&value, context->secretlen);
582 blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));
584 if (context->secret != NULL) {
585 blake2b_update(&BlakeHash, (const uint8_t *)context->secret,
588 if (context->flags & ARGON2_FLAG_CLEAR_SECRET) {
589 secure_wipe_memory(context->secret, context->secretlen);
590 context->secretlen = 0;
594 store32(&value, context->adlen);
595 blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));
597 if (context->ad != NULL) {
598 blake2b_update(&BlakeHash, (const uint8_t *)context->ad,
602 blake2b_final(&BlakeHash, blockhash, ARGON2_PREHASH_DIGEST_LENGTH);
605 int initialize(argon2_instance_t *instance, argon2_context *context) {
606 uint8_t blockhash[ARGON2_PREHASH_SEED_LENGTH];
607 int result = ARGON2_OK;
609 if (instance == NULL || context == NULL)
610 return ARGON2_INCORRECT_PARAMETER;
611 instance->context_ptr = context;
613 /* 1. Memory allocation */
614 result = allocate_memory(context, (uint8_t **)&(instance->memory),
615 instance->memory_blocks, sizeof(block));
616 if (result != ARGON2_OK) {
620 /* 2. Initial hashing */
621 /* H_0 + 8 extra bytes to produce the first blocks */
622 /* uint8_t blockhash[ARGON2_PREHASH_SEED_LENGTH]; */
623 /* Hashing all inputs */
624 initial_hash(blockhash, context, instance->type);
625 /* Zeroing 8 extra bytes */
626 clear_internal_memory(blockhash + ARGON2_PREHASH_DIGEST_LENGTH,
627 ARGON2_PREHASH_SEED_LENGTH -
628 ARGON2_PREHASH_DIGEST_LENGTH);
631 initial_kat(blockhash, context, instance->type);
634 /* 3. Creating first blocks, we always have at least two blocks in a slice
636 fill_first_blocks(blockhash, instance);
637 /* Clearing the hash */
638 clear_internal_memory(blockhash, ARGON2_PREHASH_SEED_LENGTH);