/*
 * Argon2 reference source code package - reference C implementations
 *
 * Copyright 2015
 * Daniel Dinu, Dmitry Khovratovich, Jean-Philippe Aumasson, and Samuel Neves
 *
 * You may use this work under the terms of a Creative Commons CC0 1.0
 * License/Waiver or the Apache Public License 2.0, at your option. The terms of
 * these licenses can be found at:
 *
 * - CC0 1.0 Universal : https://creativecommons.org/publicdomain/zero/1.0
 * - Apache 2.0        : https://www.apache.org/licenses/LICENSE-2.0
 *
 * You should have received a copy of both of these licenses along with this
 * software. If not, they may be obtained at the above URLs.
 */
21 #include <winbase.h> /* For SecureZeroMemory */
23 #if defined __STDC_LIB_EXT1__
24 #define __STDC_WANT_LIB_EXT1__ 1
26 #define VC_GE_2005(version) (version >= 1400)
34 #include "blake2/blake2.h"
35 #include "blake2/blake2-impl.h"
41 #if defined(__clang__)
42 #if __has_attribute(optnone)
43 #define NOT_OPTIMIZED __attribute__((optnone))
45 #elif defined(__GNUC__)
47 (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
48 #if GCC_VERSION >= 40400
49 #define NOT_OPTIMIZED __attribute__((optimize("O0")))
56 /***************Instance and Position constructors**********/
57 void init_block_value(block *b, uint8_t in) { memset(b->v, in, sizeof(b->v)); }
59 void copy_block(block *dst, const block *src) {
60 memcpy(dst->v, src->v, sizeof(uint64_t) * ARGON2_QWORDS_IN_BLOCK);
63 void xor_block(block *dst, const block *src) {
65 for (i = 0; i < ARGON2_QWORDS_IN_BLOCK; ++i) {
66 dst->v[i] ^= src->v[i];
70 static void load_block(block *dst, const void *input) {
72 for (i = 0; i < ARGON2_QWORDS_IN_BLOCK; ++i) {
73 dst->v[i] = load64((const uint8_t *)input + i * sizeof(dst->v[i]));
77 static void store_block(void *output, const block *src) {
79 for (i = 0; i < ARGON2_QWORDS_IN_BLOCK; ++i) {
80 store64((uint8_t *)output + i * sizeof(src->v[i]), src->v[i]);
84 /***************Memory functions*****************/
86 int allocate_memory(const argon2_context *context, uint8_t **memory,
87 size_t num, size_t size) {
88 size_t memory_size = num*size;
90 return ARGON2_MEMORY_ALLOCATION_ERROR;
93 /* 1. Check for multiplication overflow */
94 if (size != 0 && memory_size / size != num) {
95 return ARGON2_MEMORY_ALLOCATION_ERROR;
98 /* 2. Try to allocate with appropriate allocator */
99 if (context->allocate_cbk) {
100 (context->allocate_cbk)(memory, memory_size);
102 *memory = malloc(memory_size);
105 if (*memory == NULL) {
106 return ARGON2_MEMORY_ALLOCATION_ERROR;
112 void free_memory(const argon2_context *context, uint8_t *memory,
113 size_t num, size_t size) {
114 size_t memory_size = num*size;
115 clear_internal_memory(memory, memory_size);
116 if (context->free_cbk) {
117 (context->free_cbk)(memory, memory_size);
123 #if defined(_MSC_VER) && VC_GE_2005(_MSC_VER)
124 void secure_wipe_memory(void *v, size_t n) {
125 SecureZeroMemory(v, n);
127 #elif defined memset_s
128 void secure_wipe_memory(void *v, size_t n) {
129 memset_s(v, n, 0, n);
131 #elif defined(HAVE_EXPLICIT_BZERO)
132 void secure_wipe_memory(void *v, size_t n) {
133 explicit_bzero(v, n);
136 void NOT_OPTIMIZED secure_wipe_memory(void *v, size_t n) {
137 static void *(*const volatile memset_sec)(void *, int, size_t) = &memset;
/* Memory clear flag defaults to true. */
int FLAG_clear_internal_memory = 1;

/* Securely wipe n bytes at v, unless v is NULL or clearing is disabled. */
void clear_internal_memory(void *v, size_t n) {
    if (FLAG_clear_internal_memory && v) {
        secure_wipe_memory(v, n);
    }
}
150 void finalize(const argon2_context *context, argon2_instance_t *instance) {
151 if (context != NULL && instance != NULL) {
155 copy_block(&blockhash, instance->memory + instance->lane_length - 1);
157 /* XOR the last blocks */
158 for (l = 1; l < instance->lanes; ++l) {
159 uint32_t last_block_in_lane =
160 l * instance->lane_length + (instance->lane_length - 1);
161 xor_block(&blockhash, instance->memory + last_block_in_lane);
164 /* Hash the result */
166 uint8_t blockhash_bytes[ARGON2_BLOCK_SIZE];
167 store_block(blockhash_bytes, &blockhash);
168 blake2b_long(context->out, context->outlen, blockhash_bytes,
170 /* clear blockhash and blockhash_bytes */
171 clear_internal_memory(blockhash.v, ARGON2_BLOCK_SIZE);
172 clear_internal_memory(blockhash_bytes, ARGON2_BLOCK_SIZE);
176 print_tag(context->out, context->outlen);
179 free_memory(context, (uint8_t *)instance->memory,
180 instance->memory_blocks, sizeof(block));
184 uint32_t index_alpha(const argon2_instance_t *instance,
185 const argon2_position_t *position, uint32_t pseudo_rand,
189 * This lane : all already finished segments plus already constructed
190 * blocks in this segment
191 * Other lanes : all already finished segments
193 * This lane : (SYNC_POINTS - 1) last segments plus already constructed
194 * blocks in this segment
195 * Other lanes : (SYNC_POINTS - 1) last segments
197 uint32_t reference_area_size;
198 uint64_t relative_position;
199 uint32_t start_position, absolute_position;
201 if (0 == position->pass) {
203 if (0 == position->slice) {
205 reference_area_size =
206 position->index - 1; /* all but the previous */
209 /* The same lane => add current segment */
210 reference_area_size =
211 position->slice * instance->segment_length +
214 reference_area_size =
215 position->slice * instance->segment_length +
216 ((position->index == 0) ? (-1) : 0);
222 reference_area_size = instance->lane_length -
223 instance->segment_length + position->index -
226 reference_area_size = instance->lane_length -
227 instance->segment_length +
228 ((position->index == 0) ? (-1) : 0);
232 /* 1.2.4. Mapping pseudo_rand to 0..<reference_area_size-1> and produce
233 * relative position */
234 relative_position = pseudo_rand;
235 relative_position = relative_position * relative_position >> 32;
236 relative_position = reference_area_size - 1 -
237 (reference_area_size * relative_position >> 32);
239 /* 1.2.5 Computing starting position */
242 if (0 != position->pass) {
243 start_position = (position->slice == ARGON2_SYNC_POINTS - 1)
245 : (position->slice + 1) * instance->segment_length;
248 /* 1.2.6. Computing absolute position */
249 absolute_position = (start_position + relative_position) %
250 instance->lane_length; /* absolute position */
251 return absolute_position;
254 /* Single-threaded version for p=1 case */
255 static int fill_memory_blocks_st(argon2_instance_t *instance) {
258 for (r = 0; r < instance->passes; ++r) {
259 for (s = 0; s < ARGON2_SYNC_POINTS; ++s) {
260 for (l = 0; l < instance->lanes; ++l) {
261 argon2_position_t position = {r, l, (uint8_t)s, 0};
262 fill_segment(instance, position);
266 internal_kat(instance, r); /* Print all memory blocks */
272 #if !defined(ARGON2_NO_THREADS)
275 static unsigned __stdcall fill_segment_thr(void *thread_data)
277 static void *fill_segment_thr(void *thread_data)
280 argon2_thread_data *my_data = thread_data;
281 fill_segment(my_data->instance_ptr, my_data->pos);
285 /* Multi-threaded version for p > 1 case */
286 static int fill_memory_blocks_mt(argon2_instance_t *instance) {
288 argon2_thread_handle_t *thread = NULL;
289 argon2_thread_data *thr_data = NULL;
292 /* 1. Allocating space for threads */
293 thread = calloc(instance->lanes, sizeof(argon2_thread_handle_t));
294 if (thread == NULL) {
295 rc = ARGON2_MEMORY_ALLOCATION_ERROR;
299 thr_data = calloc(instance->lanes, sizeof(argon2_thread_data));
300 if (thr_data == NULL) {
301 rc = ARGON2_MEMORY_ALLOCATION_ERROR;
305 for (r = 0; r < instance->passes; ++r) {
306 for (s = 0; s < ARGON2_SYNC_POINTS; ++s) {
309 /* 2. Calling threads */
310 for (l = 0; l < instance->lanes; ++l) {
311 argon2_position_t position;
313 /* 2.1 Join a thread if limit is exceeded */
314 if (l >= instance->threads) {
315 if (argon2_thread_join(thread[l - instance->threads])) {
316 rc = ARGON2_THREAD_FAIL;
321 /* 2.2 Create thread */
324 position.slice = (uint8_t)s;
326 thr_data[l].instance_ptr =
327 instance; /* preparing the thread input */
328 memcpy(&(thr_data[l].pos), &position,
329 sizeof(argon2_position_t));
330 if (argon2_thread_create(&thread[l], &fill_segment_thr,
331 (void *)&thr_data[l])) {
332 /* Wait for already running threads */
333 for (ll = 0; ll < l; ++ll)
334 argon2_thread_join(thread[ll]);
335 rc = ARGON2_THREAD_FAIL;
339 /* fill_segment(instance, position); */
340 /*Non-thread equivalent of the lines above */
343 /* 3. Joining remaining threads */
344 for (l = instance->lanes - instance->threads; l < instance->lanes;
346 if (argon2_thread_join(thread[l])) {
347 rc = ARGON2_THREAD_FAIL;
354 internal_kat(instance, r); /* Print all memory blocks */
359 if (thread != NULL) {
362 if (thr_data != NULL) {
368 #endif /* ARGON2_NO_THREADS */
370 int fill_memory_blocks(argon2_instance_t *instance) {
371 if (instance == NULL || instance->lanes == 0) {
372 return ARGON2_INCORRECT_PARAMETER;
374 #if defined(ARGON2_NO_THREADS)
375 return fill_memory_blocks_st(instance);
377 return instance->threads == 1 ?
378 fill_memory_blocks_st(instance) : fill_memory_blocks_mt(instance);
382 int validate_inputs(const argon2_context *context) {
383 if (NULL == context) {
384 return ARGON2_INCORRECT_PARAMETER;
387 if (NULL == context->out) {
388 return ARGON2_OUTPUT_PTR_NULL;
391 /* Validate output length */
392 if (ARGON2_MIN_OUTLEN > context->outlen) {
393 return ARGON2_OUTPUT_TOO_SHORT;
396 if (ARGON2_MAX_OUTLEN < context->outlen) {
397 return ARGON2_OUTPUT_TOO_LONG;
400 /* Validate password (required param) */
401 if (NULL == context->pwd) {
402 if (0 != context->pwdlen) {
403 return ARGON2_PWD_PTR_MISMATCH;
406 #if ARGON2_MIN_PWD_LENGTH > 0 /* cryptsetup: fix gcc warning */
407 if (ARGON2_MIN_PWD_LENGTH > context->pwdlen) {
408 return ARGON2_PWD_TOO_SHORT;
411 if (ARGON2_MAX_PWD_LENGTH < context->pwdlen) {
412 return ARGON2_PWD_TOO_LONG;
415 /* Validate salt (required param) */
416 if (NULL == context->salt) {
417 if (0 != context->saltlen) {
418 return ARGON2_SALT_PTR_MISMATCH;
422 if (ARGON2_MIN_SALT_LENGTH > context->saltlen) {
423 return ARGON2_SALT_TOO_SHORT;
426 if (ARGON2_MAX_SALT_LENGTH < context->saltlen) {
427 return ARGON2_SALT_TOO_LONG;
430 /* Validate secret (optional param) */
431 if (NULL == context->secret) {
432 if (0 != context->secretlen) {
433 return ARGON2_SECRET_PTR_MISMATCH;
436 #if ARGON2_MIN_SECRET > 0 /* cryptsetup: fix gcc warning */
437 if (ARGON2_MIN_SECRET > context->secretlen) {
438 return ARGON2_SECRET_TOO_SHORT;
441 if (ARGON2_MAX_SECRET < context->secretlen) {
442 return ARGON2_SECRET_TOO_LONG;
446 /* Validate associated data (optional param) */
447 if (NULL == context->ad) {
448 if (0 != context->adlen) {
449 return ARGON2_AD_PTR_MISMATCH;
452 #if ARGON2_MIN_AD_LENGTH > 0 /* cryptsetup: fix gcc warning */
453 if (ARGON2_MIN_AD_LENGTH > context->adlen) {
454 return ARGON2_AD_TOO_SHORT;
457 if (ARGON2_MAX_AD_LENGTH < context->adlen) {
458 return ARGON2_AD_TOO_LONG;
462 /* Validate memory cost */
463 if (ARGON2_MIN_MEMORY > context->m_cost) {
464 return ARGON2_MEMORY_TOO_LITTLE;
466 #if 0 /* UINT32_MAX, cryptsetup: fix gcc warning */
467 if (ARGON2_MAX_MEMORY < context->m_cost) {
468 return ARGON2_MEMORY_TOO_MUCH;
471 if (context->m_cost < 8 * context->lanes) {
472 return ARGON2_MEMORY_TOO_LITTLE;
475 /* Validate time cost */
476 if (ARGON2_MIN_TIME > context->t_cost) {
477 return ARGON2_TIME_TOO_SMALL;
480 if (ARGON2_MAX_TIME < context->t_cost) {
481 return ARGON2_TIME_TOO_LARGE;
485 if (ARGON2_MIN_LANES > context->lanes) {
486 return ARGON2_LANES_TOO_FEW;
489 if (ARGON2_MAX_LANES < context->lanes) {
490 return ARGON2_LANES_TOO_MANY;
493 /* Validate threads */
494 if (ARGON2_MIN_THREADS > context->threads) {
495 return ARGON2_THREADS_TOO_FEW;
498 if (ARGON2_MAX_THREADS < context->threads) {
499 return ARGON2_THREADS_TOO_MANY;
502 if (NULL != context->allocate_cbk && NULL == context->free_cbk) {
503 return ARGON2_FREE_MEMORY_CBK_NULL;
506 if (NULL == context->allocate_cbk && NULL != context->free_cbk) {
507 return ARGON2_ALLOCATE_MEMORY_CBK_NULL;
513 void fill_first_blocks(uint8_t *blockhash, const argon2_instance_t *instance) {
515 /* Make the first and second block in each lane as G(H0||0||i) or
517 uint8_t blockhash_bytes[ARGON2_BLOCK_SIZE];
518 for (l = 0; l < instance->lanes; ++l) {
520 store32(blockhash + ARGON2_PREHASH_DIGEST_LENGTH, 0);
521 store32(blockhash + ARGON2_PREHASH_DIGEST_LENGTH + 4, l);
522 blake2b_long(blockhash_bytes, ARGON2_BLOCK_SIZE, blockhash,
523 ARGON2_PREHASH_SEED_LENGTH);
524 load_block(&instance->memory[l * instance->lane_length + 0],
527 store32(blockhash + ARGON2_PREHASH_DIGEST_LENGTH, 1);
528 blake2b_long(blockhash_bytes, ARGON2_BLOCK_SIZE, blockhash,
529 ARGON2_PREHASH_SEED_LENGTH);
530 load_block(&instance->memory[l * instance->lane_length + 1],
533 clear_internal_memory(blockhash_bytes, ARGON2_BLOCK_SIZE);
536 void initial_hash(uint8_t *blockhash, argon2_context *context,
538 blake2b_state BlakeHash;
539 uint8_t value[sizeof(uint32_t)];
541 if (NULL == context || NULL == blockhash) {
545 blake2b_init(&BlakeHash, ARGON2_PREHASH_DIGEST_LENGTH);
547 store32(&value, context->lanes);
548 blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));
550 store32(&value, context->outlen);
551 blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));
553 store32(&value, context->m_cost);
554 blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));
556 store32(&value, context->t_cost);
557 blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));
559 store32(&value, context->version);
560 blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));
562 store32(&value, (uint32_t)type);
563 blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));
565 store32(&value, context->pwdlen);
566 blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));
568 if (context->pwd != NULL) {
569 blake2b_update(&BlakeHash, (const uint8_t *)context->pwd,
572 if (context->flags & ARGON2_FLAG_CLEAR_PASSWORD) {
573 secure_wipe_memory(context->pwd, context->pwdlen);
578 store32(&value, context->saltlen);
579 blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));
581 if (context->salt != NULL) {
582 blake2b_update(&BlakeHash, (const uint8_t *)context->salt,
586 store32(&value, context->secretlen);
587 blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));
589 if (context->secret != NULL) {
590 blake2b_update(&BlakeHash, (const uint8_t *)context->secret,
593 if (context->flags & ARGON2_FLAG_CLEAR_SECRET) {
594 secure_wipe_memory(context->secret, context->secretlen);
595 context->secretlen = 0;
599 store32(&value, context->adlen);
600 blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));
602 if (context->ad != NULL) {
603 blake2b_update(&BlakeHash, (const uint8_t *)context->ad,
607 blake2b_final(&BlakeHash, blockhash, ARGON2_PREHASH_DIGEST_LENGTH);
610 int initialize(argon2_instance_t *instance, argon2_context *context) {
611 uint8_t blockhash[ARGON2_PREHASH_SEED_LENGTH];
612 int result = ARGON2_OK;
614 if (instance == NULL || context == NULL)
615 return ARGON2_INCORRECT_PARAMETER;
616 instance->context_ptr = context;
618 /* 1. Memory allocation */
619 result = allocate_memory(context, (uint8_t **)&(instance->memory),
620 instance->memory_blocks, sizeof(block));
621 if (result != ARGON2_OK) {
625 /* 2. Initial hashing */
626 /* H_0 + 8 extra bytes to produce the first blocks */
627 /* uint8_t blockhash[ARGON2_PREHASH_SEED_LENGTH]; */
628 /* Hashing all inputs */
629 initial_hash(blockhash, context, instance->type);
630 /* Zeroing 8 extra bytes */
631 clear_internal_memory(blockhash + ARGON2_PREHASH_DIGEST_LENGTH,
632 ARGON2_PREHASH_SEED_LENGTH -
633 ARGON2_PREHASH_DIGEST_LENGTH);
636 initial_kat(blockhash, context, instance->type);
639 /* 3. Creating first blocks, we always have at least two blocks in a slice
641 fill_first_blocks(blockhash, instance);
642 /* Clearing the hash */
643 clear_internal_memory(blockhash, ARGON2_PREHASH_SEED_LENGTH);