2 * LUKS - Linux Unified Key Setup
4 * Copyright (C) 2004-2006 Clemens Fruhwirth <clemens@endorphin.org>
5 * Copyright (C) 2009-2021 Red Hat, Inc. All rights reserved.
6 * Copyright (C) 2013-2021 Milan Broz
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version 2
11 * of the License, or (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
23 #include <sys/types.h>
25 #include <netinet/in.h>
33 #include <uuid/uuid.h>
/*
 * Report the byte offset and byte length of one keyslot's key-material
 * area on the device. Header fields store offsets in 512-byte sectors;
 * they are converted to bytes here.
 */
39 int LUKS_keyslot_area(const struct luks_phdr *hdr,
/* Reject out-of-range keyslot indices (valid range is 0 .. LUKS_NUMKEYS-1). */
44 	if(keyslot >= LUKS_NUMKEYS || keyslot < 0)
/* Widen to 64 bits before multiplying so a large sector offset cannot overflow. */
47 	*offset = (uint64_t)hdr->keyblock[keyslot].keyMaterialOffset * SECTOR_SIZE;
/* Length = sectors needed to hold keyBytes split into LUKS_STRIPES stripes. */
48 	*length = AF_split_sectors(hdr->keyBytes, LUKS_STRIPES) * SECTOR_SIZE;
53 /* Insertion sort: the array has only LUKS_NUMKEYS (8) elements and is
 * usually almost sorted already, so O(n^2) is perfectly adequate here. */
/*
 * Sort keyslot indices in @array ascending by their keyMaterialOffset
 * in @hdr. The header itself is not modified.
 */
54 static void LUKS_sort_keyslots(const struct luks_phdr *hdr, int *array)
58 	for (i = 1; i < LUKS_NUMKEYS; i++) {
/* Shift larger-offset entries right until the insertion point is found. */
60 		while (j > 0 && hdr->keyblock[array[j-1]].keyMaterialOffset > hdr->keyblock[array[j]].keyMaterialOffset) {
62 			array[j] = array[j-1];
/*
 * Return the total size of the LUKS1 header area in 512-byte sectors:
 * the end of the keyslot whose key material lies highest on the device.
 */
69 size_t LUKS_device_sectors(const struct luks_phdr *hdr)
71 	int sorted_areas[LUKS_NUMKEYS] = { 0, 1, 2, 3, 4, 5, 6, 7 };
/* Sort slot indices by on-disk offset so the last entry is the topmost slot. */
73 	LUKS_sort_keyslots(hdr, sorted_areas);
/* End of header = offset of topmost keyslot + its key-material size in sectors. */
75 	return hdr->keyblock[sorted_areas[LUKS_NUMKEYS-1]].keyMaterialOffset + AF_split_sectors(hdr->keyBytes, LUKS_STRIPES);
/*
 * Return the sector offset of the first (lowest) keyslot area,
 * i.e. where keyslot material starts after the binary header.
 */
78 size_t LUKS_keyslots_offset(const struct luks_phdr *hdr)
80 	int sorted_areas[LUKS_NUMKEYS] = { 0, 1, 2, 3, 4, 5, 6, 7 };
/* Sort slot indices by on-disk offset; the first entry is the lowest slot. */
82 	LUKS_sort_keyslots(hdr, sorted_areas);
84 	return hdr->keyblock[sorted_areas[0]].keyMaterialOffset;
/*
 * Verify that the metadata device is large enough to hold the whole
 * LUKS1 header (binary header + all keyslot areas). If @falloc is set
 * and the metadata lives in a detached header file, the file is grown
 * with fallocate instead of failing.
 */
87 static int LUKS_check_device_size(struct crypt_device *ctx, const struct luks_phdr *hdr, int falloc)
89 	struct device *device = crypt_metadata_device(ctx);
90 	uint64_t dev_sectors, hdr_sectors;
95 	if (device_size(device, &dev_sectors)) {
96 		log_dbg(ctx, "Cannot get device size for device %s.", device_path(device));
/* device_size() returns bytes; convert to 512-byte sectors for comparison. */
100 	dev_sectors >>= SECTOR_SHIFT;
101 	hdr_sectors = LUKS_device_sectors(hdr);
102 	log_dbg(ctx, "Key length %u, device size %" PRIu64 " sectors, header size %"
103 		PRIu64 " sectors.", hdr->keyBytes, dev_sectors, hdr_sectors);
105 	if (hdr_sectors > dev_sectors) {
106 		/* If it is header file, increase its size */
107 		if (falloc && !device_fallocate(device, hdr_sectors << SECTOR_SHIFT))
110 		log_err(ctx, _("Device %s is too small. (LUKS1 requires at least %" PRIu64 " bytes.)"),
111 			device_path(device), hdr_sectors * SECTOR_SIZE);
/*
 * Sanity-check all keyslot metadata in @phdr so that later keyslot I/O
 * cannot read or write outside the header/keyslot area. Checks stripe
 * count, minimum offset (past the binary header), containment inside
 * the data-area offset, and mutual non-overlap of keyslot areas.
 */
118 static int LUKS_check_keyslots(struct crypt_device *ctx, const struct luks_phdr *phdr)
120 	int i, prev, next, sorted_areas[LUKS_NUMKEYS] = { 0, 1, 2, 3, 4, 5, 6, 7 };
/* Sectors occupied by one keyslot's anti-forensic-split key material. */
121 	uint32_t secs_per_stripes = AF_split_sectors(phdr->keyBytes, LUKS_STRIPES);
123 	LUKS_sort_keyslots(phdr, sorted_areas);
125 	/* Check keyslot to prevent access outside of header and keyslot area */
126 	for (i = 0; i < LUKS_NUMKEYS; i++) {
127 		/* enforce stripes == 4000 */
128 		if (phdr->keyblock[i].stripes != LUKS_STRIPES) {
129 			log_dbg(ctx, "Invalid stripes count %u in keyslot %u.",
130 				phdr->keyblock[i].stripes, i);
131 			log_err(ctx, _("LUKS keyslot %u is invalid."), i);
135 		/* First sectors is the header itself */
/* Key material must not overlap the binary phdr at the start of the device. */
136 		if (phdr->keyblock[i].keyMaterialOffset * SECTOR_SIZE < sizeof(*phdr)) {
137 			log_dbg(ctx, "Invalid offset %u in keyslot %u.",
138 				phdr->keyblock[i].keyMaterialOffset, i);
139 			log_err(ctx, _("LUKS keyslot %u is invalid."), i);
143 		/* Ignore following check for detached header where offset can be zero. */
144 		if (phdr->payloadOffset == 0)
/* Keyslot must start strictly before the data area does not mean valid:
 * it must start below payloadOffset ... */
147 		if (phdr->payloadOffset <= phdr->keyblock[i].keyMaterialOffset) {
148 			log_dbg(ctx, "Invalid offset %u in keyslot %u (beyond data area offset %u).",
149 				phdr->keyblock[i].keyMaterialOffset, i,
150 				phdr->payloadOffset);
151 			log_err(ctx, _("LUKS keyslot %u is invalid."), i);
/* ... and the whole keyslot area must end at or before payloadOffset. */
155 		if (phdr->payloadOffset < (phdr->keyblock[i].keyMaterialOffset + secs_per_stripes)) {
156 			log_dbg(ctx, "Invalid keyslot size %u (offset %u, stripes %u) in "
157 				"keyslot %u (beyond data area offset %u).",
159 				phdr->keyblock[i].keyMaterialOffset,
160 				phdr->keyblock[i].stripes,
161 				i, phdr->payloadOffset);
162 			log_err(ctx, _("LUKS keyslot %u is invalid."), i);
167 	/* check no keyslot overlaps with each other */
168 	for (i = 1; i < LUKS_NUMKEYS; i++) {
169 		prev = sorted_areas[i-1];
170 		next = sorted_areas[i];
/* Adjacent (in offset order) slots must be at least one keyslot apart. */
171 		if (phdr->keyblock[next].keyMaterialOffset <
172 		    (phdr->keyblock[prev].keyMaterialOffset + secs_per_stripes)) {
173 			log_dbg(ctx, "Not enough space in LUKS keyslot %d.", prev);
174 			log_err(ctx, _("LUKS keyslot %u is invalid."), prev);
178 	/* do not check last keyslot on purpose, it must be tested in device size check */
/* Map a crypt_keyslot_info state to a short string for debug logging. */
183 static const char *dbg_slot_state(crypt_keyslot_info ki)
186 	case CRYPT_SLOT_INACTIVE:
188 	case CRYPT_SLOT_ACTIVE:
190 	case CRYPT_SLOT_ACTIVE_LAST:
191 		return "ACTIVE_LAST";
192 	case CRYPT_SLOT_INVALID:
/*
 * Back up the full LUKS1 header (binary header + all keyslot areas)
 * from the metadata device into @backup_file. The output file is
 * created exclusively (fails if it already exists) and is padded up
 * to a page-size multiple.
 */
198 int LUKS_hdr_backup(const char *backup_file, struct crypt_device *ctx)
200 	struct device *device = crypt_metadata_device(ctx);
201 	struct luks_phdr hdr;
202 	int fd, devfd, r = 0;
/* Read and validate the on-disk header first; its keyslot layout
 * determines how many bytes we must copy. */
208 	r = LUKS_read_phdr(&hdr, 1, 0, ctx);
212 	hdr_size = LUKS_device_sectors(&hdr) << SECTOR_SHIFT;
/* Round the backup file size up to the page size. */
213 	buffer_size = size_round_up(hdr_size, crypt_getpagesize());
215 	buffer = crypt_safe_alloc(buffer_size);
/* Guard against allocation failure and implausible header sizes. */
216 	if (!buffer || hdr_size < LUKS_ALIGN_KEYSLOTS || hdr_size > buffer_size) {
221 	log_dbg(ctx, "Storing backup of header (%zu bytes) and keyslot area (%zu bytes).",
222 		sizeof(hdr), hdr_size - LUKS_ALIGN_KEYSLOTS);
224 	log_dbg(ctx, "Output backup file size: %zu bytes.", buffer_size);
226 	devfd = device_open(ctx, device, O_RDONLY);
228 		log_err(ctx, _("Device %s is not a valid LUKS device."), device_path(device));
233 	if (read_lseek_blockwise(devfd, device_block_size(ctx, device), device_alignment(device),
234 				 buffer, hdr_size, 0) < (ssize_t)hdr_size) {
239 	/* Wipe unused area, so backup cannot contain old signatures */
/* Only safe when the first keyslot starts exactly at the aligned offset,
 * i.e. the gap between phdr and first keyslot is really unused. */
240 	if (hdr.keyblock[0].keyMaterialOffset * SECTOR_SIZE == LUKS_ALIGN_KEYSLOTS)
241 		memset(buffer + sizeof(hdr), 0, LUKS_ALIGN_KEYSLOTS - sizeof(hdr));
/* O_EXCL: never overwrite an existing backup file. */
243 	fd = open(backup_file, O_CREAT|O_EXCL|O_WRONLY, S_IRUSR);
246 			log_err(ctx, _("Requested header backup file %s already exists."), backup_file);
248 			log_err(ctx, _("Cannot create header backup file %s."), backup_file);
252 	ret = write_buffer(fd, buffer, buffer_size);
254 	if (ret < (ssize_t)buffer_size) {
255 		log_err(ctx, _("Cannot write header backup file %s."), backup_file);
/* The in-memory header copy may hold sensitive data; wipe before return. */
262 	crypt_safe_memzero(&hdr, sizeof(hdr));
263 	crypt_safe_free(buffer);
/*
 * Restore a LUKS1 header (binary header + keyslot areas) from
 * @backup_file onto the metadata device. If the device already
 * contains a LUKS header, payload offset and key size must match the
 * backup, and the user is asked to confirm (with an extra warning when
 * the UUIDs differ). On success @hdr is re-read from disk.
 */
267 int LUKS_hdr_restore(
268 	const char *backup_file,
269 	struct luks_phdr *hdr,
270 	struct crypt_device *ctx)
272 	struct device *device = crypt_metadata_device(ctx);
273 	int fd, r = 0, devfd = -1, diff_uuid = 0;
274 	ssize_t ret, buffer_size = 0;
275 	char *buffer = NULL, msg[200];
276 	struct luks_phdr hdr_file;
/* Parse and validate the backup header first to learn its full size. */
278 	r = LUKS_read_phdr_backup(backup_file, &hdr_file, 0, ctx);
283 		buffer_size = LUKS_device_sectors(&hdr_file) << SECTOR_SHIFT;
285 	if (r || buffer_size < LUKS_ALIGN_KEYSLOTS) {
286 		log_err(ctx, _("Backup file does not contain valid LUKS header."));
291 	buffer = crypt_safe_alloc(buffer_size);
297 	fd = open(backup_file, O_RDONLY);
299 		log_err(ctx, _("Cannot open header backup file %s."), backup_file);
304 	ret = read_buffer(fd, buffer, buffer_size);
306 	if (ret < buffer_size) {
307 		log_err(ctx, _("Cannot read header backup file %s."), backup_file);
/* Compare with whatever currently lives on the target device. */
312 	r = LUKS_read_phdr(hdr, 0, 0, ctx);
314 		log_dbg(ctx, "Device %s already contains LUKS header, checking UUID and offset.", device_path(device));
/* Layout parameters must match or the restored header would point at
 * the wrong data/keyslot locations. */
315 		if(hdr->payloadOffset != hdr_file.payloadOffset ||
316 		   hdr->keyBytes != hdr_file.keyBytes) {
317 			log_err(ctx, _("Data offset or key size differs on device and backup, restore failed."));
/* UUID mismatch is not fatal, but the confirmation prompt warns about it. */
321 		if (memcmp(hdr->uuid, hdr_file.uuid, UUID_STRING_L))
325 	if (snprintf(msg, sizeof(msg), _("Device %s %s%s"), device_path(device),
326 		     r ? _("does not contain LUKS header. Replacing header can destroy data on that device.") :
327 			 _("already contains LUKS header. Replacing header will destroy existing keyslots."),
328 		     diff_uuid ? _("\nWARNING: real device header has different UUID than backup!") : "") < 0) {
/* Destructive operation: require explicit user confirmation. */
333 	if (!crypt_confirm(ctx, msg)) {
338 	log_dbg(ctx, "Storing backup of header (%zu bytes) and keyslot area (%zu bytes) to device %s.",
339 		sizeof(*hdr), buffer_size - LUKS_ALIGN_KEYSLOTS, device_path(device));
341 	devfd = device_open(ctx, device, O_RDWR);
344 			log_err(ctx, _("Cannot write to device %s, permission denied."),
345 				device_path(device));
347 			log_err(ctx, _("Cannot open device %s."), device_path(device));
352 	if (write_lseek_blockwise(devfd, device_block_size(ctx, device), device_alignment(device),
353 				  buffer, buffer_size, 0) < buffer_size) {
358 	/* Be sure to reload new data */
359 	r = LUKS_read_phdr(hdr, 1, 0, ctx);
361 	device_sync(ctx, device);
362 	crypt_safe_free(buffer);
366 /* This routine should do some just basic recovery for known problems. */
/*
 * Attempt basic repair of damaged keyslot metadata: regenerate a
 * reference header with the same parameters and copy back the expected
 * offsets/stripes for all keyslots that are not active. Active keyslots
 * are never touched. Refuses to run on non-standard key sizes or
 * unaligned (pre-1.0.1 cryptsetup) keyslot layouts. Writes the repaired
 * header back to disk only if something changed and the result passes
 * LUKS_check_keyslots().
 */
367 static int _keyslot_repair(struct luks_phdr *phdr, struct crypt_device *ctx)
369 	struct luks_phdr temp_phdr;
/* Byte view of the header, used to recognize a stray partition signature. */
370 	const unsigned char *sector = (const unsigned char*)phdr;
371 	struct volume_key *vk;
372 	int i, bad, r, need_write = 0;
/* Only standard key sizes (128/256/512 bit) have a known-good layout. */
374 	if (phdr->keyBytes != 16 && phdr->keyBytes != 32 && phdr->keyBytes != 64) {
375 		log_err(ctx, _("Non standard key size, manual repair required."));
380 	 * cryptsetup 1.0 did not align keyslots to 4k, cannot repair this one
381 	 * Also we cannot trust possibly broken keyslots metadata here through LUKS_keyslots_offset().
382 	 * Expect first keyslot is aligned, if not, then manual repair is neccessary.
384 	if (phdr->keyblock[0].keyMaterialOffset < (LUKS_ALIGN_KEYSLOTS / SECTOR_SIZE)) {
385 		log_err(ctx, _("Non standard keyslots alignment, manual repair required."));
389 	r = LUKS_check_cipher(ctx, phdr->keyBytes, phdr->cipherName, phdr->cipherMode);
/* Throwaway volume key, only needed so LUKS_generate_phdr can run. */
393 	vk = crypt_alloc_volume_key(phdr->keyBytes, NULL);
397 	log_verbose(ctx, _("Repairing keyslots."));
399 	log_dbg(ctx, "Generating second header with the same parameters for check.");
400 	/* cipherName, cipherMode, hashSpec, uuid are already null terminated */
401 	/* payloadOffset - cannot check */
402 	r = LUKS_generate_phdr(&temp_phdr, vk, phdr->cipherName, phdr->cipherMode,
403 			       phdr->hashSpec, phdr->uuid,
404 			       phdr->payloadOffset * SECTOR_SIZE, 0, 0, ctx);
408 	for(i = 0; i < LUKS_NUMKEYS; ++i) {
/* Never modify a keyslot that is marked active — it may hold a real key. */
409 		if (phdr->keyblock[i].active == LUKS_KEY_ENABLED)  {
410 			log_dbg(ctx, "Skipping repair for active keyslot %i.", i);
/* Restore the canonical key-material offset from the reference header. */
415 		if (phdr->keyblock[i].keyMaterialOffset != temp_phdr.keyblock[i].keyMaterialOffset) {
416 			log_err(ctx, _("Keyslot %i: offset repaired (%u -> %u)."), i,
417 				(unsigned)phdr->keyblock[i].keyMaterialOffset,
418 				(unsigned)temp_phdr.keyblock[i].keyMaterialOffset);
419 			phdr->keyblock[i].keyMaterialOffset = temp_phdr.keyblock[i].keyMaterialOffset;
/* Restore the canonical stripe count likewise. */
423 		if (phdr->keyblock[i].stripes != temp_phdr.keyblock[i].stripes) {
424 			log_err(ctx, _("Keyslot %i: stripes repaired (%u -> %u)."), i,
425 				(unsigned)phdr->keyblock[i].stripes,
426 				(unsigned)temp_phdr.keyblock[i].stripes);
427 			phdr->keyblock[i].stripes = temp_phdr.keyblock[i].stripes;
431 		/* Known case - MSDOS partition table signature */
/* Keyslot 6 metadata happens to overlap byte 510/511 of sector 0, where
 * an MSDOS partition table writes its 0x55AA signature. */
432 		if (i == 6 && sector[0x1fe] == 0x55 && sector[0x1ff] == 0xaa) {
433 			log_err(ctx, _("Keyslot %i: bogus partition signature."), i);
/* A slot deemed bad is disabled and its salt/iterations zeroed out. */
438 			log_err(ctx, _("Keyslot %i: salt wiped."), i);
439 			phdr->keyblock[i].active = LUKS_KEY_DISABLED;
440 			memset(&phdr->keyblock[i].passwordSalt, 0x00, LUKS_SALTSIZE);
441 			phdr->keyblock[i].passwordIterations = 0;
449 	 * check repair result before writing because repair can't fix out of order
450 	 * keyslot offsets and would corrupt header again
452 	if (LUKS_check_keyslots(ctx, phdr))
454 	else if (need_write) {
455 		log_verbose(ctx, _("Writing LUKS header to disk."));
456 		r = LUKS_write_phdr(phdr, ctx);
460 		log_err(ctx, _("Repair failed."));
461 	crypt_free_volume_key(vk);
/* temp_phdr contains a digest derived from the throwaway key; wipe it. */
462 	crypt_safe_memzero(&temp_phdr, sizeof(temp_phdr));
/*
 * Validate a raw on-disk LUKS1 header and convert all multi-byte
 * integer fields from network (big-endian) to host byte order in
 * place. Checks magic, version, hash support and keyslot layout, and
 * NUL-terminates the embedded strings. May trigger keyslot repair
 * when requested by the caller (repair flag not visible in this view).
 */
466 static int _check_and_convert_hdr(const char *device,
467 				  struct luks_phdr *hdr,
468 				  int require_luks_device,
470 				  struct crypt_device *ctx)
474 	char luksMagic[] = LUKS_MAGIC;
476 	if(memcmp(hdr->magic, luksMagic, LUKS_MAGIC_L)) { /* Check magic */
477 		log_dbg(ctx, "LUKS header not detected.");
478 		if (require_luks_device)
479 			log_err(ctx, _("Device %s is not a valid LUKS device."), device);
481 	} else if((hdr->version = ntohs(hdr->version)) != 1) {	/* Convert every uint16/32_t item from network byte order */
482 		log_err(ctx, _("Unsupported LUKS version %d."), hdr->version);
/* Force termination before using hashSpec as a C string. */
486 	hdr->hashSpec[LUKS_HASHSPEC_L - 1] = '\0';
487 	if (crypt_hmac_size(hdr->hashSpec) < LUKS_DIGESTSIZE) {
488 		log_err(ctx, _("Requested LUKS hash %s is not supported."), hdr->hashSpec);
492 	/* Header detected */
/* Byte-swap the remaining 32-bit header fields to host order. */
493 	hdr->payloadOffset      = ntohl(hdr->payloadOffset);
494 	hdr->keyBytes           = ntohl(hdr->keyBytes);
495 	hdr->mkDigestIterations = ntohl(hdr->mkDigestIterations);
497 	for(i = 0; i < LUKS_NUMKEYS; ++i) {
498 		hdr->keyblock[i].active             = ntohl(hdr->keyblock[i].active);
499 		hdr->keyblock[i].passwordIterations = ntohl(hdr->keyblock[i].passwordIterations);
500 		hdr->keyblock[i].keyMaterialOffset  = ntohl(hdr->keyblock[i].keyMaterialOffset);
501 		hdr->keyblock[i].stripes            = ntohl(hdr->keyblock[i].stripes);
/* Keyslot layout must be validated only after byte-swapping. */
504 	if (LUKS_check_keyslots(ctx, hdr))
507 	/* Avoid unterminated strings */
508 	hdr->cipherName[LUKS_CIPHERNAME_L - 1] = '\0';
509 	hdr->cipherMode[LUKS_CIPHERMODE_L - 1] = '\0';
510 	hdr->uuid[UUID_STRING_L - 1] = '\0';
514 		r = _keyslot_repair(hdr, ctx);
516 		log_verbose(ctx, _("No known problems detected for LUKS header."));
/* Lower-case @str in place, stopping at NUL or after @max_len characters. */
522 static void _to_lower(char *str, unsigned max_len)
524 	for(; *str && max_len; str++, max_len--)
526 		*str = tolower(*str);
/*
 * Normalize header fields for compatibility with older cryptsetup and
 * dm-crypt: lower-case the hash name and strip any IV specifier from
 * ECB cipher mode (ECB uses no IV).
 */
529 static void LUKS_fix_header_compatible(struct luks_phdr *header)
531 	/* Old cryptsetup expects "sha1", gcrypt allows case insensitive names,
532 	 * so always convert hash to lower case in header */
533 	_to_lower(header->hashSpec, LUKS_HASHSPEC_L);
535 	/* ECB mode does not use IV but dmcrypt silently allows it.
536 	 * Drop any IV here if ECB is used (that is not secure anyway).*/
537 	if (!strncmp(header->cipherMode, "ecb-", 4)) {
/* Clear the whole field first so the result is NUL-padded, then keep "ecb". */
538 		memset(header->cipherMode, 0, LUKS_CIPHERMODE_L);
539 		strcpy(header->cipherMode, "ecb");
/*
 * Read and validate a LUKS1 binary header from a backup file into
 * @hdr (converted to host byte order by _check_and_convert_hdr).
 * Only the phdr itself is read here, not the keyslot areas.
 */
543 int LUKS_read_phdr_backup(const char *backup_file,
544 			  struct luks_phdr *hdr,
545 			  int require_luks_device,
546 			  struct crypt_device *ctx)
548 	ssize_t hdr_size = sizeof(struct luks_phdr);
549 	int devfd = 0, r = 0;
551 	log_dbg(ctx, "Reading LUKS header of size %d from backup file %s",
552 		(int)hdr_size, backup_file);
554 	devfd = open(backup_file, O_RDONLY);
556 		log_err(ctx, _("Cannot open header backup file %s."), backup_file);
/* Short read means a truncated/invalid backup file. */
560 	if (read_buffer(devfd, hdr, hdr_size) < hdr_size)
/* Normalize legacy quirks (hash case, ECB IV) before validation. */
563 		LUKS_fix_header_compatible(hdr);
564 		r = _check_and_convert_hdr(backup_file, hdr,
565 					   require_luks_device, 0, ctx);
/*
 * Read the LUKS1 binary header from the metadata device into @hdr,
 * validate/byte-swap it, and check the device is large enough for the
 * declared keyslot layout. Also disables direct-io for the very old
 * unaligned-keyslot layout produced by cryptsetup 1.0.0.
 */
572 int LUKS_read_phdr(struct luks_phdr *hdr,
573 		   int require_luks_device,
575 		   struct crypt_device *ctx)
578 	struct device *device = crypt_metadata_device(ctx);
579 	ssize_t hdr_size = sizeof(struct luks_phdr);
581 	/* LUKS header starts at offset 0, first keyslot on LUKS_ALIGN_KEYSLOTS */
582 	assert(sizeof(struct luks_phdr) <= LUKS_ALIGN_KEYSLOTS);
584 	/* Stripes count cannot be changed without additional code fixes yet */
585 	assert(LUKS_STRIPES == 4000);
/* Repair mode only makes sense when a valid LUKS device is required. */
587 	if (repair && !require_luks_device)
590 	log_dbg(ctx, "Reading LUKS header of size %zu from device %s",
591 		hdr_size, device_path(device));
593 	devfd = device_open(ctx, device, O_RDONLY);
595 		log_err(ctx, _("Cannot open device %s."), device_path(device));
599 	if (read_lseek_blockwise(devfd, device_block_size(ctx, device), device_alignment(device),
600 				 hdr, hdr_size, 0) < hdr_size)
603 		r = _check_and_convert_hdr(device_path(device), hdr, require_luks_device,
/* Header parsed OK: verify the device can actually hold it (read-only, no fallocate). */
607 		r = LUKS_check_device_size(ctx, hdr, 0);
610 	 * Cryptsetup 1.0.0 did not align keyslots to 4k (very rare version).
611 	 * Disable direct-io to avoid possible IO errors if underlying device
612 	 * has bigger sector size.
614 	if (!r && hdr->keyblock[0].keyMaterialOffset * SECTOR_SIZE < LUKS_ALIGN_KEYSLOTS) {
615 		log_dbg(ctx, "Old unaligned LUKS keyslot detected, disabling direct-io.");
616 		device_disable_direct_io(device);
/*
 * Write @hdr to the metadata device. A local copy is converted to
 * network (big-endian) byte order so the in-memory header stays in
 * host order. After the write, the header is re-read from disk to
 * verify that on-disk and in-memory contents agree.
 */
622 int LUKS_write_phdr(struct luks_phdr *hdr,
623 		    struct crypt_device *ctx)
625 	struct device *device = crypt_metadata_device(ctx);
626 	ssize_t hdr_size = sizeof(struct luks_phdr);
629 	struct luks_phdr convHdr;
632 	log_dbg(ctx, "Updating LUKS header of size %zu on device %s",
633 		sizeof(struct luks_phdr), device_path(device));
/* falloc=1: grow a detached header file if it is too small. */
635 	r = LUKS_check_device_size(ctx, hdr, 1);
639 	devfd = device_open(ctx, device, O_RDWR);
642 			log_err(ctx, _("Cannot write to device %s, permission denied."),
643 				device_path(device));
645 			log_err(ctx, _("Cannot open device %s."), device_path(device));
/* Work on a copy; never leave @hdr byte-swapped. */
649 	memcpy(&convHdr, hdr, hdr_size);
/* Never leak stack/heap contents through the padding area. */
650 	memset(&convHdr._padding, 0, sizeof(convHdr._padding));
652 	/* Convert every uint16/32_t item to network byte order */
653 	convHdr.version            = htons(hdr->version);
654 	convHdr.payloadOffset      = htonl(hdr->payloadOffset);
655 	convHdr.keyBytes           = htonl(hdr->keyBytes);
656 	convHdr.mkDigestIterations = htonl(hdr->mkDigestIterations);
657 	for(i = 0; i < LUKS_NUMKEYS; ++i) {
658 		convHdr.keyblock[i].active             = htonl(hdr->keyblock[i].active);
659 		convHdr.keyblock[i].passwordIterations = htonl(hdr->keyblock[i].passwordIterations);
660 		convHdr.keyblock[i].keyMaterialOffset  = htonl(hdr->keyblock[i].keyMaterialOffset);
661 		convHdr.keyblock[i].stripes            = htonl(hdr->keyblock[i].stripes);
/* Treat a short write as an I/O error. */
664 	r = write_lseek_blockwise(devfd, device_block_size(ctx, device), device_alignment(device),
665 				  &convHdr, hdr_size, 0) < hdr_size ? -EIO : 0;
667 		log_err(ctx, _("Error during update of LUKS header on device %s."), device_path(device));
669 	device_sync(ctx, device);
671 	/* Re-read header from disk to be sure that in-memory and on-disk data are the same. */
673 		r = LUKS_read_phdr(hdr, 1, 0, ctx);
675 			log_err(ctx, _("Error re-reading LUKS header after update on device %s."),
676 				device_path(device));
682 /* Check that kernel supports requested cipher by decryption of one sector */
/*
 * Probe cipher availability: allocate a random throwaway key of
 * @keylength bytes and try to decrypt a single sector with it.
 * Only the operation's success matters, not the decrypted content.
 */
683 int LUKS_check_cipher(struct crypt_device *ctx, size_t keylength, const char *cipher, const char *cipher_mode)
686 	struct volume_key *empty_key;
687 	char buf[SECTOR_SIZE];
689 	log_dbg(ctx, "Checking if cipher %s-%s is usable.", cipher, cipher_mode);
691 	empty_key = crypt_alloc_volume_key(keylength, NULL);
695 	/* No need to get KEY quality random but it must avoid known weak keys. */
696 	r = crypt_random_get(ctx, empty_key->key, empty_key->keylength, CRYPT_RND_NORMAL);
698 		r = LUKS_decrypt_from_storage(buf, sizeof(buf), cipher, cipher_mode, empty_key, 0, ctx);
/* Wipe both the throwaway key and the decrypted sector from memory. */
700 	crypt_free_volume_key(empty_key);
701 	crypt_safe_memzero(buf, sizeof(buf));
/*
 * Build a brand-new LUKS1 header in @header for volume key @vk:
 * lay out the eight keyslot areas (4k-aligned), compute the payload
 * offset from @data_offset / @required_alignment / @align_offset (all
 * in bytes), generate or parse the UUID, pick PBKDF2 iteration count
 * from a benchmark, and compute the master-key digest.
 */
705 int LUKS_generate_phdr(struct luks_phdr *header,
706 	const struct volume_key *vk,
707 	const char *cipherName,
708 	const char *cipherMode,
709 	const char *hashSpec,
711 	uint64_t data_offset,        /* in bytes */
712 	uint64_t align_offset,       /* in bytes */
713 	uint64_t required_alignment, /* in bytes */
714 	struct crypt_device *ctx)
717 	size_t keyslot_sectors, header_sectors;
718 	uuid_t partitionUuid;
719 	struct crypt_pbkdf_type *pbkdf;
721 	char luksMagic[] = LUKS_MAGIC;
/* All byte offsets/alignments must be multiples of the sector size. */
723 	if (data_offset % SECTOR_SIZE || align_offset % SECTOR_SIZE ||
724 	    required_alignment % SECTOR_SIZE)
727 	memset(header, 0, sizeof(struct luks_phdr));
729 	keyslot_sectors = AF_split_sectors(vk->keylength, LUKS_STRIPES);
/* First keyslot starts at the 4k-aligned offset right after the phdr. */
730 	header_sectors = LUKS_ALIGN_KEYSLOTS / SECTOR_SIZE;
/* Lay out keyslots back to back, each start rounded up to 4k alignment. */
732 	for (i = 0; i < LUKS_NUMKEYS; i++) {
733 		header->keyblock[i].active = LUKS_KEY_DISABLED;
734 		header->keyblock[i].keyMaterialOffset = header_sectors;
735 		header->keyblock[i].stripes = LUKS_STRIPES;
736 		header_sectors = size_round_up(header_sectors + keyslot_sectors,
737 					       LUKS_ALIGN_KEYSLOTS / SECTOR_SIZE);
739 	/* In sector is now size of all keyslot material space */
741 	/* Data offset has priority */
743 		header->payloadOffset = data_offset / SECTOR_SIZE;
744 	else if (required_alignment) {
745 		header->payloadOffset = size_round_up(header_sectors, (required_alignment / SECTOR_SIZE));
746 		header->payloadOffset += (align_offset / SECTOR_SIZE);
/* Zero payload offset means detached header (data on another device). */
748 		header->payloadOffset = 0;
/* An explicit payload offset must not overlap the keyslot area. */
750 	if (header->payloadOffset && header->payloadOffset < header_sectors) {
751 		log_err(ctx, _("Data offset for LUKS header must be "
752 			       "either 0 or higher than header size."));
756 	if (crypt_hmac_size(hashSpec) < LUKS_DIGESTSIZE) {
757 		log_err(ctx, _("Requested LUKS hash %s is not supported."), hashSpec);
/* Use the caller-supplied UUID when given, otherwise generate one. */
761 	if (uuid && uuid_parse(uuid, partitionUuid) == -1) {
762 		log_err(ctx, _("Wrong LUKS UUID format provided."));
766 		uuid_generate(partitionUuid);
769 	memcpy(header->magic,luksMagic,LUKS_MAGIC_L);
/* Copy at most L-1 chars; the trailing byte stays NUL from the memset above. */
771 	strncpy(header->cipherName,cipherName,LUKS_CIPHERNAME_L-1);
772 	strncpy(header->cipherMode,cipherMode,LUKS_CIPHERMODE_L-1);
773 	strncpy(header->hashSpec,hashSpec,LUKS_HASHSPEC_L-1);
775 	header->keyBytes=vk->keylength;
777 	LUKS_fix_header_compatible(header);
779 	log_dbg(ctx, "Generating LUKS header version %d using hash %s, %s, %s, MK %d bytes",
780 		header->version, header->hashSpec ,header->cipherName, header->cipherMode,
783 	r = crypt_random_get(ctx, header->mkDigestSalt, LUKS_SALTSIZE, CRYPT_RND_SALT);
785 		log_err(ctx, _("Cannot create LUKS header: reading random salt failed."));
789 	/* Compute master key digest */
790 	pbkdf = crypt_get_pbkdf(ctx);
/* Benchmark PBKDF2 speed to derive an iteration count for the digest. */
791 	r = crypt_benchmark_pbkdf_internal(ctx, pbkdf, vk->keylength);
794 	assert(pbkdf->iterations);
796 	if (pbkdf->flags & CRYPT_PBKDF_NO_BENCHMARK && pbkdf->time_ms == 0)
797 		PBKDF2_temp = LUKS_MKD_ITERATIONS_MIN;
798 	else	/* iterations per ms * LUKS_MKD_ITERATIONS_MS */
799 		PBKDF2_temp = (double)pbkdf->iterations * LUKS_MKD_ITERATIONS_MS / pbkdf->time_ms;
/* Clamp to uint32 range and to the configured minimum iteration count. */
801 	if (PBKDF2_temp > (double)UINT32_MAX)
803 	header->mkDigestIterations = at_least((uint32_t)PBKDF2_temp, LUKS_MKD_ITERATIONS_MIN);
804 	assert(header->mkDigestIterations);
/* mkDigest = PBKDF2(hashSpec, master key, mkDigestSalt) — used later to
 * verify candidate volume keys without storing the key itself. */
806 	r = crypt_pbkdf(CRYPT_KDF_PBKDF2, header->hashSpec, vk->key,vk->keylength,
807 			header->mkDigestSalt, LUKS_SALTSIZE,
808 			header->mkDigest,LUKS_DIGESTSIZE,
809 			header->mkDigestIterations, 0, 0);
811 		log_err(ctx, _("Cannot create LUKS header: header digest failed (using hash %s)."),
816 	uuid_unparse(partitionUuid, header->uuid);
818 	log_dbg(ctx, "Data offset %d, UUID %s, digest iterations %" PRIu32,
819 		header->payloadOffset, header->uuid, header->mkDigestIterations);
/*
 * Set the header UUID — parse the caller-supplied string, or generate
 * a fresh random UUID when none is given — and write the updated
 * header to disk.
 */
824 int LUKS_hdr_uuid_set(
825 	struct luks_phdr *hdr,
827 	struct crypt_device *ctx)
829 	uuid_t partitionUuid;
831 	if (uuid && uuid_parse(uuid, partitionUuid) == -1) {
832 		log_err(ctx, _("Wrong LUKS UUID format provided."));
836 		uuid_generate(partitionUuid);
/* Store the canonical 36-char string form into the header field. */
838 	uuid_unparse(partitionUuid, hdr->uuid);
840 	return LUKS_write_phdr(hdr, ctx);
/*
 * Store volume key @vk into keyslot @keyIndex protected by @password:
 * benchmark PBKDF2, derive the slot key from the passphrase, AF-split
 * the volume key, encrypt the split material into the keyslot area,
 * then mark the slot active and write the header. The slot must be
 * disabled beforehand.
 */
843 int LUKS_set_key(unsigned int keyIndex,
844 		 const char *password, size_t passwordLen,
845 		 struct luks_phdr *hdr, struct volume_key *vk,
846 		 struct crypt_device *ctx)
848 	struct volume_key *derived_key;
851 	struct crypt_pbkdf_type *pbkdf;
/* Refuse to overwrite a slot that is not explicitly disabled. */
854 	if(hdr->keyblock[keyIndex].active != LUKS_KEY_DISABLED) {
855 		log_err(ctx, _("Key slot %d active, purge first."), keyIndex);
859 	/* LUKS keyslot has always at least 4000 stripes according to specification */
860 	if(hdr->keyblock[keyIndex].stripes < 4000) {
861 	        log_err(ctx, _("Key slot %d material includes too few stripes. Header manipulation?"),
866 	log_dbg(ctx, "Calculating data for key slot %d", keyIndex);
867 	pbkdf = crypt_get_pbkdf(ctx);
/* Benchmark to pick an iteration count matching the configured unlock time. */
868 	r = crypt_benchmark_pbkdf_internal(ctx, pbkdf, vk->keylength);
871 	assert(pbkdf->iterations);
874 	 * Final iteration count is at least LUKS_SLOT_ITERATIONS_MIN
876 	hdr->keyblock[keyIndex].passwordIterations =
877 		at_least(pbkdf->iterations, LUKS_SLOT_ITERATIONS_MIN);
878 	log_dbg(ctx, "Key slot %d use %" PRIu32 " password iterations.", keyIndex,
879 		hdr->keyblock[keyIndex].passwordIterations);
881 	derived_key = crypt_alloc_volume_key(hdr->keyBytes, NULL);
/* Fresh random salt per slot; stored in the header for later unlocks. */
885 	r = crypt_random_get(ctx, hdr->keyblock[keyIndex].passwordSalt,
886 		       LUKS_SALTSIZE, CRYPT_RND_SALT);
/* derived_key = PBKDF2(passphrase, slot salt) — used to encrypt the split key. */
890 	r = crypt_pbkdf(CRYPT_KDF_PBKDF2, hdr->hashSpec, password, passwordLen,
891 			hdr->keyblock[keyIndex].passwordSalt, LUKS_SALTSIZE,
892 			derived_key->key, hdr->keyBytes,
893 			hdr->keyblock[keyIndex].passwordIterations, 0, 0);
898 	 * AF splitting, the masterkey stored in vk->key is split to AfKey
900 	assert(vk->keylength == hdr->keyBytes);
901 	AFEKSize = AF_split_sectors(vk->keylength, hdr->keyblock[keyIndex].stripes) * SECTOR_SIZE;
902 	AfKey = crypt_safe_alloc(AFEKSize);
908 	log_dbg(ctx, "Using hash %s for AF in key slot %d, %d stripes",
909 		hdr->hashSpec, keyIndex, hdr->keyblock[keyIndex].stripes);
910 	r = AF_split(ctx, vk->key, AfKey, vk->keylength, hdr->keyblock[keyIndex].stripes, hdr->hashSpec);
914 	log_dbg(ctx, "Updating key slot %d [0x%04x] area.", keyIndex,
915 		hdr->keyblock[keyIndex].keyMaterialOffset << 9);
916 	/* Encryption via dm */
917 	r = LUKS_encrypt_to_storage(AfKey,
919 				    hdr->cipherName, hdr->cipherMode,
921 				    hdr->keyblock[keyIndex].keyMaterialOffset,
926 	/* Mark the key as active in phdr */
927 	r = LUKS_keyslot_set(hdr, (int)keyIndex, 1, ctx);
931 	r = LUKS_write_phdr(hdr, ctx);
/* crypt_safe_free wipes the AF-split key material before releasing it. */
937 	crypt_safe_free(AfKey);
938 	crypt_free_volume_key(derived_key);
942 /* Check whether a volume key is invalid. */
/*
 * Recompute the master-key digest from @vk with the header's salt and
 * iteration count, and compare it to the stored mkDigest. A mismatch
 * means @vk is not this volume's key.
 */
943 int LUKS_verify_volume_key(const struct luks_phdr *hdr,
944 			   const struct volume_key *vk)
946 	char checkHashBuf[LUKS_DIGESTSIZE];
948 	if (crypt_pbkdf(CRYPT_KDF_PBKDF2, hdr->hashSpec, vk->key, vk->keylength,
949 			hdr->mkDigestSalt, LUKS_SALTSIZE,
950 			checkHashBuf, LUKS_DIGESTSIZE,
951 			hdr->mkDigestIterations, 0, 0) < 0)
954 	if (memcmp(checkHashBuf, hdr->mkDigest, LUKS_DIGESTSIZE))
960 /* Try to open a particular key slot */
/*
 * Attempt to unlock keyslot @keyIndex with @password: derive the slot
 * key via PBKDF2, decrypt the keyslot area, AF-merge the split key
 * material into a candidate volume key, and verify it against the
 * master-key digest. On success *vk holds the recovered volume key.
 */
961 static int LUKS_open_key(unsigned int keyIndex,
962 			 const char *password,
964 			 struct luks_phdr *hdr,
965 			 struct volume_key **vk,
966 			 struct crypt_device *ctx)
968 	crypt_keyslot_info ki = LUKS_keyslot_info(hdr, keyIndex);
969 	struct volume_key *derived_key;
974 	log_dbg(ctx, "Trying to open key slot %d [%s].", keyIndex,
/* Only active slots can be opened; inactive/invalid fail immediately. */
977 	if (ki < CRYPT_SLOT_ACTIVE)
980 	derived_key = crypt_alloc_volume_key(hdr->keyBytes, NULL);
984 	*vk = crypt_alloc_volume_key(hdr->keyBytes, NULL);
/* Buffer for the AF-split key material read back from the keyslot area. */
990 	AFEKSize = AF_split_sectors(hdr->keyBytes, hdr->keyblock[keyIndex].stripes) * SECTOR_SIZE;
991 	AfKey = crypt_safe_alloc(AFEKSize);
997 	r = crypt_pbkdf(CRYPT_KDF_PBKDF2, hdr->hashSpec, password, passwordLen,
998 			hdr->keyblock[keyIndex].passwordSalt, LUKS_SALTSIZE,
999 			derived_key->key, hdr->keyBytes,
1000 			hdr->keyblock[keyIndex].passwordIterations, 0, 0);
1002 		log_err(ctx, _("Cannot open keyslot (using hash %s)."), hdr->hashSpec);
1006 	log_dbg(ctx, "Reading key slot %d area.", keyIndex);
1007 	r = LUKS_decrypt_from_storage(AfKey,
1009 				      hdr->cipherName, hdr->cipherMode,
1011 				      hdr->keyblock[keyIndex].keyMaterialOffset,
/* Reassemble the volume key from its anti-forensic stripes. */
1016 	r = AF_merge(ctx, AfKey, (*vk)->key, (*vk)->keylength, hdr->keyblock[keyIndex].stripes, hdr->hashSpec);
/* Wrong passphrase yields a key that fails the digest check here. */
1020 	r = LUKS_verify_volume_key(hdr, *vk);
1022 	/* Allow only empty passphrase with null cipher */
/* With cipher_null any passphrase "decrypts"; restrict to empty passphrase. */
1023 	if (!r && crypt_is_cipher_null(hdr->cipherName) && passwordLen)
1027 		crypt_free_volume_key(*vk);
1030 	crypt_safe_free(AfKey);
1031 	crypt_free_volume_key(derived_key);
/*
 * Unlock a volume key with @password. A non-negative @keyIndex tries
 * only that slot; a negative index tries all slots in order. Returns
 * the slot number that opened, or -EPERM (passphrase tried and wrong)
 * / -ENOENT (no active slot tried) / other negative errno.
 */
1035 int LUKS_open_key_with_hdr(int keyIndex,
1036 			   const char *password,
1038 			   struct luks_phdr *hdr,
1039 			   struct volume_key **vk,
1040 			   struct crypt_device *ctx)
1042 	unsigned int i, tried = 0;
/* Explicit slot requested: try just that one. */
1045 	if (keyIndex >= 0) {
1046 		r = LUKS_open_key(keyIndex, password, passwordLen, hdr, vk, ctx);
1047 		return (r < 0) ? r : keyIndex;
1050 	for (i = 0; i < LUKS_NUMKEYS; i++) {
1051 		r = LUKS_open_key(i, password, passwordLen, hdr, vk, ctx);
1055 		/* Do not retry for errors that are no -EPERM or -ENOENT,
1056 		   former meaning password wrong, latter key slot inactive */
1057 		if ((r != -EPERM) && (r != -ENOENT))
1062 	/* Warning, early returns above */
/* tried > 0 means at least one active slot rejected the passphrase. */
1063 	return tried ? -EPERM : -ENOENT;
/*
 * Permanently remove keyslot @keyIndex: mark it disabled, securely
 * wipe its key-material area on the device, clear its salt and
 * iteration count in the header, and write the header back.
 */
1066 int LUKS_del_key(unsigned int keyIndex,
1067 		 struct luks_phdr *hdr,
1068 		 struct crypt_device *ctx)
1070 	struct device *device = crypt_metadata_device(ctx);
1071 	unsigned int startOffset, endOffset;
/* Refresh the header from disk before modifying it. */
1074 	r = LUKS_read_phdr(hdr, 1, 0, ctx);
1078 	r = LUKS_keyslot_set(hdr, keyIndex, 0, ctx);
1080 		log_err(ctx, _("Key slot %d is invalid, please select keyslot between 0 and %d."),
1081 			keyIndex, LUKS_NUMKEYS - 1);
1085 	/* secure deletion of key material */
/* Slot boundaries in sectors, from its offset and AF-split size. */
1086 	startOffset = hdr->keyblock[keyIndex].keyMaterialOffset;
1087 	endOffset = startOffset + AF_split_sectors(hdr->keyBytes, hdr->keyblock[keyIndex].stripes);
/* CRYPT_WIPE_SPECIAL: multi-pass overwrite of the key material. */
1089 	r = crypt_wipe_device(ctx, device, CRYPT_WIPE_SPECIAL, startOffset * SECTOR_SIZE,
1090 			      (endOffset - startOffset) * SECTOR_SIZE,
1091 			      (endOffset - startOffset) * SECTOR_SIZE, NULL, NULL);
1094 			log_err(ctx, _("Cannot write to device %s, permission denied."),
1095 				device_path(device));
1098 			log_err(ctx, _("Cannot wipe device %s."),
1099 				device_path(device));
1103 	/* Wipe keyslot info */
1104 	memset(&hdr->keyblock[keyIndex].passwordSalt, 0, LUKS_SALTSIZE);
1105 	hdr->keyblock[keyIndex].passwordIterations = 0;
1107 	r = LUKS_write_phdr(hdr, ctx);
/*
 * Classify keyslot @keyslot: INVALID for a bad index or unknown active
 * flag, INACTIVE when disabled, ACTIVE_LAST when it is the only active
 * slot, otherwise ACTIVE.
 */
1112 crypt_keyslot_info LUKS_keyslot_info(struct luks_phdr *hdr, int keyslot)
1116 	if(keyslot >= LUKS_NUMKEYS || keyslot < 0)
1117 		return CRYPT_SLOT_INVALID;
1119 	if (hdr->keyblock[keyslot].active == LUKS_KEY_DISABLED)
1120 		return CRYPT_SLOT_INACTIVE;
/* Any value other than the two defined constants means corruption. */
1122 	if (hdr->keyblock[keyslot].active != LUKS_KEY_ENABLED)
1123 		return CRYPT_SLOT_INVALID;
/* If any other slot is also enabled, this is not the last active one. */
1125 	for(i = 0; i < LUKS_NUMKEYS; i++)
1126 		if(i != keyslot && hdr->keyblock[i].active == LUKS_KEY_ENABLED)
1127 			return CRYPT_SLOT_ACTIVE;
1129 	return CRYPT_SLOT_ACTIVE_LAST;
/* Return the index of the first disabled (free) keyslot, scanning in order. */
1132 int LUKS_keyslot_find_empty(struct luks_phdr *hdr)
1136 	for (i = 0; i < LUKS_NUMKEYS; i++)
1137 		if(hdr->keyblock[i].active == LUKS_KEY_DISABLED)
/* Loop ran to the end: every slot is occupied. */
1140 	if (i == LUKS_NUMKEYS)
/* Count how many keyslots are currently marked active (enabled). */
1146 int LUKS_keyslot_active_count(struct luks_phdr *hdr)
1150 	for (i = 0; i < LUKS_NUMKEYS; i++)
1151 		if(hdr->keyblock[i].active == LUKS_KEY_ENABLED)
/*
 * Flip keyslot @keyslot's active flag in the in-memory header only
 * (no disk write here). Fails for an invalid slot index/state.
 */
1157 int LUKS_keyslot_set(struct luks_phdr *hdr, int keyslot, int enable, struct crypt_device *ctx)
1159 	crypt_keyslot_info ki = LUKS_keyslot_info(hdr, keyslot);
1161 	if (ki == CRYPT_SLOT_INVALID)
1164 	hdr->keyblock[keyslot].active = enable ? LUKS_KEY_ENABLED : LUKS_KEY_DISABLED;
1165 	log_dbg(ctx, "Key slot %d was %s in LUKS header.", keyslot, enable ? "enabled" : "disabled");
/*
 * Activate a LUKS1 volume: build the dm-crypt target from the device
 * context and volume key @vk, then create (or reload) the device-mapper
 * device under @name.
 */
1169 int LUKS1_activate(struct crypt_device *cd,
1171 		   struct volume_key *vk,
1175 	struct crypt_dm_active_device dmd = {
1177 		.uuid   = crypt_get_uuid(cd),
/* Fill the crypt target: cipher spec, IV/data offsets, integrity, sector size. */
1180 	r = dm_crypt_target_set(&dmd.segment, 0, dmd.size, crypt_data_device(cd),
1181 			vk, crypt_get_cipher_spec(cd), crypt_get_iv_offset(cd),
1182 			crypt_get_data_offset(cd), crypt_get_integrity(cd),
1183 			crypt_get_integrity_tag_size(cd), crypt_get_sector_size(cd));
1185 		r = create_or_reload_device(cd, name, CRYPT_LUKS1, &dmd);
/* Release target-list resources regardless of activation outcome. */
1187 	dm_targets_free(cd, &dmd);
/*
 * Wipe the whole LUKS1 header region: first zero the header + padding
 * up to the payload offset (or a minimal area for detached/bogus
 * headers), then overwrite each keyslot area with random data.
 * Keyslots with implausible geometry are skipped rather than failed.
 */
1192 int LUKS_wipe_header_areas(struct luks_phdr *hdr,
1193 	struct crypt_device *ctx)
1196 	uint64_t offset, length;
1199 	/* Wipe complete header, keyslots and padding areas with zeroes. */
1201 	length = (uint64_t)hdr->payloadOffset * SECTOR_SIZE;
/* 1 MiB wipe granularity. */
1202 	wipe_block = 1024 * 1024;
1204 	/* On detached header or bogus header, wipe at least the first 4k */
1205 	if (length == 0 || length > (LUKS_MAX_KEYSLOT_SIZE * LUKS_NUMKEYS)) {
1210 	log_dbg(ctx, "Wiping LUKS areas (0x%06" PRIx64 " - 0x%06" PRIx64") with zeroes.",
1211 		offset, length + offset);
1213 	r = crypt_wipe_device(ctx, crypt_metadata_device(ctx), CRYPT_WIPE_ZERO,
1214 			      offset, length, wipe_block, NULL, NULL);
1218 	/* Wipe keyslots areas */
1219 	wipe_block = 1024 * 1024;
1220 	for (i = 0; i < LUKS_NUMKEYS; i++) {
1221 		r = LUKS_keyslot_area(hdr, i, &offset, &length);
1225 		/* Ignore too big LUKS1 keyslots here */
/* Skip geometry that would overflow or exceed the maximum keyslot size. */
1226 		if (length > LUKS_MAX_KEYSLOT_SIZE ||
1227 		    offset > (LUKS_MAX_KEYSLOT_SIZE - length))
/* Never wipe below 4k — that is where the binary header lives. */
1230 		if (length == 0 || offset < 4096)
1233 		log_dbg(ctx, "Wiping keyslot %i area (0x%06" PRIx64 " - 0x%06" PRIx64") with random data.",
1234 			i, offset, length + offset);
/* Random data (not zeroes) so old key material cannot be distinguished. */
1236 		r = crypt_wipe_device(ctx, crypt_metadata_device(ctx), CRYPT_WIPE_RANDOM,
1237 				      offset, length, wipe_block, NULL, NULL);
1245 int LUKS_keyslot_pbkdf(struct luks_phdr *hdr, int keyslot, struct crypt_pbkdf_type *pbkdf)
1247 if (LUKS_keyslot_info(hdr, keyslot) < CRYPT_SLOT_ACTIVE)
1250 pbkdf->type = CRYPT_KDF_PBKDF2;
1251 pbkdf->hash = hdr->hashSpec;
1252 pbkdf->iterations = hdr->keyblock[keyslot].passwordIterations;
1253 pbkdf->max_memory_kb = 0;
1254 pbkdf->parallel_threads = 0;