2 * libcryptsetup - cryptsetup library
4 * Copyright (C) 2004 Jana Saout <jana@saout.de>
5 * Copyright (C) 2004-2007 Clemens Fruhwirth <clemens@endorphin.org>
6 * Copyright (C) 2009-2023 Red Hat, Inc. All rights reserved.
7 * Copyright (C) 2009-2023 Milan Broz
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version 2
12 * of the License, or (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
28 #include <sys/utsname.h>
31 #include "libcryptsetup.h"
32 #include "luks1/luks.h"
33 #include "luks2/luks2.h"
34 #include "loopaes/loopaes.h"
35 #include "verity/verity.h"
36 #include "tcrypt/tcrypt.h"
37 #include "integrity/integrity.h"
38 #include "bitlk/bitlk.h"
39 #include "fvault2/fvault2.h"
40 #include "utils_device_locking.h"
42 #include "keyslot_context.h"
44 #define CRYPT_CD_UNRESTRICTED (1 << 0)
45 #define CRYPT_CD_QUIET (1 << 1)
50 struct device *device;
51 struct device *metadata_device;
53 struct volume_key *volume_key;
55 uint32_t compatibility;
56 struct crypt_pbkdf_type pbkdf;
58 /* global context scope settings */
59 unsigned key_in_keyring:1;
62 uint64_t metadata_size; /* Used in LUKS2 format */
63 uint64_t keyslots_size; /* Used in LUKS2 format */
65 /* Workaround for OOM during parallel activation (like in systemd) */
66 bool memory_hard_pbkdf_lock_enabled;
67 struct crypt_lock_handle *pbkdf_memory_hard_lock;
70 struct { /* used in CRYPT_LUKS1 */
74 struct { /* used in CRYPT_LUKS2 */
76 char cipher[MAX_CIPHER_LEN]; /* only for compatibility */
77 char cipher_mode[MAX_CIPHER_LEN]; /* only for compatibility */
79 unsigned int keyslot_key_size;
80 struct luks2_reencrypt *rh;
82 struct { /* used in CRYPT_PLAIN */
83 struct crypt_params_plain hdr;
86 const char *cipher_mode;
87 unsigned int key_size;
89 struct { /* used in CRYPT_LOOPAES */
90 struct crypt_params_loopaes hdr;
93 const char *cipher_mode;
94 unsigned int key_size;
96 struct { /* used in CRYPT_VERITY */
97 struct crypt_params_verity hdr;
98 const char *root_hash;
99 unsigned int root_hash_size;
101 struct device *fec_device;
103 struct { /* used in CRYPT_TCRYPT */
104 struct crypt_params_tcrypt params;
105 struct tcrypt_phdr hdr;
107 struct { /* used in CRYPT_INTEGRITY */
108 struct crypt_params_integrity params;
109 struct volume_key *journal_mac_key;
110 struct volume_key *journal_crypt_key;
113 struct { /* used in CRYPT_BITLK */
114 struct bitlk_metadata params;
117 struct { /* used in CRYPT_FVAULT2 */
118 struct fvault2_params params;
120 struct { /* used if initialized without header by name */
122 /* buffers, must refresh from kernel on every query */
123 char cipher_spec[MAX_CIPHER_LEN*2+1];
124 char cipher[MAX_CIPHER_LEN];
125 const char *cipher_mode;
126 unsigned int key_size;
130 /* callbacks definitions */
131 void (*log)(int level, const char *msg, void *usrptr);
133 int (*confirm)(const char *msg, void *usrptr);
134 void *confirm_usrptr;
137 /* Just to suppress redundant messages about crypto backend */
138 static int _crypto_logged = 0;
141 static void (*_default_log)(int level, const char *msg, void *usrptr) = NULL;
142 static void *_default_log_usrptr = NULL;
143 static int _debug_level = 0;
145 /* Library can do metadata locking */
146 static int _metadata_locking = 1;
148 /* Library scope detection for kernel keyring support */
149 static int _kernel_keyring_supported;
151 /* Library allowed to use kernel keyring for loading VK in kernel crypto layer */
152 static int _vk_via_keyring = 1;
/* Set the library-wide debug log level (stored in the file-scope
 * _debug_level; used by crypt_log() to filter messages). */
154 void crypt_set_debug_level(int level)
156 	_debug_level = level;
/* Return the current library-wide debug log level.
 * NOTE(review): the body is elided in this extract (source lines missing);
 * presumably it returns _debug_level — confirm against the full file. */
159 int crypt_get_debug_level(void)
/* Dispatch one log message: drop it when below _debug_level, otherwise
 * route to the per-device callback (cd->log), then the global default
 * callback, and finally plain stdout/stderr.
 * NOTE(review): several lines are elided in this extract — the NULL checks
 * on cd / cd->log and the early return between these branches are not
 * visible here; do not assume their exact order. */
164 void crypt_log(struct crypt_device *cd, int level, const char *msg)
169 	if (level < _debug_level)
173 		cd->log(level, msg, cd->log_usrptr);
174 	else if (_default_log)
175 		_default_log(level, msg, _default_log_usrptr);
176 	/* Default to stdout/stderr if there is no callback. */
178 		fprintf(level == CRYPT_LOG_ERROR ? stderr : stdout, "%s", msg);
/* printf-style wrapper around crypt_log(): format into a bounded local
 * buffer (LOG_MAX_LEN payload + room for the appended EOL) and forward.
 * Verbose/error/debug levels get a trailing newline to match tool output.
 * NOTE(review): the va_list declaration, va_end() and the 'len' declaration
 * are elided in this extract. */
181 __attribute__((format(printf, 3, 4)))
182 void crypt_logf(struct crypt_device *cd, int level, const char *format, ...)
185 	char target[LOG_MAX_LEN + 2];
188 	va_start(argp, format);
190 	len = vsnprintf(&target[0], LOG_MAX_LEN, format, argp);
	/* Only forward complete (non-truncated) messages. */
191 	if (len > 0 && len < LOG_MAX_LEN) {
192 		/* All verbose and error messages in tools end with EOL. */
193 		if (level == CRYPT_LOG_VERBOSE || level == CRYPT_LOG_ERROR ||
194 		    level == CRYPT_LOG_DEBUG || level == CRYPT_LOG_DEBUG_JSON)
195 			strncat(target, "\n", LOG_MAX_LEN);
197 	crypt_log(cd, level, target);
/* Path of the metadata (header) device; falls back to the data device
 * when no detached header was set. */
203 static const char *mdata_device_path(struct crypt_device *cd)
205 	return device_path(cd->metadata_device ?: cd->device);
/* Path of the ciphertext data device. */
208 static const char *data_device_path(struct crypt_device *cd)
210 	return device_path(cd->device);
/* Device holding on-disk metadata: detached header if set, else the
 * data device itself. */
214 struct device *crypt_metadata_device(struct crypt_device *cd)
216 	return cd->metadata_device ?: cd->device;
/* Device holding the ciphertext data.
 * NOTE(review): the body is elided in this extract; presumably it returns
 * cd->device — confirm against the full file. */
219 struct device *crypt_data_device(struct crypt_device *cd)
224 int init_crypto(struct crypt_device *ctx)
229 r = crypt_random_init(ctx);
231 log_err(ctx, _("Cannot initialize crypto RNG backend."));
235 r = crypt_backend_init(crypt_fips_mode());
237 log_err(ctx, _("Cannot initialize crypto backend."));
239 if (!r && !_crypto_logged) {
240 log_dbg(ctx, "Crypto backend (%s) initialized in cryptsetup library version %s.",
241 crypt_backend_version(), PACKAGE_VERSION);
243 log_dbg(ctx, "Detected kernel %s %s %s.",
244 uts.sysname, uts.release, uts.machine);
251 static int process_key(struct crypt_device *cd, const char *hash_name,
252 size_t key_size, const char *pass, size_t passLen,
253 struct volume_key **vk)
260 *vk = crypt_alloc_volume_key(key_size, NULL);
265 r = crypt_plain_hash(cd, hash_name, (*vk)->key, key_size, pass, passLen);
268 log_err(cd, _("Hash algorithm %s not supported."),
271 log_err(cd, _("Key processing error (using hash %s)."),
273 crypt_free_volume_key(*vk);
277 } else if (passLen > key_size) {
278 memcpy((*vk)->key, pass, key_size);
280 memcpy((*vk)->key, pass, passLen);
/* Device-type predicates: each returns non-zero when the (NULL-checked)
 * 'type' string equals the corresponding CRYPT_* type constant. */
286 static int isPLAIN(const char *type)
288 	return (type && !strcmp(CRYPT_PLAIN, type));
291 static int isLUKS1(const char *type)
293 	return (type && !strcmp(CRYPT_LUKS1, type));
296 static int isLUKS2(const char *type)
298 	return (type && !strcmp(CRYPT_LUKS2, type));
/* Either LUKS version. */
301 static int isLUKS(const char *type)
303 	return (isLUKS2(type) || isLUKS1(type));
306 static int isLOOPAES(const char *type)
308 	return (type && !strcmp(CRYPT_LOOPAES, type));
311 static int isVERITY(const char *type)
313 	return (type && !strcmp(CRYPT_VERITY, type));
316 static int isTCRYPT(const char *type)
318 	return (type && !strcmp(CRYPT_TCRYPT, type));
321 static int isINTEGRITY(const char *type)
323 	return (type && !strcmp(CRYPT_INTEGRITY, type));
326 static int isBITLK(const char *type)
328 	return (type && !strcmp(CRYPT_BITLK, type));
331 static int isFVAULT2(const char *type)
333 	return (type && !strcmp(CRYPT_FVAULT2, type));
/* Guard: succeed only for an initialized LUKS (1 or 2) context.
 * cdflags: CRYPT_CD_QUIET suppresses error messages, CRYPT_CD_UNRESTRICTED
 * skips the LUKS2 unmet-requirements check.
 * NOTE(review): error-code assignments and early returns are elided in
 * this extract; 'r' is set on the elided lines. */
336 static int _onlyLUKS(struct crypt_device *cd, uint32_t cdflags)
340 	if (cd && !cd->type) {
341 		if (!(cdflags & CRYPT_CD_QUIET))
342 			log_err(cd, _("Cannot determine device type. Incompatible activation of device?"));
346 	if (!cd || !isLUKS(cd->type)) {
347 		if (!(cdflags & CRYPT_CD_QUIET))
348 			log_err(cd, _("This operation is supported only for LUKS device."));
	/* LUKS1 has no requirements flags; only LUKS2 is checked below. */
352 	if (r || (cdflags & CRYPT_CD_UNRESTRICTED) || isLUKS1(cd->type))
355 	return LUKS2_unmet_requirements(cd, &cd->u.luks2.hdr, 0, cdflags & CRYPT_CD_QUIET);
/* Convenience wrapper: strict LUKS check with default flags. */
358 static int onlyLUKS(struct crypt_device *cd)
360 	return _onlyLUKS(cd, 0);
/* Guard: succeed only for an initialized LUKS2 context; 'mask' selects
 * which LUKS2 requirement flags are tolerated. */
363 static int _onlyLUKS2(struct crypt_device *cd, uint32_t cdflags, uint32_t mask)
367 	if (cd && !cd->type) {
368 		if (!(cdflags & CRYPT_CD_QUIET))
369 			log_err(cd, _("Cannot determine device type. Incompatible activation of device?"));
373 	if (!cd || !isLUKS2(cd->type)) {
374 		if (!(cdflags & CRYPT_CD_QUIET))
375 			log_err(cd, _("This operation is supported only for LUKS2 device."));
379 	if (r || (cdflags & CRYPT_CD_UNRESTRICTED))
382 	return LUKS2_unmet_requirements(cd, &cd->u.luks2.hdr, mask, cdflags & CRYPT_CD_QUIET);
/* Strict LUKS2 check, no tolerated requirement flags. */
386 int onlyLUKS2(struct crypt_device *cd)
388 	return _onlyLUKS2(cd, 0, 0);
/* LUKS2 check tolerating the requirement flags in 'mask'. */
392 int onlyLUKS2mask(struct crypt_device *cd, uint32_t mask)
394 	return _onlyLUKS2(cd, 0, mask);
/* Reset the context to the "no type" state: clear LUKS2 sizing hints and
 * securely wipe the whole type-specific union.
 * NOTE(review): lines clearing cd->type / cd->data_offset are elided in
 * this extract. */
397 static void crypt_set_null_type(struct crypt_device *cd)
402 	cd->metadata_size = 0;
403 	cd->keyslots_size = 0;
404 	crypt_safe_memzero(&cd->u, sizeof(cd->u));
/* Drop the remembered active device name kept for a type-less context
 * (initialized by name without a header). */
407 static void crypt_reset_null_type(struct crypt_device *cd)
412 	free(cd->u.none.active_name);
413 	cd->u.none.active_name = NULL;
/* Validate *keyslot, or pick a free slot when CRYPT_ANY_SLOT was passed.
 * On success *keyslot holds a usable slot number.
 * NOTE(review): the LUKS1/LUKS2 else-branches, error returns and the
 * switch(ki) opening are elided in this extract. */
417 static int keyslot_verify_or_find_empty(struct crypt_device *cd, int *keyslot)
419 	crypt_keyslot_info ki;
421 	if (*keyslot == CRYPT_ANY_SLOT) {
422 		if (isLUKS1(cd->type))
423 			*keyslot = LUKS_keyslot_find_empty(&cd->u.luks1.hdr);
425 			*keyslot = LUKS2_keyslot_find_empty(cd, &cd->u.luks2.hdr, 0);
427 			log_err(cd, _("All key slots full."));
	/* Explicit slot requested: query its state per LUKS version. */
432 	if (isLUKS1(cd->type))
433 		ki = LUKS_keyslot_info(&cd->u.luks1.hdr, *keyslot);
435 		ki = LUKS2_keyslot_info(&cd->u.luks2.hdr, *keyslot);
437 	case CRYPT_SLOT_INVALID:
438 		log_err(cd, _("Key slot %d is invalid, please select between 0 and %d."),
439 			*keyslot, crypt_keyslot_max(cd->type) - 1);
441 	case CRYPT_SLOT_INACTIVE:
444 		log_err(cd, _("Key slot %d is full, please select another one."),
449 	log_dbg(cd, "Selected keyslot %d.", *keyslot);
/*
454  * compares UUIDs returned by device-mapper (striped by cryptsetup) and uuid in header
 * The DM UUID embeds the header UUID with its dashes stripped after the
 * first '-'; this walks both strings, skipping '-' in the header UUID.
 * NOTE(review): the return statements, the 'str'/'i'/'j' declarations and
 * the j increment are elided in this extract.
 */
456 int crypt_uuid_cmp(const char *dm_uuid, const char *hdr_uuid)
461 	if (!dm_uuid || !hdr_uuid)
	/* Position after the subsystem prefix (text before the first '-'). */
464 	str = strchr(dm_uuid, '-');
468 	for (i = 0, j = 1; hdr_uuid[i]; i++) {
469 		if (hdr_uuid[i] == '-')
472 		if (!str[j] || str[j] == '-')
475 		if (str[j] != hdr_uuid[i])
484 * compares type of active device to provided string (only if there is no explicit type)
486 static int crypt_uuid_type_cmp(struct crypt_device *cd, const char *type)
488 struct crypt_dm_active_device dmd;
492 /* Must use header-on-disk if we know the type here */
493 if (cd->type || !cd->u.none.active_name)
496 log_dbg(cd, "Checking if active device %s without header has UUID type %s.",
497 cd->u.none.active_name, type);
499 r = dm_query_device(cd, cd->u.none.active_name, DM_ACTIVE_UUID, &dmd);
505 if (dmd.uuid && strlen(dmd.uuid) > len &&
506 !strncmp(dmd.uuid, type, len) && dmd.uuid[len] == '-')
509 free(CONST_CAST(void*)dmd.uuid);
513 int PLAIN_activate(struct crypt_device *cd,
515 struct volume_key *vk,
520 struct crypt_dm_active_device dmd = {
525 log_dbg(cd, "Trying to activate PLAIN device %s using cipher %s.",
526 name, crypt_get_cipher_spec(cd));
528 if (MISALIGNED(size, device_block_size(cd, crypt_data_device(cd)) >> SECTOR_SHIFT)) {
529 log_err(cd, _("Device size is not aligned to device logical block size."));
533 r = dm_crypt_target_set(&dmd.segment, 0, dmd.size, crypt_data_device(cd),
534 vk, crypt_get_cipher_spec(cd), crypt_get_iv_offset(cd),
535 crypt_get_data_offset(cd), crypt_get_integrity(cd),
536 crypt_get_integrity_tag_size(cd), crypt_get_sector_size(cd));
540 r = create_or_reload_device(cd, name, CRYPT_PLAIN, &dmd);
542 dm_targets_free(cd, &dmd);
546 int crypt_confirm(struct crypt_device *cd, const char *msg)
548 if (!cd || !cd->confirm)
551 return cd->confirm(msg, cd->confirm_usrptr);
554 void crypt_set_log_callback(struct crypt_device *cd,
555 void (*log)(int level, const char *msg, void *usrptr),
560 _default_log_usrptr = usrptr;
563 cd->log_usrptr = usrptr;
567 void crypt_set_confirm_callback(struct crypt_device *cd,
568 int (*confirm)(const char *msg, void *usrptr),
572 cd->confirm = confirm;
573 cd->confirm_usrptr = usrptr;
577 const char *crypt_get_dir(void)
582 int crypt_init(struct crypt_device **cd, const char *device)
584 struct crypt_device *h = NULL;
590 log_dbg(NULL, "Allocating context for crypt device %s.", device ?: "(none)");
591 #if !HAVE_DECL_O_CLOEXEC
592 log_dbg(NULL, "Running without O_CLOEXEC.");
595 if (!(h = malloc(sizeof(struct crypt_device))))
598 memset(h, 0, sizeof(*h));
600 r = device_alloc(NULL, &h->device, device);
606 dm_backend_init(NULL);
608 h->rng_type = crypt_random_default_key_rng();
614 static int crypt_check_data_device_size(struct crypt_device *cd)
617 uint64_t size, size_min;
619 /* Check data device size, require at least header or one sector */
620 size_min = crypt_get_data_offset(cd) << SECTOR_SHIFT ?: SECTOR_SIZE;
622 r = device_size(cd->device, &size);
626 if (size < size_min) {
627 log_err(cd, _("Header detected but device %s is too small."),
628 device_path(cd->device));
635 static int _crypt_set_data_device(struct crypt_device *cd, const char *device)
637 struct device *dev = NULL;
640 r = device_alloc(cd, &dev, device);
644 if (!cd->metadata_device) {
645 cd->metadata_device = cd->device;
647 device_free(cd, cd->device);
651 r = crypt_check_data_device_size(cd);
652 if (!r && isLUKS2(cd->type))
653 device_set_block_size(crypt_data_device(cd), LUKS2_get_sector_size(&cd->u.luks2.hdr));
658 int crypt_set_data_device(struct crypt_device *cd, const char *device)
660 /* metadata device must be set */
661 if (!cd || !cd->device || !device)
664 log_dbg(cd, "Setting ciphertext data device to %s.", device ?: "(none)");
666 if (!isLUKS1(cd->type) && !isLUKS2(cd->type) && !isVERITY(cd->type) &&
667 !isINTEGRITY(cd->type) && !isTCRYPT(cd->type)) {
668 log_err(cd, _("This operation is not supported for this device type."));
672 if (isLUKS2(cd->type) && crypt_get_luks2_reencrypt(cd)) {
673 log_err(cd, _("Illegal operation with reencryption in-progress."));
677 return _crypt_set_data_device(cd, device);
680 int crypt_init_data_device(struct crypt_device **cd, const char *device, const char *data_device)
687 r = crypt_init(cd, device);
688 if (r || !data_device || !strcmp(device, data_device))
691 log_dbg(NULL, "Setting ciphertext data device to %s.", data_device);
692 r = _crypt_set_data_device(*cd, data_device);
/* Free all type-specific state for 'force_type' (or cd->type when NULL)
 * and reset the context to the null type.
 * NOTE(review): lines are elided in this extract — the opening isPLAIN()
 * branch and several closing braces are not visible here. */
701 static void crypt_free_type(struct crypt_device *cd, const char *force_type)
703 	const char *type = force_type ?: cd->type;
	/* CRYPT_PLAIN: context-owned hash/cipher strings. */
706 		free(CONST_CAST(void*)cd->u.plain.hdr.hash);
707 		free(cd->u.plain.cipher);
708 		free(cd->u.plain.cipher_spec);
709 	} else if (isLUKS2(type)) {
710 		LUKS2_reencrypt_free(cd, cd->u.luks2.rh);
711 		LUKS2_hdr_free(cd, &cd->u.luks2.hdr);
712 		free(cd->u.luks2.keyslot_cipher);
713 	} else if (isLUKS1(type)) {
714 		free(cd->u.luks1.cipher_spec);
715 	} else if (isLOOPAES(type)) {
716 		free(CONST_CAST(void*)cd->u.loopaes.hdr.hash);
717 		free(cd->u.loopaes.cipher);
718 		free(cd->u.loopaes.cipher_spec);
719 	} else if (isVERITY(type)) {
720 		free(CONST_CAST(void*)cd->u.verity.hdr.hash_name);
721 		free(CONST_CAST(void*)cd->u.verity.hdr.data_device);
722 		free(CONST_CAST(void*)cd->u.verity.hdr.hash_device);
723 		free(CONST_CAST(void*)cd->u.verity.hdr.fec_device);
724 		free(CONST_CAST(void*)cd->u.verity.hdr.salt);
725 		free(CONST_CAST(void*)cd->u.verity.root_hash);
726 		free(cd->u.verity.uuid);
727 		device_free(cd, cd->u.verity.fec_device);
728 	} else if (isINTEGRITY(type)) {
729 		free(CONST_CAST(void*)cd->u.integrity.params.integrity);
730 		free(CONST_CAST(void*)cd->u.integrity.params.journal_integrity);
731 		free(CONST_CAST(void*)cd->u.integrity.params.journal_crypt);
732 		crypt_free_volume_key(cd->u.integrity.journal_crypt_key);
733 		crypt_free_volume_key(cd->u.integrity.journal_mac_key);
734 	} else if (isBITLK(type)) {
735 		free(cd->u.bitlk.cipher_spec);
736 		BITLK_bitlk_metadata_free(&cd->u.bitlk.params);
	/* No recognized type: drop the remembered active device name. */
738 		free(cd->u.none.active_name);
739 		cd->u.none.active_name = NULL;
	/* Finally wipe the union and clear the type. */
742 	crypt_set_null_type(cd);
746 struct crypt_pbkdf_type *crypt_get_pbkdf(struct crypt_device *cd)
752 * crypt_load() helpers
754 static int _crypt_load_luks2(struct crypt_device *cd, int reload, int repair)
758 struct luks2_hdr hdr2 = {};
760 log_dbg(cd, "%soading LUKS2 header (repair %sabled).", reload ? "Rel" : "L", repair ? "en" : "dis");
762 r = LUKS2_hdr_read(cd, &hdr2, repair);
766 if (!reload && !(type = strdup(CRYPT_LUKS2))) {
771 if (verify_pbkdf_params(cd, &cd->pbkdf)) {
772 r = init_pbkdf_type(cd, NULL, CRYPT_LUKS2);
778 LUKS2_hdr_free(cd, &cd->u.luks2.hdr);
779 free(cd->u.luks2.keyslot_cipher);
784 memcpy(&cd->u.luks2.hdr, &hdr2, sizeof(hdr2));
785 cd->u.luks2.keyslot_cipher = NULL;
786 cd->u.luks2.rh = NULL;
791 LUKS2_hdr_free(cd, &hdr2);
796 static void _luks2_rollback(struct crypt_device *cd)
798 if (!cd || !isLUKS2(cd->type))
801 if (LUKS2_hdr_rollback(cd, &cd->u.luks2.hdr)) {
802 log_err(cd, _("Failed to rollback LUKS2 metadata in memory."));
806 free(cd->u.luks2.keyslot_cipher);
807 cd->u.luks2.keyslot_cipher = NULL;
810 static int _crypt_load_luks(struct crypt_device *cd, const char *requested_type,
811 bool quiet, bool repair)
814 struct luks_phdr hdr = {};
821 /* This will return 0 if primary LUKS2 header is damaged */
822 version = LUKS2_hdr_version_unlocked(cd, NULL);
824 if ((isLUKS1(requested_type) && version == 2) ||
825 (isLUKS2(requested_type) && version == 1))
831 if (isLUKS1(requested_type) || version == 1) {
832 if (isLUKS2(cd->type)) {
833 log_dbg(cd, "Context is already initialized to type %s", cd->type);
837 if (verify_pbkdf_params(cd, &cd->pbkdf)) {
838 r = init_pbkdf_type(cd, NULL, CRYPT_LUKS1);
843 r = LUKS_read_phdr(&hdr, !quiet, repair, cd);
847 if (!cd->type && !(cd->type = strdup(CRYPT_LUKS1))) {
852 /* Set hash to the same as in the loaded header */
853 if (!cd->pbkdf.hash || strcmp(cd->pbkdf.hash, hdr.hashSpec)) {
854 free(CONST_CAST(void*)cd->pbkdf.hash);
855 cd->pbkdf.hash = strdup(hdr.hashSpec);
856 if (!cd->pbkdf.hash) {
862 if (asprintf(&cipher_spec, "%s-%s", hdr.cipherName, hdr.cipherMode) < 0) {
867 free(cd->u.luks1.cipher_spec);
868 cd->u.luks1.cipher_spec = cipher_spec;
870 memcpy(&cd->u.luks1.hdr, &hdr, sizeof(hdr));
871 } else if (isLUKS2(requested_type) || version == 2 || version == 0) {
872 if (isLUKS1(cd->type)) {
873 log_dbg(cd, "Context is already initialized to type %s", cd->type);
878 * Current LUKS2 repair just overrides blkid probes
879 * and perform auto-recovery if possible. This is safe
880 * unless future LUKS2 repair code do something more
881 * sophisticated. In such case we would need to check
882 * for LUKS2 requirements and decide if it's safe to
885 r = _crypt_load_luks2(cd, cd->type != NULL, repair);
887 device_set_block_size(crypt_data_device(cd), LUKS2_get_sector_size(&cd->u.luks2.hdr));
889 log_err(cd, _("Device %s is not a valid LUKS device."), mdata_device_path(cd));
892 log_err(cd, _("Unsupported LUKS version %d."), version);
896 crypt_safe_memzero(&hdr, sizeof(hdr));
901 static int _crypt_load_tcrypt(struct crypt_device *cd, struct crypt_params_tcrypt *params)
912 memcpy(&cd->u.tcrypt.params, params, sizeof(*params));
914 r = TCRYPT_read_phdr(cd, &cd->u.tcrypt.hdr, &cd->u.tcrypt.params);
916 cd->u.tcrypt.params.passphrase = NULL;
917 cd->u.tcrypt.params.passphrase_size = 0;
918 cd->u.tcrypt.params.keyfiles = NULL;
919 cd->u.tcrypt.params.keyfiles_count = 0;
920 cd->u.tcrypt.params.veracrypt_pim = 0;
925 if (!cd->type && !(cd->type = strdup(CRYPT_TCRYPT)))
929 crypt_free_type(cd, CRYPT_TCRYPT);
933 static int _crypt_load_verity(struct crypt_device *cd, struct crypt_params_verity *params)
936 uint64_t sb_offset = 0;
942 if (params && params->flags & CRYPT_VERITY_NO_HEADER)
946 sb_offset = params->hash_area_offset;
948 r = VERITY_read_sb(cd, sb_offset, &cd->u.verity.uuid, &cd->u.verity.hdr);
952 if (!cd->type && !(cd->type = strdup(CRYPT_VERITY))) {
958 cd->u.verity.hdr.flags = params->flags;
960 /* Hash availability checked in sb load */
961 cd->u.verity.root_hash_size = crypt_hash_size(cd->u.verity.hdr.hash_name);
962 if (cd->u.verity.root_hash_size > 4096) {
967 if (params && params->data_device &&
968 (r = crypt_set_data_device(cd, params->data_device)) < 0)
971 if (params && params->fec_device) {
972 r = device_alloc(cd, &cd->u.verity.fec_device, params->fec_device);
975 cd->u.verity.hdr.fec_area_offset = params->fec_area_offset;
976 cd->u.verity.hdr.fec_roots = params->fec_roots;
980 crypt_free_type(cd, CRYPT_VERITY);
984 static int _crypt_load_integrity(struct crypt_device *cd,
985 struct crypt_params_integrity *params)
993 r = INTEGRITY_read_sb(cd, &cd->u.integrity.params, &cd->u.integrity.sb_flags);
997 // FIXME: add checks for fields in integrity sb vs params
1001 cd->u.integrity.params.journal_watermark = params->journal_watermark;
1002 cd->u.integrity.params.journal_commit_time = params->journal_commit_time;
1003 cd->u.integrity.params.buffer_sectors = params->buffer_sectors;
1004 if (params->integrity &&
1005 !(cd->u.integrity.params.integrity = strdup(params->integrity)))
1007 cd->u.integrity.params.integrity_key_size = params->integrity_key_size;
1008 if (params->journal_integrity &&
1009 !(cd->u.integrity.params.journal_integrity = strdup(params->journal_integrity)))
1011 if (params->journal_crypt &&
1012 !(cd->u.integrity.params.journal_crypt = strdup(params->journal_crypt)))
1015 if (params->journal_crypt_key) {
1016 cd->u.integrity.journal_crypt_key =
1017 crypt_alloc_volume_key(params->journal_crypt_key_size,
1018 params->journal_crypt_key);
1019 if (!cd->u.integrity.journal_crypt_key)
1022 if (params->journal_integrity_key) {
1023 cd->u.integrity.journal_mac_key =
1024 crypt_alloc_volume_key(params->journal_integrity_key_size,
1025 params->journal_integrity_key);
1026 if (!cd->u.integrity.journal_mac_key)
1031 if (!cd->type && !(cd->type = strdup(CRYPT_INTEGRITY)))
1036 crypt_free_type(cd, CRYPT_INTEGRITY);
1040 static int _crypt_load_bitlk(struct crypt_device *cd)
1044 r = init_crypto(cd);
1048 r = BITLK_read_sb(cd, &cd->u.bitlk.params);
1052 if (asprintf(&cd->u.bitlk.cipher_spec, "%s-%s",
1053 cd->u.bitlk.params.cipher, cd->u.bitlk.params.cipher_mode) < 0) {
1054 cd->u.bitlk.cipher_spec = NULL;
1059 if (!cd->type && !(cd->type = strdup(CRYPT_BITLK))) {
1064 device_set_block_size(crypt_data_device(cd), cd->u.bitlk.params.sector_size);
1067 crypt_free_type(cd, CRYPT_BITLK);
1071 static int _crypt_load_fvault2(struct crypt_device *cd)
1075 r = init_crypto(cd);
1079 r = FVAULT2_read_metadata(cd, &cd->u.fvault2.params);
1083 if (!cd->type && !(cd->type = strdup(CRYPT_FVAULT2)))
1087 crypt_free_type(cd, CRYPT_FVAULT2);
/* Public entry point: probe/load on-disk metadata of 'requested_type'
 * (or any LUKS when NULL) from the metadata device and initialize the
 * context type accordingly. Refuses to switch an already-initialized
 * context to an incompatible type.
 * NOTE(review): parameter list tail ('params'), early returns and the
 * trailing else/return are elided in this extract. */
1091 int crypt_load(struct crypt_device *cd,
1092 	const char *requested_type,
1100 	log_dbg(cd, "Trying to load %s crypt type from device %s.",
1101 		requested_type ?: "any", mdata_device_path(cd) ?: "(none)");
1103 	if (!crypt_metadata_device(cd))
	/* Reset cached no-type state and LUKS2 layout hints before probing. */
1106 	crypt_reset_null_type(cd);
1107 	cd->data_offset = 0;
1108 	cd->metadata_size = 0;
1109 	cd->keyslots_size = 0;
1111 	if (!requested_type || isLUKS1(requested_type) || isLUKS2(requested_type)) {
1112 		if (cd->type && !isLUKS1(cd->type) && !isLUKS2(cd->type)) {
1113 			log_dbg(cd, "Context is already initialized to type %s", cd->type);
1117 		r = _crypt_load_luks(cd, requested_type, true, false);
1118 	} else if (isVERITY(requested_type)) {
1119 		if (cd->type && !isVERITY(cd->type)) {
1120 			log_dbg(cd, "Context is already initialized to type %s", cd->type);
1123 		r = _crypt_load_verity(cd, params);
1124 	} else if (isTCRYPT(requested_type)) {
1125 		if (cd->type && !isTCRYPT(cd->type)) {
1126 			log_dbg(cd, "Context is already initialized to type %s", cd->type);
1129 		r = _crypt_load_tcrypt(cd, params);
1130 	} else if (isINTEGRITY(requested_type)) {
1131 		if (cd->type && !isINTEGRITY(cd->type)) {
1132 			log_dbg(cd, "Context is already initialized to type %s", cd->type);
1135 		r = _crypt_load_integrity(cd, params);
1136 	} else if (isBITLK(requested_type)) {
1137 		if (cd->type && !isBITLK(cd->type)) {
1138 			log_dbg(cd, "Context is already initialized to type %s", cd->type);
1141 		r = _crypt_load_bitlk(cd);
1142 	} else if (isFVAULT2(requested_type)) {
1143 		if (cd->type && !isFVAULT2(cd->type)) {
1144 			log_dbg(cd, "Context is already initialized to type %s", cd->type);
1147 		r = _crypt_load_fvault2(cd);
1155 * crypt_init() helpers
1157 static int _init_by_name_crypt_none(struct crypt_device *cd)
1160 char _mode[MAX_CIPHER_LEN];
1161 struct crypt_dm_active_device dmd;
1162 struct dm_target *tgt = &dmd.segment;
1164 if (cd->type || !cd->u.none.active_name)
1167 r = dm_query_device(cd, cd->u.none.active_name,
1168 DM_ACTIVE_CRYPT_CIPHER |
1169 DM_ACTIVE_CRYPT_KEYSIZE, &dmd);
1172 if (!single_segment(&dmd) || tgt->type != DM_CRYPT)
1175 r = crypt_parse_name_and_mode(tgt->u.crypt.cipher,
1176 cd->u.none.cipher, NULL,
1180 r = snprintf(cd->u.none.cipher_spec, sizeof(cd->u.none.cipher_spec),
1181 "%s-%s", cd->u.none.cipher, _mode);
1182 if (r < 0 || (size_t)r >= sizeof(cd->u.none.cipher_spec))
1185 cd->u.none.cipher_mode = cd->u.none.cipher_spec + strlen(cd->u.none.cipher) + 1;
1186 cd->u.none.key_size = tgt->u.crypt.vk->keylength;
1191 dm_targets_free(cd, &dmd);
1195 static const char *LUKS_UUID(struct crypt_device *cd)
1199 else if (isLUKS1(cd->type))
1200 return cd->u.luks1.hdr.uuid;
1201 else if (isLUKS2(cd->type))
1202 return cd->u.luks2.hdr.uuid;
1207 static int _init_by_name_crypt(struct crypt_device *cd, const char *name)
1210 char **dep, *cipher_spec = NULL, cipher[MAX_CIPHER_LEN], cipher_mode[MAX_CIPHER_LEN];
1211 char deps_uuid_prefix[40], *deps[MAX_DM_DEPS+1] = {};
1212 const char *dev, *namei;
1214 struct crypt_dm_active_device dmd, dmdi = {}, dmdep = {};
1215 struct dm_target *tgt = &dmd.segment, *tgti = &dmdi.segment;
1217 r = dm_query_device(cd, name,
1220 DM_ACTIVE_CRYPT_CIPHER |
1221 DM_ACTIVE_CRYPT_KEYSIZE, &dmd);
1225 if (tgt->type != DM_CRYPT && tgt->type != DM_LINEAR) {
1226 log_dbg(cd, "Unsupported device table detected in %s.", name);
1234 r = snprintf(deps_uuid_prefix, sizeof(deps_uuid_prefix), CRYPT_SUBDEV "-%.32s", dmd.uuid + 6);
1235 if (r < 0 || (size_t)r != (sizeof(deps_uuid_prefix) - 1))
1240 r = dm_device_deps(cd, name, deps_uuid_prefix, deps, ARRAY_SIZE(deps));
1245 r = crypt_parse_name_and_mode(tgt->type == DM_LINEAR ? "null" : tgt->u.crypt.cipher, cipher,
1246 &key_nums, cipher_mode);
1248 log_dbg(cd, "Cannot parse cipher and mode from active device.");
1254 if (tgt->type == DM_CRYPT && tgt->u.crypt.integrity && (namei = device_dm_name(tgt->data_device))) {
1255 r = dm_query_device(cd, namei, DM_ACTIVE_DEVICE, &dmdi);
1258 if (!single_segment(&dmdi) || tgti->type != DM_INTEGRITY) {
1259 log_dbg(cd, "Unsupported device table detected in %s.", namei);
1263 if (!cd->metadata_device) {
1264 device_free(cd, cd->device);
1265 MOVE_REF(cd->device, tgti->data_device);
1269 /* do not try to lookup LUKS2 header in detached header mode */
1270 if (dmd.uuid && !cd->metadata_device && !found) {
1271 while (*dep && !found) {
1272 r = dm_query_device(cd, *dep, DM_ACTIVE_DEVICE, &dmdep);
1276 tgt = &dmdep.segment;
1278 while (tgt && !found) {
1279 dev = device_path(tgt->data_device);
1284 if (!strstr(dev, dm_get_dir()) ||
1285 !crypt_string_in(dev + strlen(dm_get_dir()) + 1, deps, ARRAY_SIZE(deps))) {
1286 device_free(cd, cd->device);
1287 MOVE_REF(cd->device, tgt->data_device);
1293 dm_targets_free(cd, &dmdep);
1297 if (asprintf(&cipher_spec, "%s-%s", cipher, cipher_mode) < 0) {
1306 if (isPLAIN(cd->type) && single_segment(&dmd) && tgt->type == DM_CRYPT) {
1307 cd->u.plain.hdr.hash = NULL; /* no way to get this */
1308 cd->u.plain.hdr.offset = tgt->u.crypt.offset;
1309 cd->u.plain.hdr.skip = tgt->u.crypt.iv_offset;
1310 cd->u.plain.hdr.sector_size = tgt->u.crypt.sector_size;
1311 cd->u.plain.key_size = tgt->u.crypt.vk->keylength;
1312 cd->u.plain.cipher = strdup(cipher);
1313 MOVE_REF(cd->u.plain.cipher_spec, cipher_spec);
1314 cd->u.plain.cipher_mode = cd->u.plain.cipher_spec + strlen(cipher) + 1;
1315 } else if (isLOOPAES(cd->type) && single_segment(&dmd) && tgt->type == DM_CRYPT) {
1316 cd->u.loopaes.hdr.offset = tgt->u.crypt.offset;
1317 cd->u.loopaes.cipher = strdup(cipher);
1318 MOVE_REF(cd->u.loopaes.cipher_spec, cipher_spec);
1319 cd->u.loopaes.cipher_mode = cd->u.loopaes.cipher_spec + strlen(cipher) + 1;
1320 /* version 3 uses last key for IV */
1321 if (tgt->u.crypt.vk->keylength % key_nums)
1323 cd->u.loopaes.key_size = tgt->u.crypt.vk->keylength / key_nums;
1324 } else if (isLUKS1(cd->type) || isLUKS2(cd->type)) {
1325 if (crypt_metadata_device(cd)) {
1326 r = _crypt_load_luks(cd, cd->type, true, false);
1328 log_dbg(cd, "LUKS device header does not match active device.");
1329 crypt_set_null_type(cd);
1330 device_close(cd, cd->metadata_device);
1331 device_close(cd, cd->device);
1335 /* check whether UUIDs match each other */
1336 r = crypt_uuid_cmp(dmd.uuid, LUKS_UUID(cd));
1338 log_dbg(cd, "LUKS device header uuid: %s mismatches DM returned uuid %s",
1339 LUKS_UUID(cd), dmd.uuid);
1340 crypt_free_type(cd, NULL);
1345 log_dbg(cd, "LUKS device header not available.");
1346 crypt_set_null_type(cd);
1349 } else if (isTCRYPT(cd->type) && single_segment(&dmd) && tgt->type == DM_CRYPT) {
1350 r = TCRYPT_init_by_name(cd, name, dmd.uuid, tgt, &cd->device,
1351 &cd->u.tcrypt.params, &cd->u.tcrypt.hdr);
1352 } else if (isBITLK(cd->type)) {
1353 r = _crypt_load_bitlk(cd);
1355 log_dbg(cd, "BITLK device header not available.");
1356 crypt_set_null_type(cd);
1359 } else if (isFVAULT2(cd->type)) {
1360 r = _crypt_load_fvault2(cd);
1362 log_dbg(cd, "FVAULT2 device header not available.");
1363 crypt_set_null_type(cd);
1368 dm_targets_free(cd, &dmd);
1369 dm_targets_free(cd, &dmdi);
1370 dm_targets_free(cd, &dmdep);
1371 free(CONST_CAST(void*)dmd.uuid);
1379 static int _init_by_name_verity(struct crypt_device *cd, const char *name)
1381 struct crypt_dm_active_device dmd;
1382 struct dm_target *tgt = &dmd.segment;
1385 r = dm_query_device(cd, name,
1387 DM_ACTIVE_VERITY_HASH_DEVICE |
1388 DM_ACTIVE_VERITY_ROOT_HASH |
1389 DM_ACTIVE_VERITY_PARAMS, &dmd);
1392 if (!single_segment(&dmd) || tgt->type != DM_VERITY) {
1393 log_dbg(cd, "Unsupported device table detected in %s.", name);
1400 if (isVERITY(cd->type)) {
1401 cd->u.verity.uuid = NULL; // FIXME
1402 cd->u.verity.hdr.flags = CRYPT_VERITY_NO_HEADER; //FIXME
1403 cd->u.verity.hdr.data_size = tgt->u.verity.vp->data_size;
1404 cd->u.verity.root_hash_size = tgt->u.verity.root_hash_size;
1405 MOVE_REF(cd->u.verity.hdr.hash_name, tgt->u.verity.vp->hash_name);
1406 cd->u.verity.hdr.data_device = NULL;
1407 cd->u.verity.hdr.hash_device = NULL;
1408 cd->u.verity.hdr.data_block_size = tgt->u.verity.vp->data_block_size;
1409 cd->u.verity.hdr.hash_block_size = tgt->u.verity.vp->hash_block_size;
1410 cd->u.verity.hdr.hash_area_offset = tgt->u.verity.hash_offset;
1411 cd->u.verity.hdr.fec_area_offset = tgt->u.verity.fec_offset;
1412 cd->u.verity.hdr.hash_type = tgt->u.verity.vp->hash_type;
1413 cd->u.verity.hdr.flags = tgt->u.verity.vp->flags;
1414 cd->u.verity.hdr.salt_size = tgt->u.verity.vp->salt_size;
1415 MOVE_REF(cd->u.verity.hdr.salt, tgt->u.verity.vp->salt);
1416 MOVE_REF(cd->u.verity.hdr.fec_device, tgt->u.verity.vp->fec_device);
1417 cd->u.verity.hdr.fec_roots = tgt->u.verity.vp->fec_roots;
1418 MOVE_REF(cd->u.verity.fec_device, tgt->u.verity.fec_device);
1419 MOVE_REF(cd->metadata_device, tgt->u.verity.hash_device);
1420 MOVE_REF(cd->u.verity.root_hash, tgt->u.verity.root_hash);
1423 dm_targets_free(cd, &dmd);
1427 static int _init_by_name_integrity(struct crypt_device *cd, const char *name)
1429 struct crypt_dm_active_device dmd;
1430 struct dm_target *tgt = &dmd.segment;
1433 r = dm_query_device(cd, name, DM_ACTIVE_DEVICE |
1434 DM_ACTIVE_CRYPT_KEY |
1435 DM_ACTIVE_CRYPT_KEYSIZE |
1436 DM_ACTIVE_INTEGRITY_PARAMS, &dmd);
1439 if (!single_segment(&dmd) || tgt->type != DM_INTEGRITY) {
1440 log_dbg(cd, "Unsupported device table detected in %s.", name);
1447 if (isINTEGRITY(cd->type)) {
1448 cd->u.integrity.params.tag_size = tgt->u.integrity.tag_size;
1449 cd->u.integrity.params.sector_size = tgt->u.integrity.sector_size;
1450 cd->u.integrity.params.journal_size = tgt->u.integrity.journal_size;
1451 cd->u.integrity.params.journal_watermark = tgt->u.integrity.journal_watermark;
1452 cd->u.integrity.params.journal_commit_time = tgt->u.integrity.journal_commit_time;
1453 cd->u.integrity.params.interleave_sectors = tgt->u.integrity.interleave_sectors;
1454 cd->u.integrity.params.buffer_sectors = tgt->u.integrity.buffer_sectors;
1455 MOVE_REF(cd->u.integrity.params.integrity, tgt->u.integrity.integrity);
1456 MOVE_REF(cd->u.integrity.params.journal_integrity, tgt->u.integrity.journal_integrity);
1457 MOVE_REF(cd->u.integrity.params.journal_crypt, tgt->u.integrity.journal_crypt);
1459 if (tgt->u.integrity.vk)
1460 cd->u.integrity.params.integrity_key_size = tgt->u.integrity.vk->keylength;
1461 if (tgt->u.integrity.journal_integrity_key)
1462 cd->u.integrity.params.journal_integrity_key_size = tgt->u.integrity.journal_integrity_key->keylength;
1463 if (tgt->u.integrity.journal_crypt_key)
1464 cd->u.integrity.params.integrity_key_size = tgt->u.integrity.journal_crypt_key->keylength;
1465 MOVE_REF(cd->metadata_device, tgt->u.integrity.meta_device);
1468 dm_targets_free(cd, &dmd);
/*
 * Public API: allocate and initialize a crypt context from an already active
 * device-mapper mapping. If header_device is non-NULL, metadata is read from
 * that detached header instead of the underlying data device.
 * NOTE(review): several lines of this function are elided in this view
 * (declarations, some braces and error paths); comments below describe only
 * the visible code.
 */
int crypt_init_by_name_and_header(struct crypt_device **cd,
	const char *header_device)
	crypt_status_info ci;
	struct crypt_dm_active_device dmd;
	struct dm_target *tgt = &dmd.segment;

	log_dbg(NULL, "Allocating crypt device context by device %s.", name);

	/* The mapping must exist and be active before it can be introspected. */
	ci = crypt_status(NULL, name);
	if (ci == CRYPT_INVALID)
	if (ci < CRYPT_ACTIVE) {
		log_err(NULL, _("Device %s is not active."), name);

	r = dm_query_device(NULL, name, DM_ACTIVE_DEVICE | DM_ACTIVE_UUID, &dmd);

	if (header_device) {
		r = crypt_init(cd, header_device);
		r = crypt_init(cd, device_path(tgt->data_device));

	/* Underlying device disappeared but mapping still active */
	if (!tgt->data_device || r == -ENOTBLK)
		log_verbose(NULL, _("Underlying device for crypt device %s disappeared."),

	/* Underlying device is not readable but crypt mapping exists */
		r = crypt_init(cd, NULL);

	/* Derive context type from the dm UUID prefix set at activation time
	 * (each format activates with its type name as UUID prefix). */
	if (!strncmp(CRYPT_PLAIN, dmd.uuid, sizeof(CRYPT_PLAIN)-1))
		(*cd)->type = strdup(CRYPT_PLAIN);
	else if (!strncmp(CRYPT_LOOPAES, dmd.uuid, sizeof(CRYPT_LOOPAES)-1))
		(*cd)->type = strdup(CRYPT_LOOPAES);
	else if (!strncmp(CRYPT_LUKS1, dmd.uuid, sizeof(CRYPT_LUKS1)-1))
		(*cd)->type = strdup(CRYPT_LUKS1);
	else if (!strncmp(CRYPT_LUKS2, dmd.uuid, sizeof(CRYPT_LUKS2)-1))
		(*cd)->type = strdup(CRYPT_LUKS2);
	else if (!strncmp(CRYPT_VERITY, dmd.uuid, sizeof(CRYPT_VERITY)-1))
		(*cd)->type = strdup(CRYPT_VERITY);
	else if (!strncmp(CRYPT_TCRYPT, dmd.uuid, sizeof(CRYPT_TCRYPT)-1))
		(*cd)->type = strdup(CRYPT_TCRYPT);
	else if (!strncmp(CRYPT_INTEGRITY, dmd.uuid, sizeof(CRYPT_INTEGRITY)-1))
		(*cd)->type = strdup(CRYPT_INTEGRITY);
	else if (!strncmp(CRYPT_BITLK, dmd.uuid, sizeof(CRYPT_BITLK)-1))
		(*cd)->type = strdup(CRYPT_BITLK);
	else if (!strncmp(CRYPT_FVAULT2, dmd.uuid, sizeof(CRYPT_FVAULT2)-1))
		(*cd)->type = strdup(CRYPT_FVAULT2);
		log_dbg(NULL, "Unknown UUID set, some parameters are not set.");
		log_dbg(NULL, "Active device has no UUID set, some parameters are not set.");

	/* With a detached header the queried target still points at the data device. */
	if (header_device) {
		r = crypt_set_data_device(*cd, device_path(tgt->data_device));

	/* Try to initialize basic parameters from active device */
	if (tgt->type == DM_CRYPT || tgt->type == DM_LINEAR)
		r = _init_by_name_crypt(*cd, name);
	else if (tgt->type == DM_VERITY)
		r = _init_by_name_verity(*cd, name);
	else if (tgt->type == DM_INTEGRITY)
		r = _init_by_name_integrity(*cd, name);
	} else if (!(*cd)->type) {
		/* For anonymous device (no header found) remember initialized name */
		(*cd)->u.none.active_name = strdup(name);

	/* dmd.uuid was allocated by dm_query_device; release it with the targets. */
	free(CONST_CAST(void*)dmd.uuid);
	dm_targets_free(NULL, &dmd);
/*
 * Public API convenience wrapper: initialize a context from an active mapping
 * with no detached header (metadata read from the mapped data device).
 */
int crypt_init_by_name(struct crypt_device **cd, const char *name)
	return crypt_init_by_name_and_header(cd, name, NULL);
1577 * crypt_format() helpers
/*
 * crypt_format() helper for CRYPT_PLAIN: no on-disk metadata is written,
 * only the in-memory context is populated. NOTE(review): some lines
 * (parameter list entries, braces, returns) are elided in this view.
 */
static int _crypt_format_plain(struct crypt_device *cd,
	const char *cipher_mode,
	size_t volume_key_size,
	struct crypt_params_plain *params)
	unsigned int sector_size = params ? params->sector_size : SECTOR_SIZE;

	if (!cipher || !cipher_mode) {
		log_err(cd, _("Invalid plain crypt parameters."));

	/* Sanity limit, volume key is caller-supplied size in bytes. */
	if (volume_key_size > 1024) {
		log_err(cd, _("Invalid key size."));

		log_err(cd, _("UUID is not supported for this crypt type."));

	if (cd->metadata_device) {
		log_err(cd, _("Detached metadata device is not supported for this crypt type."));

	/* For compatibility with old params structure */
		sector_size = SECTOR_SIZE;

	/* Encryption sector size must be 512..MAX and a power of two. */
	if (sector_size < SECTOR_SIZE || sector_size > MAX_SECTOR_SIZE ||
	    NOTPOW2(sector_size)) {
		log_err(cd, _("Unsupported encryption sector size."));

	/* With large sectors the usable area must divide evenly. */
	if (sector_size > SECTOR_SIZE && !device_size(cd->device, &dev_size)) {
		if (params && params->offset)
			dev_size -= (params->offset * SECTOR_SIZE);
		if (dev_size % sector_size) {
			log_err(cd, _("Device size is not aligned to requested sector size."));
		device_set_block_size(crypt_data_device(cd), sector_size);

	if (!(cd->type = strdup(CRYPT_PLAIN)))

	cd->u.plain.key_size = volume_key_size;
	/* Plain mode has no stored key; allocate an empty key of the right size. */
	cd->volume_key = crypt_alloc_volume_key(volume_key_size, NULL);
	if (!cd->volume_key)

	if (asprintf(&cd->u.plain.cipher_spec, "%s-%s", cipher, cipher_mode) < 0) {
		cd->u.plain.cipher_spec = NULL;
	cd->u.plain.cipher = strdup(cipher);
	/* cipher_mode points into cipher_spec just past "cipher-"; do not free it. */
	cd->u.plain.cipher_mode = cd->u.plain.cipher_spec + strlen(cipher) + 1;

	if (params && params->hash)
		cd->u.plain.hdr.hash = strdup(params->hash);

	cd->u.plain.hdr.offset = params ? params->offset : 0;
	cd->u.plain.hdr.skip = params ? params->skip : 0;
	cd->u.plain.hdr.size = params ? params->size : 0;
	cd->u.plain.hdr.sector_size = sector_size;

	if (!cd->u.plain.cipher)
/*
 * crypt_format() helper for CRYPT_LUKS1: generates the LUKS1 phdr, wipes the
 * header areas and writes the new header to the metadata device.
 * NOTE(review): some lines (declarations, braces, returns) are elided here.
 */
static int _crypt_format_luks1(struct crypt_device *cd,
	const char *cipher_mode,
	const char *volume_key,
	size_t volume_key_size,
	struct crypt_params_luks1 *params)
	unsigned long required_alignment = DEFAULT_DISK_ALIGNMENT;
	unsigned long alignment_offset = 0;

	if (!cipher || !cipher_mode)

	if (!crypt_metadata_device(cd)) {
		log_err(cd, _("Can't format LUKS without device."));

	/* Explicit data offset must itself be aligned to the requested alignment. */
	if (params && cd->data_offset && params->data_alignment &&
	   (cd->data_offset % params->data_alignment)) {
		log_err(cd, _("Requested data alignment is not compatible with data offset."));

	if (!(cd->type = strdup(CRYPT_LUKS1)))

	/* Use caller-provided key if given, otherwise generate a random one. */
	cd->volume_key = crypt_alloc_volume_key(volume_key_size,
	cd->volume_key = crypt_generate_volume_key(cd, volume_key_size);

	if (!cd->volume_key)

	if (verify_pbkdf_params(cd, &cd->pbkdf)) {
		r = init_pbkdf_type(cd, NULL, CRYPT_LUKS1);

	/* Caller may override the PBKDF hash via params->hash. */
	if (params && params->hash && strcmp(params->hash, cd->pbkdf.hash)) {
		free(CONST_CAST(void*)cd->pbkdf.hash);
		cd->pbkdf.hash = strdup(params->hash);
		if (!cd->pbkdf.hash)

	/* Detached data device: current device becomes the metadata device. */
	if (params && params->data_device) {
		if (!cd->metadata_device)
			cd->metadata_device = cd->device;
			device_free(cd, cd->device);
		if (device_alloc(cd, &cd->device, params->data_device) < 0)

	if (params && cd->metadata_device) {
		/* For detached header the alignment is used directly as data offset */
		if (!cd->data_offset)
			cd->data_offset = params->data_alignment;
		required_alignment = params->data_alignment * SECTOR_SIZE;
	} else if (params && params->data_alignment) {
		required_alignment = params->data_alignment * SECTOR_SIZE;
		device_topology_alignment(cd, cd->device,
					  &required_alignment,
					  &alignment_offset, DEFAULT_DISK_ALIGNMENT);

	r = LUKS_check_cipher(cd, volume_key_size, cipher, cipher_mode);

	r = LUKS_generate_phdr(&cd->u.luks1.hdr, cd->volume_key, cipher, cipher_mode,
			       cd->pbkdf.hash, uuid,
			       cd->data_offset * SECTOR_SIZE,
			       alignment_offset, required_alignment, cd);

	/* Formatting must have exclusive access to the metadata device. */
	r = device_check_access(cd, crypt_metadata_device(cd), DEV_EXCL);

	if (asprintf(&cd->u.luks1.cipher_spec, "%s-%s", cipher, cipher_mode) < 0) {
		cd->u.luks1.cipher_spec = NULL;

	r = LUKS_wipe_header_areas(&cd->u.luks1.hdr, cd);
		free(cd->u.luks1.cipher_spec);
		log_err(cd, _("Cannot wipe header on device %s."),
			mdata_device_path(cd));

	r = LUKS_write_phdr(&cd->u.luks1.hdr, cd);
		free(cd->u.luks1.cipher_spec);

	/* Warn (non-fatal) when no data space remains past the header. */
	if (!device_size(crypt_data_device(cd), &dev_size) &&
	    dev_size <= (crypt_get_data_offset(cd) * SECTOR_SIZE))
		log_std(cd, _("Device %s is too small for activation, there is no remaining space for data.\n"),
			device_path(crypt_data_device(cd)));
/*
 * crypt_format() helper for CRYPT_LUKS2: optionally autodetects the
 * encryption sector size, validates optional integrity extensions, generates
 * the LUKS2 header, wipes header areas and writes the header (and integrity
 * superblock if requested). NOTE(review): some lines (declarations, braces,
 * error paths) are elided in this view.
 */
static int _crypt_format_luks2(struct crypt_device *cd,
	const char *cipher_mode,
	const char *volume_key,
	size_t volume_key_size,
	struct crypt_params_luks2 *params,
	bool sector_size_autodetect)
	int r, integrity_key_size = 0;
	unsigned long required_alignment = DEFAULT_DISK_ALIGNMENT;
	unsigned long alignment_offset = 0;
	unsigned int sector_size;
	const char *integrity = params ? params->integrity : NULL;

	cd->u.luks2.hdr.jobj = NULL;
	cd->u.luks2.keyslot_cipher = NULL;

	if (!cipher || !cipher_mode)

	if (!crypt_metadata_device(cd)) {
		log_err(cd, _("Can't format LUKS without device."));

	if (params && cd->data_offset && params->data_alignment &&
	   (cd->data_offset % params->data_alignment)) {
		log_err(cd, _("Requested data alignment is not compatible with data offset."));

	/* Explicitly requested sector size disables autodetection. */
	if (params && params->sector_size)
		sector_size_autodetect = false;

	/* Detached data device: current device becomes the metadata device. */
	if (params && params->data_device) {
		if (!cd->metadata_device)
			cd->metadata_device = cd->device;
			device_free(cd, cd->device);
		if (device_alloc(cd, &cd->device, params->data_device) < 0)

	if (sector_size_autodetect) {
		sector_size = device_optimal_encryption_sector_size(cd, crypt_data_device(cd));
		log_dbg(cd, "Auto-detected optimal encryption sector size for device %s is %d bytes.",
			device_path(crypt_data_device(cd)), sector_size);
		sector_size = params ? params->sector_size : SECTOR_SIZE;

	if (sector_size < SECTOR_SIZE || sector_size > MAX_SECTOR_SIZE ||
	    NOTPOW2(sector_size)) {
		log_err(cd, _("Unsupported encryption sector size."));

	/* Autodetected size silently falls back to 512B if dm-crypt lacks
	 * sector-size support; an explicit request only warns. */
	if (sector_size != SECTOR_SIZE && !dm_flags(cd, DM_CRYPT, &dmc_flags) &&
	    !(dmc_flags & DM_SECTOR_SIZE_SUPPORTED)) {
		if (sector_size_autodetect) {
			log_dbg(cd, "dm-crypt does not support encryption sector size option. Reverting to 512 bytes.");
			sector_size = SECTOR_SIZE;
			log_std(cd, _("WARNING: The device activation will fail, dm-crypt is missing "
				      "support for requested encryption sector size.\n"));

	if (params->integrity_params) {
		/* Standalone dm-integrity must not be used */
		if (params->integrity_params->integrity ||
		    params->integrity_params->integrity_key_size)
		/* FIXME: journal encryption and MAC is here not yet supported */
		if (params->integrity_params->journal_crypt ||
		    params->integrity_params->journal_integrity)

	if (!INTEGRITY_tag_size(integrity, cipher, cipher_mode)) {
		if (!strcmp(integrity, "none"))

	/* Integrity key (if any) is carved out of the volume key. */
	integrity_key_size = INTEGRITY_key_size(integrity);
	if ((integrity_key_size < 0) || (integrity_key_size >= (int)volume_key_size)) {
		log_err(cd, _("Volume key is too small for encryption with integrity extensions."));

	r = device_check_access(cd, crypt_metadata_device(cd), DEV_EXCL);

	if (!(cd->type = strdup(CRYPT_LUKS2)))

	/* Use caller-provided key if given, otherwise generate a random one. */
	cd->volume_key = crypt_alloc_volume_key(volume_key_size,
	cd->volume_key = crypt_generate_volume_key(cd, volume_key_size);

	if (!cd->volume_key)

	if (params && params->pbkdf)
		r = crypt_set_pbkdf_type(cd, params->pbkdf);
	else if (verify_pbkdf_params(cd, &cd->pbkdf))
		r = init_pbkdf_type(cd, NULL, CRYPT_LUKS2);

	if (params && cd->metadata_device) {
		/* For detached header the alignment is used directly as data offset */
		if (!cd->data_offset)
			cd->data_offset = params->data_alignment;
		required_alignment = params->data_alignment * SECTOR_SIZE;
	} else if (params && params->data_alignment) {
		required_alignment = params->data_alignment * SECTOR_SIZE;
		device_topology_alignment(cd, cd->device,
					  &required_alignment,
					  &alignment_offset, DEFAULT_DISK_ALIGNMENT);

	r = device_size(crypt_data_device(cd), &dev_size);

	/* Autodetected sector size must not break data offset/size alignment. */
	if (sector_size_autodetect) {
		if (cd->data_offset && MISALIGNED(cd->data_offset, sector_size)) {
			log_dbg(cd, "Data offset not aligned to sector size. Reverting to 512 bytes.");
			sector_size = SECTOR_SIZE;
		} else if (MISALIGNED(dev_size - (uint64_t)required_alignment - (uint64_t)alignment_offset, sector_size)) {
			/* underflow does not affect misalignment checks */
			log_dbg(cd, "Device size is not aligned to sector size. Reverting to 512 bytes.");
			sector_size = SECTOR_SIZE;

	/* FIXME: allow this later also for normal ciphers (check AF_ALG availability. */
	if (integrity && !integrity_key_size) {
		r = crypt_cipher_check_kernel(cipher, cipher_mode, integrity, volume_key_size);
			log_err(cd, _("Cipher %s-%s (key size %zd bits) is not available."),
				cipher, cipher_mode, volume_key_size * 8);

	if ((!integrity || integrity_key_size) && !crypt_cipher_wrapped_key(cipher, cipher_mode) &&
	    !INTEGRITY_tag_size(NULL, cipher, cipher_mode)) {
		r = LUKS_check_cipher(cd, volume_key_size - integrity_key_size,
				      cipher, cipher_mode);

	r = LUKS2_generate_hdr(cd, &cd->u.luks2.hdr, cd->volume_key,
			       cipher, cipher_mode,
			       cd->data_offset * SECTOR_SIZE,
			       cd->metadata_size, cd->keyslots_size);

	/* Requested metadata/keyslots sizes may be rounded by the header code. */
	if (cd->metadata_size && (cd->metadata_size != LUKS2_metadata_size(&cd->u.luks2.hdr)))
		log_std(cd, _("WARNING: LUKS2 metadata size changed to %" PRIu64 " bytes.\n"),
			LUKS2_metadata_size(&cd->u.luks2.hdr));

	if (cd->keyslots_size && (cd->keyslots_size != LUKS2_keyslots_size(&cd->u.luks2.hdr)))
		log_std(cd, _("WARNING: LUKS2 keyslots area size changed to %" PRIu64 " bytes.\n"),
			LUKS2_keyslots_size(&cd->u.luks2.hdr));

	if (!integrity && sector_size > SECTOR_SIZE) {
		dev_size -= (crypt_get_data_offset(cd) * SECTOR_SIZE);
		if (dev_size % sector_size) {
			log_err(cd, _("Device size is not aligned to requested sector size."));

	if (params && (params->label || params->subsystem)) {
		r = LUKS2_hdr_labels(cd, &cd->u.luks2.hdr,
				     params->label, params->subsystem, 0);

	device_set_block_size(crypt_data_device(cd), sector_size);

	r = LUKS2_wipe_header_areas(cd, &cd->u.luks2.hdr, cd->metadata_device != NULL);
		log_err(cd, _("Cannot wipe header on device %s."),
			mdata_device_path(cd));
		if (dev_size < LUKS2_hdr_and_areas_size(&cd->u.luks2.hdr))
			log_err(cd, _("Device %s is too small."), device_path(crypt_metadata_device(cd)));

	/* Wipe integrity superblock and create integrity superblock */
	if (crypt_get_integrity_tag_size(cd)) {
		r = crypt_wipe_device(cd, crypt_data_device(cd), CRYPT_WIPE_ZERO,
				      crypt_get_data_offset(cd) * SECTOR_SIZE,
				      8 * SECTOR_SIZE, 8 * SECTOR_SIZE, NULL, NULL);
			log_err(cd, _("Cannot format device %s in use."),
				data_device_path(cd));
		else if (r == -EACCES) {
			log_err(cd, _("Cannot format device %s, permission denied."),
				data_device_path(cd));
			log_err(cd, _("Cannot wipe header on device %s."),
				data_device_path(cd));

		r = INTEGRITY_format(cd, params ? params->integrity_params : NULL, NULL, NULL);
			log_err(cd, _("Cannot format integrity for device %s."),
				data_device_path(cd));

	/* override sequence id check with format */
	r = LUKS2_hdr_write_force(cd, &cd->u.luks2.hdr);
		log_err(cd, _("Cannot format device %s in use."),
			mdata_device_path(cd));
	else if (r == -EACCES) {
		log_err(cd, _("Cannot format device %s, permission denied."),
			mdata_device_path(cd));
		log_err(cd, _("Cannot format device %s."),
			mdata_device_path(cd));

	LUKS2_hdr_free(cd, &cd->u.luks2.hdr);

	/* Device size can be larger now if it is a file container */
	if (!device_size(crypt_data_device(cd), &dev_size) &&
	    dev_size <= (crypt_get_data_offset(cd) * SECTOR_SIZE))
		log_std(cd, _("Device %s is too small for activation, there is no remaining space for data.\n"),
			device_path(crypt_data_device(cd)));
/*
 * crypt_format() helper for CRYPT_LOOPAES: no on-disk metadata, only the
 * in-memory context is populated. NOTE(review): some lines (parameters,
 * braces, returns) are elided in this view.
 */
static int _crypt_format_loopaes(struct crypt_device *cd,
	size_t volume_key_size,
	struct crypt_params_loopaes *params)
	if (!crypt_metadata_device(cd)) {
		log_err(cd, _("Can't format LOOPAES without device."));

	if (volume_key_size > 1024) {
		log_err(cd, _("Invalid key size."));

		log_err(cd, _("UUID is not supported for this crypt type."));

	if (cd->metadata_device) {
		log_err(cd, _("Detached metadata device is not supported for this crypt type."));

	if (!(cd->type = strdup(CRYPT_LOOPAES)))

	cd->u.loopaes.key_size = volume_key_size;

	/* NULL cipher falls back to the compiled-in loop-AES default. */
	cd->u.loopaes.cipher = strdup(cipher ?: DEFAULT_LOOPAES_CIPHER);

	if (params && params->hash)
		cd->u.loopaes.hdr.hash = strdup(params->hash);

	cd->u.loopaes.hdr.offset = params ? params->offset : 0;
	cd->u.loopaes.hdr.skip = params ? params->skip : 0;
/*
 * crypt_format() helper for CRYPT_VERITY: validates layout parameters,
 * checks for overlaps between data/hash/FEC areas, optionally computes the
 * hash tree and writes the verity superblock. NOTE(review): some lines
 * (declarations, braces, error paths) are elided in this view.
 */
static int _crypt_format_verity(struct crypt_device *cd,
	struct crypt_params_verity *params)
	int r = 0, hash_size;
	uint64_t data_device_size, hash_blocks_size;
	struct device *fec_device = NULL;
	char *fec_device_path = NULL, *hash_name = NULL, *root_hash = NULL, *salt = NULL;

	if (!crypt_metadata_device(cd)) {
		log_err(cd, _("Can't format VERITY without device."));

	if (!params->data_device && !cd->metadata_device)

	if (params->hash_type > VERITY_MAX_HASH_TYPE) {
		log_err(cd, _("Unsupported VERITY hash type %d."), params->hash_type);

	/* NOTE: VERITY_BLOCK_SIZE_OK is true for a *bad* size despite its name. */
	if (VERITY_BLOCK_SIZE_OK(params->data_block_size) ||
	    VERITY_BLOCK_SIZE_OK(params->hash_block_size)) {
		log_err(cd, _("Unsupported VERITY block size."));

	if (MISALIGNED_512(params->hash_area_offset)) {
		log_err(cd, _("Unsupported VERITY hash offset."));

	if (MISALIGNED_512(params->fec_area_offset)) {
		log_err(cd, _("Unsupported VERITY FEC offset."));

	if (!(cd->type = strdup(CRYPT_VERITY)))

	if (params->data_device) {
		r = crypt_set_data_device(cd, params->data_device);

	/* Derive data size (in blocks) from the device when not given. */
	if (!params->data_size) {
		r = device_size(cd->device, &data_device_size);
		cd->u.verity.hdr.data_size = data_device_size / params->data_block_size;
		cd->u.verity.hdr.data_size = params->data_size;

	/* Overlap checks are only relevant when areas share one device. */
	if (device_is_identical(crypt_metadata_device(cd), crypt_data_device(cd)) > 0 &&
	   (cd->u.verity.hdr.data_size * params->data_block_size) > params->hash_area_offset) {
		log_err(cd, _("Data area overlaps with hash area."));

	hash_size = crypt_hash_size(params->hash_name);
	if (hash_size <= 0) {
		log_err(cd, _("Hash algorithm %s not supported."),
	cd->u.verity.root_hash_size = hash_size;

	if (params->fec_device) {
		fec_device_path = strdup(params->fec_device);
		if (!fec_device_path)
		r = device_alloc(cd, &fec_device, params->fec_device);

		hash_blocks_size = VERITY_hash_blocks(cd, params) * params->hash_block_size;
		if (device_is_identical(crypt_metadata_device(cd), fec_device) > 0 &&
		   (params->hash_area_offset + hash_blocks_size) > params->fec_area_offset) {
			log_err(cd, _("Hash area overlaps with FEC area."));

		if (device_is_identical(crypt_data_device(cd), fec_device) > 0 &&
		   (cd->u.verity.hdr.data_size * params->data_block_size) > params->fec_area_offset) {
			log_err(cd, _("Data area overlaps with FEC area."));

	root_hash = malloc(cd->u.verity.root_hash_size);
	hash_name = strdup(params->hash_name);
	salt = malloc(params->salt_size);

	if (!root_hash || !hash_name || !salt) {

	/* Context takes ownership of the allocations above. */
	cd->u.verity.hdr.flags = params->flags;
	cd->u.verity.root_hash = root_hash;
	cd->u.verity.hdr.hash_name = hash_name;
	cd->u.verity.hdr.data_device = NULL;
	cd->u.verity.fec_device = fec_device;
	cd->u.verity.hdr.fec_device = fec_device_path;
	cd->u.verity.hdr.fec_roots = params->fec_roots;
	cd->u.verity.hdr.data_block_size = params->data_block_size;
	cd->u.verity.hdr.hash_block_size = params->hash_block_size;
	cd->u.verity.hdr.hash_area_offset = params->hash_area_offset;
	cd->u.verity.hdr.fec_area_offset = params->fec_area_offset;
	cd->u.verity.hdr.hash_type = params->hash_type;
	cd->u.verity.hdr.flags = params->flags;
	cd->u.verity.hdr.salt_size = params->salt_size;
	cd->u.verity.hdr.salt = salt;

	/* Caller-supplied salt is copied; otherwise generate a random one. */
	memcpy(salt, params->salt, params->salt_size);
	r = crypt_random_get(cd, salt, params->salt_size, CRYPT_RND_SALT);

	if (params->flags & CRYPT_VERITY_CREATE_HASH) {
		r = VERITY_create(cd, &cd->u.verity.hdr,
				  cd->u.verity.root_hash, cd->u.verity.root_hash_size);
		if (!r && params->fec_device)
			r = VERITY_FEC_process(cd, &cd->u.verity.hdr, cd->u.verity.fec_device, 0, NULL);

	if (!(params->flags & CRYPT_VERITY_NO_HEADER)) {
		if (!(cd->u.verity.uuid = strdup(uuid)))
		r = VERITY_UUID_generate(&cd->u.verity.uuid);

		r = VERITY_write_sb(cd, cd->u.verity.hdr.hash_area_offset,

	/* Error path: release resources not yet owned by the context. */
	device_free(cd, fec_device);
	free(fec_device_path);
/*
 * crypt_format() helper for standalone CRYPT_INTEGRITY: wipes the superblock
 * area and formats the device via INTEGRITY_format(), storing journal keys
 * and parameters in the context. NOTE(review): some lines (declarations,
 * braces, error paths) are elided in this view.
 */
static int _crypt_format_integrity(struct crypt_device *cd,
	struct crypt_params_integrity *params)
	uint32_t integrity_tag_size;
	char *integrity = NULL, *journal_integrity = NULL, *journal_crypt = NULL;
	struct volume_key *journal_crypt_key = NULL, *journal_mac_key = NULL;

		log_err(cd, _("UUID is not supported for this crypt type."));

	r = device_check_access(cd, crypt_metadata_device(cd), DEV_EXCL);

	/* Wipe first 8 sectors - fs magic numbers etc. */
	r = crypt_wipe_device(cd, crypt_metadata_device(cd), CRYPT_WIPE_ZERO, 0,
			      8 * SECTOR_SIZE, 8 * SECTOR_SIZE, NULL, NULL);
		log_err(cd, _("Cannot wipe header on device %s."),
			mdata_device_path(cd));

	if (!(cd->type = strdup(CRYPT_INTEGRITY)))

	/* Copy optional journal encryption / MAC keys into volume_key objects. */
	if (params->journal_crypt_key) {
		journal_crypt_key = crypt_alloc_volume_key(params->journal_crypt_key_size,
							   params->journal_crypt_key);
		if (!journal_crypt_key)

	if (params->journal_integrity_key) {
		journal_mac_key = crypt_alloc_volume_key(params->journal_integrity_key_size,
							 params->journal_integrity_key);
		if (!journal_mac_key) {

	if (params->integrity && !(integrity = strdup(params->integrity))) {
	if (params->journal_integrity && !(journal_integrity = strdup(params->journal_integrity))) {
	if (params->journal_crypt && !(journal_crypt = strdup(params->journal_crypt))) {

	/* Warn when explicit tag size disagrees with the hash output size. */
	integrity_tag_size = INTEGRITY_hash_tag_size(integrity);
	if (integrity_tag_size > 0 && params->tag_size && integrity_tag_size != params->tag_size)
		log_std(cd, _("WARNING: Requested tag size %d bytes differs from %s size output (%d bytes).\n"),
			params->tag_size, integrity, integrity_tag_size);

	if (params->tag_size)
		integrity_tag_size = params->tag_size;

	/* Context takes ownership of the duplicated strings and keys. */
	cd->u.integrity.journal_crypt_key = journal_crypt_key;
	cd->u.integrity.journal_mac_key = journal_mac_key;
	cd->u.integrity.params.journal_size = params->journal_size;
	cd->u.integrity.params.journal_watermark = params->journal_watermark;
	cd->u.integrity.params.journal_commit_time = params->journal_commit_time;
	cd->u.integrity.params.interleave_sectors = params->interleave_sectors;
	cd->u.integrity.params.buffer_sectors = params->buffer_sectors;
	cd->u.integrity.params.sector_size = params->sector_size;
	cd->u.integrity.params.tag_size = integrity_tag_size;
	cd->u.integrity.params.integrity = integrity;
	cd->u.integrity.params.journal_integrity = journal_integrity;
	cd->u.integrity.params.journal_crypt = journal_crypt;

	r = INTEGRITY_format(cd, params, cd->u.integrity.journal_crypt_key, cd->u.integrity.journal_mac_key);
		log_err(cd, _("Cannot format integrity for device %s."),
			mdata_device_path(cd));

	/* Error path: free everything not yet owned by the context. */
	crypt_free_volume_key(journal_crypt_key);
	crypt_free_volume_key(journal_mac_key);
	free(journal_integrity);
	free(journal_crypt);
/*
 * Common crypt_format() dispatcher: validates that the context is not yet
 * formatted, initializes crypto backend and delegates to the per-type helper.
 * On failure the context type and volume key are reset so the context can be
 * reused. NOTE(review): some lines (type checks, braces) are elided here.
 */
static int _crypt_format(struct crypt_device *cd,
	const char *cipher_mode,
	const char *volume_key,
	size_t volume_key_size,
	bool sector_size_autodetect)
	log_dbg(cd, "Context already formatted as %s.", cd->type);

	log_dbg(cd, "Formatting device %s as type %s.", mdata_device_path(cd) ?: "(none)", type);

	crypt_reset_null_type(cd);

	r = init_crypto(cd);

	/* Dispatch to the per-type format helper. */
		r = _crypt_format_plain(cd, cipher, cipher_mode,
					uuid, volume_key_size, params);
	else if (isLUKS1(type))
		r = _crypt_format_luks1(cd, cipher, cipher_mode,
					uuid, volume_key, volume_key_size, params);
	else if (isLUKS2(type))
		r = _crypt_format_luks2(cd, cipher, cipher_mode,
					uuid, volume_key, volume_key_size, params, sector_size_autodetect);
	else if (isLOOPAES(type))
		r = _crypt_format_loopaes(cd, cipher, uuid, volume_key_size, params);
	else if (isVERITY(type))
		r = _crypt_format_verity(cd, uuid, params);
	else if (isINTEGRITY(type))
		r = _crypt_format_integrity(cd, uuid, params);
		log_err(cd, _("Unknown crypt device type %s requested."), type);

	/* On failure: drop type and generated volume key from the context. */
		crypt_set_null_type(cd);
		crypt_free_volume_key(cd->volume_key);
		cd->volume_key = NULL;
/*
 * Versioned ABI export (since 2.4): the current crypt_format enables
 * encryption sector size autodetection (last argument true).
 */
CRYPT_SYMBOL_EXPORT_NEW(int, crypt_format, 2, 4,
	/* crypt_format parameters follows */
	struct crypt_device *cd,
	const char *cipher_mode,
	const char *volume_key,
	size_t volume_key_size,
	return _crypt_format(cd, type, cipher, cipher_mode, uuid, volume_key, volume_key_size, params, true);
/*
 * Versioned ABI export (2.0 compatibility): old callers keep the historical
 * behavior without sector size autodetection (last argument false).
 */
CRYPT_SYMBOL_EXPORT_OLD(int, crypt_format, 2, 0,
	/* crypt_format parameters follows */
	struct crypt_device *cd,
	const char *cipher_mode,
	const char *volume_key,
	size_t volume_key_size,
	return _crypt_format(cd, type, cipher, cipher_mode, uuid, volume_key, volume_key_size, params, false);
/*
 * Public API: try to repair on-disk LUKS metadata. Only LUKS types are
 * accepted; the header is loaded with the repair flag set and the data
 * device size re-checked afterwards. NOTE(review): braces/returns elided.
 */
int crypt_repair(struct crypt_device *cd,
	const char *requested_type,
	void *params __attribute__((unused)))
	log_dbg(cd, "Trying to repair %s crypt type from device %s.",
		requested_type ?: "any", mdata_device_path(cd) ?: "(none)");

	if (!crypt_metadata_device(cd))

	if (requested_type && !isLUKS(requested_type))

	/* Load with repair */
	r = _crypt_load_luks(cd, requested_type, false, true);

	/* cd->type and header must be set in context */
	r = crypt_check_data_device_size(cd);
		crypt_set_null_type(cd);
/* compare volume keys */
/*
 * Compare two volume keys; a zero result means "match" (crypt_backend_memeq
 * and strcmp both return 0 on equality). When either side is keyring-backed,
 * fall back to comparing kernel keyring descriptions instead of raw bytes.
 * NOTE(review): returns for the early cases are elided in this view.
 */
static int _compare_volume_keys(struct volume_key *svk, unsigned skeyring_only,
	struct volume_key *tvk, unsigned tkeyring_only)
	else if (!svk || !tvk)

	if (svk->keylength != tvk->keylength)

	/* Both keys in memory: constant-time byte comparison. */
	if (!skeyring_only && !tkeyring_only)
		return crypt_backend_memeq(svk->key, tvk->key, svk->keylength);

	if (svk->key_description && tvk->key_description)
		return strcmp(svk->key_description, tvk->key_description);
/*
 * Check that the context type is compatible with the active target's dm UUID
 * (prefix encodes the crypt type; the embedded UUID must match for LUKS).
 * NOTE(review): returns and closing braces are elided in this view.
 */
static int _compare_device_types(struct crypt_device *cd,
	const struct crypt_dm_active_device *src,
	const struct crypt_dm_active_device *tgt)
	log_dbg(cd, "Missing device uuid in target device.");

	/* LUKS2 with dm-integrity below uses the INTEGRITY- dm-uuid prefix. */
	if (isLUKS2(cd->type) && !strncmp("INTEGRITY-", tgt->uuid, strlen("INTEGRITY-"))) {
		if (crypt_uuid_cmp(tgt->uuid, src->uuid)) {
			log_dbg(cd, "LUKS UUID mismatch.");
	} else if (isLUKS(cd->type)) {
		if (!src->uuid || strncmp(cd->type, tgt->uuid, strlen(cd->type)) ||
		    crypt_uuid_cmp(tgt->uuid, src->uuid)) {
			log_dbg(cd, "LUKS UUID mismatch.");
	} else if (isPLAIN(cd->type) || isLOOPAES(cd->type)) {
		if (strncmp(cd->type, tgt->uuid, strlen(cd->type))) {
			log_dbg(cd, "Unexpected uuid prefix %s in target device.", tgt->uuid);
	} else if (!isINTEGRITY(cd->type)) {
		log_dbg(cd, "Unsupported device type %s for reload.", cd->type ?: "<empty>");
/*
 * Compare a requested dm-crypt segment (src) against the active one (tgt):
 * cipher spec, volume key, integrity spec, integer layout parameters and the
 * backing data device must all agree for a reload to be allowed.
 * NOTE(review): returns and error-path braces are elided in this view.
 */
static int _compare_crypt_devices(struct crypt_device *cd,
	const struct dm_target *src,
	const struct dm_target *tgt)
	char *src_cipher = NULL, *src_integrity = NULL;

	/* for crypt devices keys are mandatory */
	if (!src->u.crypt.vk || !tgt->u.crypt.vk)

	if (!src->u.crypt.cipher || !tgt->u.crypt.cipher)

	/*
	 * dm_query_target converts capi cipher specification to dm-crypt format.
	 * We need to do same for cipher specification requested in source
	 */
	if (crypt_capi_to_cipher(&src_cipher, &src_integrity, src->u.crypt.cipher, src->u.crypt.integrity))

	if (strcmp(src_cipher, tgt->u.crypt.cipher)) {
		log_dbg(cd, "Cipher specs do not match.");

	/* Active key may live in the kernel keyring; compare descriptions then. */
	if (tgt->u.crypt.vk->keylength == 0 && crypt_is_cipher_null(tgt->u.crypt.cipher))
		log_dbg(cd, "Existing device uses cipher null. Skipping key comparison.");
	else if (_compare_volume_keys(src->u.crypt.vk, 0, tgt->u.crypt.vk, tgt->u.crypt.vk->key_description != NULL)) {
		log_dbg(cd, "Keys in context and target device do not match.");

	if (crypt_strcmp(src_integrity, tgt->u.crypt.integrity)) {
		log_dbg(cd, "Integrity parameters do not match.");

	if (src->u.crypt.offset != tgt->u.crypt.offset ||
	    src->u.crypt.sector_size != tgt->u.crypt.sector_size ||
	    src->u.crypt.iv_offset != tgt->u.crypt.iv_offset ||
	    src->u.crypt.tag_size != tgt->u.crypt.tag_size) {
		log_dbg(cd, "Integer parameters do not match.");

	if (device_is_identical(src->data_device, tgt->data_device) <= 0)
		log_dbg(cd, "Data devices do not match.");

	free(src_integrity);
/*
 * Compare a requested dm-integrity segment (src) against the active one
 * (tgt) for reload; only parameters that are explicit in the table are
 * checked. NOTE(review): returns and closing braces are elided in this view.
 */
static int _compare_integrity_devices(struct crypt_device *cd,
	const struct dm_target *src,
	const struct dm_target *tgt)
	/*
	 * some parameters may be implicit (and set in dm-integrity ctor)
	 *	journal_commit_time
	 *	interleave_sectors
	 */

	/* check remaining integer values that makes sense */
	if (src->u.integrity.tag_size != tgt->u.integrity.tag_size ||
	    src->u.integrity.offset != tgt->u.integrity.offset ||
	    src->u.integrity.sector_size != tgt->u.integrity.sector_size) {
		log_dbg(cd, "Integer parameters do not match.");

	if (crypt_strcmp(src->u.integrity.integrity, tgt->u.integrity.integrity) ||
	    crypt_strcmp(src->u.integrity.journal_integrity, tgt->u.integrity.journal_integrity) ||
	    crypt_strcmp(src->u.integrity.journal_crypt, tgt->u.integrity.journal_crypt)) {
		log_dbg(cd, "Journal parameters do not match.");

	/* unfortunately dm-integrity doesn't support keyring */
	if (_compare_volume_keys(src->u.integrity.vk, 0, tgt->u.integrity.vk, 0) ||
	    _compare_volume_keys(src->u.integrity.journal_integrity_key, 0, tgt->u.integrity.journal_integrity_key, 0) ||
	    _compare_volume_keys(src->u.integrity.journal_crypt_key, 0, tgt->u.integrity.journal_crypt_key, 0)) {
		log_dbg(cd, "Journal keys do not match.");

	if (device_is_identical(src->data_device, tgt->data_device) <= 0) {
		log_dbg(cd, "Data devices do not match.");
/*
 * Compare two dm device descriptions segment-by-segment (types, then
 * per-type parameters) to decide whether an in-place reload is safe.
 * NOTE(review): loop construct, returns and braces are elided in this view.
 */
int crypt_compare_dm_devices(struct crypt_device *cd,
	const struct crypt_dm_active_device *src,
	const struct crypt_dm_active_device *tgt)
	const struct dm_target *s, *t;

	r = _compare_device_types(cd, src, tgt);

	log_dbg(cd, "segments count mismatch.");
	if (s->type != t->type) {
		log_dbg(cd, "segment type mismatch.");

	/* Delegate per-segment comparison by target type. */
	r = _compare_crypt_devices(cd, s, t);
	r = _compare_integrity_devices(cd, s, t);
	r = (s->u.linear.offset == t->u.linear.offset) ? 0 : -EINVAL;
/*
 * Refresh (reload) an active single-segment dm-crypt or dm-integrity device
 * with new parameters from sdmd, after verifying the new table matches the
 * active one. Requires CRYPT_ACTIVATE_REFRESH to be set by the caller.
 * NOTE(review): some braces/returns/error paths are elided in this view.
 */
static int _reload_device(struct crypt_device *cd, const char *name,
	struct crypt_dm_active_device *sdmd)
	struct crypt_dm_active_device tdmd;
	struct dm_target *src, *tgt = &tdmd.segment;

	if (!cd || !cd->type || !name || !(sdmd->flags & CRYPT_ACTIVATE_REFRESH))

	r = dm_query_device(cd, name, DM_ACTIVE_DEVICE | DM_ACTIVE_CRYPT_CIPHER |
				      DM_ACTIVE_UUID | DM_ACTIVE_CRYPT_KEYSIZE |
				      DM_ACTIVE_CRYPT_KEY | DM_ACTIVE_INTEGRITY_PARAMS |
				      DM_ACTIVE_JOURNAL_CRYPT_KEY | DM_ACTIVE_JOURNAL_MAC_KEY, &tdmd);
		log_err(cd, _("Device %s is not active."), name);

	/* Reload supports only single-segment crypt/integrity without AEAD tags. */
	if (!single_segment(&tdmd) ||
	    (tgt->type != DM_CRYPT && tgt->type != DM_INTEGRITY) ||
	    (tgt->type == DM_CRYPT && tgt->u.crypt.tag_size)) {
		log_err(cd, _("Unsupported parameters on device %s."), name);

	r = crypt_compare_dm_devices(cd, sdmd, &tdmd);
		log_err(cd, _("Mismatching parameters on device %s."), name);

	src = &sdmd->segment;

	/* Changing read only flag for active device makes no sense */
	if (tdmd.flags & CRYPT_ACTIVATE_READONLY)
		sdmd->flags |= CRYPT_ACTIVATE_READONLY;
		sdmd->flags &= ~CRYPT_ACTIVATE_READONLY;

	/* Keyring key: carry over description; otherwise copy the raw key. */
	if (tgt->type == DM_CRYPT && sdmd->flags & CRYPT_ACTIVATE_KEYRING_KEY) {
		r = crypt_volume_key_set_description(tgt->u.crypt.vk, src->u.crypt.vk->key_description);
	} else if (tgt->type == DM_CRYPT) {
		crypt_free_volume_key(tgt->u.crypt.vk);
		tgt->u.crypt.vk = crypt_alloc_volume_key(src->u.crypt.vk->keylength, src->u.crypt.vk->key);
		if (!tgt->u.crypt.vk) {

	if (tgt->type == DM_CRYPT)
		r = device_block_adjust(cd, src->data_device, DEV_OK,
					src->u.crypt.offset, &sdmd->size, NULL);
	else if (tgt->type == DM_INTEGRITY)
		r = device_block_adjust(cd, src->data_device, DEV_OK,
					src->u.integrity.offset, &sdmd->size, NULL);

	tdmd.flags = sdmd->flags;
	tgt->size = tdmd.size = sdmd->size;

	r = dm_reload_device(cd, name, &tdmd, 0, 1);

	dm_targets_free(cd, &tdmd);
	free(CONST_CAST(void*)tdmd.uuid);
/*
 * Refresh a stacked dm-crypt ("name") over dm-integrity ("iname", device
 * node "ipath") pair: reload both tables, then suspend and resume in the
 * correct order. On a fatal resume failure the pair is switched to dm-error
 * to avoid exposing an inconsistent device.
 * NOTE(review): intermediate lines are elided in this extract.
 */
2753 static int _reload_device_with_integrity(struct crypt_device *cd,
2757 struct crypt_dm_active_device *sdmd,
2758 struct crypt_dm_active_device *sdmdi)
2761 struct crypt_dm_active_device tdmd, tdmdi = {};
2762 struct dm_target *src, *srci, *tgt = &tdmd.segment, *tgti = &tdmdi.segment;
2763 struct device *data_device = NULL;
2766 if (!cd || !cd->type || !name || !iname || !(sdmd->flags & CRYPT_ACTIVATE_REFRESH))
/* Query the active dm-crypt (top) table including the key. */
2769 r = dm_query_device(cd, name, DM_ACTIVE_DEVICE | DM_ACTIVE_CRYPT_CIPHER |
2770 DM_ACTIVE_UUID | DM_ACTIVE_CRYPT_KEYSIZE |
2771 DM_ACTIVE_CRYPT_KEY, &tdmd);
2773 log_err(cd, _("Device %s is not active."), name);
/* Top device must be single-segment dm-crypt with authencryption tags. */
2777 if (!single_segment(&tdmd) || tgt->type != DM_CRYPT || !tgt->u.crypt.tag_size) {
2778 log_err(cd, _("Unsupported parameters on device %s."), name);
/* Query the active dm-integrity (bottom) table. */
2783 r = dm_query_device(cd, iname, DM_ACTIVE_DEVICE | DM_ACTIVE_UUID, &tdmdi);
2785 log_err(cd, _("Device %s is not active."), iname);
2790 if (!single_segment(&tdmdi) || tgti->type != DM_INTEGRITY) {
2791 log_err(cd, _("Unsupported parameters on device %s."), iname);
2796 r = crypt_compare_dm_devices(cd, sdmdi, &tdmdi);
2798 log_err(cd, _("Mismatching parameters on device %s."), iname);
2802 /* unsupported underneath dm-crypt with auth. encryption */
2803 if (sdmdi->segment.u.integrity.meta_device || tdmdi.segment.u.integrity.meta_device)
2806 src = &sdmd->segment;
2807 srci = &sdmdi->segment;
/* The crypt layer's data device is the integrity device node itself. */
2809 r = device_alloc(cd, &data_device, ipath);
2813 r = device_block_adjust(cd, srci->data_device, DEV_OK,
2814 srci->u.integrity.offset, &sdmdi->size, NULL);
2818 src->data_device = data_device;
2820 r = crypt_compare_dm_devices(cd, sdmd, &tdmd);
2822 log_err(cd, _("Crypt devices mismatch."));
2826 /* Changing read only flag for active device makes no sense */
2827 if (tdmd.flags & CRYPT_ACTIVATE_READONLY)
2828 sdmd->flags |= CRYPT_ACTIVATE_READONLY;
2830 sdmd->flags &= ~CRYPT_ACTIVATE_READONLY;
2832 if (tdmdi.flags & CRYPT_ACTIVATE_READONLY)
2833 sdmdi->flags |= CRYPT_ACTIVATE_READONLY;
2835 sdmdi->flags &= ~CRYPT_ACTIVATE_READONLY;
/* Propagate key via keyring description or by copying raw key material. */
2837 if (sdmd->flags & CRYPT_ACTIVATE_KEYRING_KEY) {
2838 r = crypt_volume_key_set_description(tgt->u.crypt.vk, src->u.crypt.vk->key_description);
2842 crypt_free_volume_key(tgt->u.crypt.vk);
2843 tgt->u.crypt.vk = crypt_alloc_volume_key(src->u.crypt.vk->keylength, src->u.crypt.vk->key);
2844 if (!tgt->u.crypt.vk) {
2850 r = device_block_adjust(cd, src->data_device, DEV_OK,
2851 src->u.crypt.offset, &sdmd->size, NULL);
2855 tdmd.flags = sdmd->flags;
2856 tdmd.size = sdmd->size;
/* Reload both tables first (inactive slots), then suspend top-down and
 * resume bottom-up so the crypt layer never sees a stale integrity table. */
2858 if ((r = dm_reload_device(cd, iname, sdmdi, 0, 0))) {
2859 log_err(cd, _("Failed to reload device %s."), iname);
2863 if ((r = dm_reload_device(cd, name, &tdmd, 0, 0))) {
2864 log_err(cd, _("Failed to reload device %s."), name);
2869 if ((r = dm_suspend_device(cd, name, 0))) {
2870 log_err(cd, _("Failed to suspend device %s."), name);
2875 if ((r = dm_suspend_device(cd, iname, 0))) {
2876 log_err(cd, _("Failed to suspend device %s."), iname);
2881 if ((r = dm_resume_device(cd, iname, act2dmflags(sdmdi->flags)))) {
2882 log_err(cd, _("Failed to resume device %s."), iname);
2887 r = dm_resume_device(cd, name, act2dmflags(tdmd.flags));
2892 * This is worst case scenario. We have active underlying dm-integrity device with
2893 * new table but dm-crypt resume failed for some reason. Tear everything down and
2897 log_err(cd, _("Fatal error while reloading device %s (on top of device %s)."), name, iname);
2899 if (dm_error_device(cd, name))
2900 log_err(cd, _("Failed to switch device %s to dm-error."), name);
2901 if (dm_error_device(cd, iname))
2902 log_err(cd, _("Failed to switch device %s to dm-error."), iname);
/* Error rollback path: drop pending inactive tables and resume if needed. */
2905 dm_clear_device(cd, name);
2906 dm_clear_device(cd, iname);
2908 if (dm_status_suspended(cd, name) > 0)
2909 dm_resume_device(cd, name, 0);
2910 if (dm_status_suspended(cd, iname) > 0)
2911 dm_resume_device(cd, iname, 0);
2914 dm_targets_free(cd, &tdmd);
2915 dm_targets_free(cd, &tdmdi);
2916 free(CONST_CAST(void*)tdmdi.uuid);
2917 free(CONST_CAST(void*)tdmd.uuid);
2918 device_free(cd, data_device);
/*
 * Resize an active device "name" to new_size sectors (0 = maximum available).
 * Works for dm-crypt and dm-integrity targets; reloads the table with the
 * same parameters but a new length. Returns 0 on success, negative errno.
 * NOTE(review): intermediate lines are elided in this extract.
 */
2923 int crypt_resize(struct crypt_device *cd, const char *name, uint64_t new_size)
2925 struct crypt_dm_active_device dmdq, dmd = {};
2926 struct dm_target *tgt = &dmdq.segment;
2927 struct crypt_params_integrity params = {};
2928 uint32_t supported_flags = 0;
2933 * FIXME: Also with LUKS2 we must not allow resize when there's
2934 * explicit size stored in metadata (length != "dynamic")
2937 /* Device context type must be initialized */
2938 if (!cd || !cd->type || !name)
2941 if (isTCRYPT(cd->type) || isBITLK(cd->type)) {
2942 log_err(cd, _("This operation is not supported for this device type."));
2946 log_dbg(cd, "Resizing device %s to %" PRIu64 " sectors.", name, new_size);
/* Query the active table, including keys, so it can be reloaded below. */
2948 r = dm_query_device(cd, name, DM_ACTIVE_CRYPT_KEYSIZE | DM_ACTIVE_CRYPT_KEY |
2949 DM_ACTIVE_INTEGRITY_PARAMS | DM_ACTIVE_JOURNAL_CRYPT_KEY |
2950 DM_ACTIVE_JOURNAL_MAC_KEY, &dmdq);
2952 log_err(cd, _("Device %s is not active."), name);
2955 if (!single_segment(&dmdq) || (tgt->type != DM_CRYPT && tgt->type != DM_INTEGRITY)) {
2956 log_dbg(cd, "Unsupported device table detected in %s.", name);
/* Keyring-backed key in the table requires the key description in context. */
2961 if ((dmdq.flags & CRYPT_ACTIVATE_KEYRING_KEY) && !crypt_key_in_keyring(cd)) {
2966 if (crypt_key_in_keyring(cd)) {
2967 if (!isLUKS2(cd->type)) {
2971 r = LUKS2_key_description_by_segment(cd, &cd->u.luks2.hdr,
2972 tgt->u.crypt.vk, CRYPT_DEFAULT_SEGMENT);
2976 dmdq.flags |= CRYPT_ACTIVATE_KEYRING_KEY;
/* If the backing device is a loop device, try to grow it first. */
2979 if (crypt_loop_device(crypt_get_device_name(cd))) {
2980 log_dbg(cd, "Trying to resize underlying loop device %s.",
2981 crypt_get_device_name(cd));
2982 /* Here we always use default size not new_size */
2983 if (crypt_loop_resize(crypt_get_device_name(cd)))
2984 log_err(cd, _("Cannot resize loop device."));
2989 * Integrity device metadata are maintained by the kernel. We need to
2990 * reload the device (with the same parameters) and let the kernel
2991 * calculate the maximum size of integrity device and store it in the
2994 if (!new_size && tgt->type == DM_INTEGRITY) {
2995 r = INTEGRITY_data_sectors(cd, crypt_metadata_device(cd),
2996 crypt_get_data_offset(cd) * SECTOR_SIZE, &old_size);
/* Reload with REFRESH|PRIVATE so the kernel recalculates provided sectors. */
3000 dmd.size = dmdq.size;
3001 dmd.flags = dmdq.flags | CRYPT_ACTIVATE_REFRESH | CRYPT_ACTIVATE_PRIVATE;
3003 r = crypt_get_integrity_info(cd, &params);
3007 r = dm_integrity_target_set(cd, &dmd.segment, 0, dmdq.segment.size,
3008 crypt_metadata_device(cd), crypt_data_device(cd),
3009 crypt_get_integrity_tag_size(cd), crypt_get_data_offset(cd),
3010 crypt_get_sector_size(cd), tgt->u.integrity.vk, tgt->u.integrity.journal_crypt_key,
3011 tgt->u.integrity.journal_integrity_key, &params);
3014 r = _reload_device(cd, name, &dmd);
3018 r = INTEGRITY_data_sectors(cd, crypt_metadata_device(cd),
3019 crypt_get_data_offset(cd) * SECTOR_SIZE, &new_size);
3022 log_dbg(cd, "Maximum integrity device size from kernel %" PRIu64, new_size);
3024 if (old_size == new_size && new_size == dmdq.size &&
3025 !dm_flags(cd, tgt->type, &supported_flags) &&
3026 !(supported_flags & DM_INTEGRITY_RESIZE_SUPPORTED))
3027 log_std(cd, _("WARNING: Maximum size already set or kernel doesn't support resize.\n"));
/* Clamp/validate the requested size against the data device. */
3030 r = device_block_adjust(cd, crypt_data_device(cd), DEV_OK,
3031 crypt_get_data_offset(cd), &new_size, &dmdq.flags);
/* New size must be aligned to the encryption sector size... */
3035 if (MISALIGNED(new_size, (tgt->type == DM_CRYPT ? tgt->u.crypt.sector_size : tgt->u.integrity.sector_size) >> SECTOR_SHIFT)) {
3036 log_err(cd, _("Device size is not aligned to requested sector size."));
/* ...and to the data device's logical block size. */
3041 if (MISALIGNED(new_size, device_block_size(cd, crypt_data_device(cd)) >> SECTOR_SHIFT)) {
3042 log_err(cd, _("Device size is not aligned to device logical block size."));
/* Build the replacement table with the new length, same parameters. */
3047 dmd.uuid = crypt_get_uuid(cd);
3048 dmd.size = new_size;
3049 dmd.flags = dmdq.flags | CRYPT_ACTIVATE_REFRESH;
3051 if (tgt->type == DM_CRYPT) {
3052 r = dm_crypt_target_set(&dmd.segment, 0, new_size, crypt_data_device(cd),
3053 tgt->u.crypt.vk, crypt_get_cipher_spec(cd),
3054 crypt_get_iv_offset(cd), crypt_get_data_offset(cd),
3055 crypt_get_integrity(cd), crypt_get_integrity_tag_size(cd),
3056 crypt_get_sector_size(cd));
3059 } else if (tgt->type == DM_INTEGRITY) {
3060 r = crypt_get_integrity_info(cd, &params);
3064 r = dm_integrity_target_set(cd, &dmd.segment, 0, new_size,
3065 crypt_metadata_device(cd), crypt_data_device(cd),
3066 crypt_get_integrity_tag_size(cd), crypt_get_data_offset(cd),
3067 crypt_get_sector_size(cd), tgt->u.integrity.vk, tgt->u.integrity.journal_crypt_key,
3068 tgt->u.integrity.journal_integrity_key, &params);
/* Nothing to do if the device already has the requested size. */
3073 if (new_size == dmdq.size) {
3074 log_dbg(cd, "Device has already requested size %" PRIu64
3075 " sectors.", dmdq.size);
3078 if (isTCRYPT(cd->type))
3080 else if (isLUKS2(cd->type))
3081 r = LUKS2_unmet_requirements(cd, &cd->u.luks2.hdr, 0, 0);
3083 r = _reload_device(cd, name, &dmd);
3085 if (r && tgt->type == DM_INTEGRITY &&
3086 !dm_flags(cd, tgt->type, &supported_flags) &&
3087 !(supported_flags & DM_INTEGRITY_RESIZE_SUPPORTED))
3088 log_err(cd, _("Resize failed, the kernel doesn't support it."));
3091 dm_targets_free(cd, &dmd);
3092 dm_targets_free(cd, &dmdq);
/*
 * Set a new UUID in the LUKS header (or refresh/regenerate it when uuid is
 * NULL). LUKS1 and LUKS2 only; asks for confirmation via crypt_confirm().
 * NOTE(review): intermediate lines are elided in this extract.
 */
3097 int crypt_set_uuid(struct crypt_device *cd, const char *uuid)
3099 const char *active_uuid;
3102 log_dbg(cd, "%s device uuid.", uuid ? "Setting new" : "Refreshing");
3104 if ((r = onlyLUKS(cd)))
3107 active_uuid = crypt_get_uuid(cd);
/* No-op when the requested UUID matches the one already in the header. */
3109 if (uuid && active_uuid && !strncmp(uuid, active_uuid, UUID_STRING_L)) {
3110 log_dbg(cd, "UUID is the same as requested (%s) for device %s.",
3111 uuid, mdata_device_path(cd));
3116 log_dbg(cd, "Requested new UUID change to %s for %s.", uuid, mdata_device_path(cd));
3118 log_dbg(cd, "Requested new UUID refresh for %s.", mdata_device_path(cd));
3120 if (!crypt_confirm(cd, _("Do you really want to change UUID of device?")))
/* Dispatch to the version-specific header writer. */
3123 if (isLUKS1(cd->type))
3124 return LUKS_hdr_uuid_set(&cd->u.luks1.hdr, uuid, cd);
3126 return LUKS2_hdr_uuid(cd, &cd->u.luks2.hdr, uuid);
/*
 * Set label and subsystem strings in a LUKS2 header (LUKS2 only) and commit
 * the change to disk.
 */
3129 int crypt_set_label(struct crypt_device *cd, const char *label, const char *subsystem)
3133 log_dbg(cd, "Setting new labels.");
3135 if ((r = onlyLUKS2(cd)))
3138 return LUKS2_hdr_labels(cd, &cd->u.luks2.hdr, label, subsystem, 1);
/*
 * Return the LUKS2 header label, or NULL for non-LUKS2 contexts
 * (check runs quietly and ignores unmet requirements).
 */
3141 const char *crypt_get_label(struct crypt_device *cd)
3143 if (_onlyLUKS2(cd, CRYPT_CD_QUIET | CRYPT_CD_UNRESTRICTED, 0))
3146 return cd->u.luks2.hdr.label;
/*
 * Return the LUKS2 header subsystem string, or NULL for non-LUKS2 contexts
 * (check runs quietly and ignores unmet requirements).
 */
3149 const char *crypt_get_subsystem(struct crypt_device *cd)
3151 if (_onlyLUKS2(cd, CRYPT_CD_QUIET | CRYPT_CD_UNRESTRICTED, 0))
3154 return cd->u.luks2.hdr.subsystem;
/*
 * Back up the LUKS header (and keyslot area) to backup_file. The header is
 * re-loaded with repair before backup. requested_type may restrict to
 * CRYPT_LUKS1/CRYPT_LUKS2; NULL accepts any LUKS type.
 * NOTE(review): intermediate lines are elided in this extract.
 */
3157 int crypt_header_backup(struct crypt_device *cd,
3158 const char *requested_type,
3159 const char *backup_file)
3163 if (requested_type && !isLUKS(requested_type))
3169 /* Load with repair */
3170 r = _crypt_load_luks(cd, requested_type, false, false);
3174 log_dbg(cd, "Requested header backup of device %s (%s) to "
3175 "file %s.", mdata_device_path(cd), requested_type ?: "any type", backup_file);
/* Dispatch to version-specific backup; type filter must match loaded type. */
3177 if (isLUKS1(cd->type) && (!requested_type || isLUKS1(requested_type)))
3178 r = LUKS_hdr_backup(backup_file, cd);
3179 else if (isLUKS2(cd->type) && (!requested_type || isLUKS2(requested_type)))
3180 r = LUKS2_hdr_backup(cd, &cd->u.luks2.hdr, backup_file);
/*
 * Restore a LUKS header from backup_file onto the device. The backup's
 * LUKS version is detected first and must be compatible with both
 * requested_type and the currently loaded header type (if any). After a
 * successful restore the context is re-loaded from disk.
 * NOTE(review): intermediate lines are elided in this extract.
 */
3187 int crypt_header_restore(struct crypt_device *cd,
3188 const char *requested_type,
3189 const char *backup_file)
3191 struct luks_phdr hdr1;
3192 struct luks2_hdr hdr2;
3195 if (requested_type && !isLUKS(requested_type))
3198 if (!cd || (cd->type && !isLUKS(cd->type)) || !backup_file)
3201 r = init_crypto(cd);
3205 log_dbg(cd, "Requested header restore to device %s (%s) from "
3206 "file %s.", mdata_device_path(cd), requested_type ?: "any type", backup_file);
/* Detect LUKS version in the backup without taking the device lock. */
3208 version = LUKS2_hdr_version_unlocked(cd, backup_file);
3210 (requested_type && version == 1 && !isLUKS1(requested_type)) ||
3211 (requested_type && version == 2 && !isLUKS2(requested_type))) {
3212 log_err(cd, _("Header backup file does not contain compatible LUKS header."));
3216 memset(&hdr2, 0, sizeof(hdr2));
/* Temporary header structs are used when no matching header is loaded;
 * they are wiped afterwards since they may hold key material. */
3220 r = LUKS_hdr_restore(backup_file, &hdr1, cd);
3222 r = LUKS2_hdr_restore(cd, &hdr2, backup_file);
3224 crypt_safe_memzero(&hdr1, sizeof(hdr1));
3225 crypt_safe_memzero(&hdr2, sizeof(hdr2));
3226 } else if (isLUKS2(cd->type) && (!requested_type || isLUKS2(requested_type))) {
3227 r = LUKS2_hdr_restore(cd, &cd->u.luks2.hdr, backup_file);
3229 (void) _crypt_load_luks2(cd, 1, 0);
3230 } else if (isLUKS1(cd->type) && (!requested_type || isLUKS1(requested_type)))
3231 r = LUKS_hdr_restore(backup_file, &cd->u.luks1.hdr, cd);
/* Re-load context from the freshly restored on-disk header. */
3236 r = _crypt_load_luks(cd, version == 1 ? CRYPT_LUKS1 : CRYPT_LUKS2, false, true);
/*
 * Return whether the LUKS metadata lives on a different device than the
 * data (detached header). Compares data and metadata device identity;
 * negative return means the comparison itself failed.
 */
3241 int crypt_header_is_detached(struct crypt_device *cd)
3245 if (!cd || (cd->type && !isLUKS(cd->type)))
3248 r = device_is_identical(crypt_data_device(cd), crypt_metadata_device(cd));
3250 log_dbg(cd, "Failed to compare data and metadata devices path.");
/*
 * Release a crypt device context: free the volume key, type-specific data,
 * devices and pbkdf strings, then wipe the whole structure (it may contain
 * key material, e.g. for TCRYPT).
 * NOTE(review): intermediate lines are elided in this extract.
 */
3257 void crypt_free(struct crypt_device *cd)
3262 log_dbg(cd, "Releasing crypt device %s context.", mdata_device_path(cd) ?: "empty");
3264 dm_backend_exit(cd);
3265 crypt_free_volume_key(cd->volume_key);
3267 crypt_free_type(cd, NULL);
3269 device_free(cd, cd->device);
3270 device_free(cd, cd->metadata_device);
3272 free(CONST_CAST(void*)cd->pbkdf.type);
3273 free(CONST_CAST(void*)cd->pbkdf.hash);
3275 /* Some structures can contain keys (TCRYPT), wipe it */
3276 crypt_safe_memzero(cd, sizeof(*cd));
/*
 * Return a newly allocated copy of the kernel keyring key description used
 * by the active dm-crypt device "name", or NULL when the device does not
 * use a keyring-backed key. Caller frees the result.
 */
3280 static char *crypt_get_device_key_description(struct crypt_device *cd, const char *name)
3283 struct crypt_dm_active_device dmd;
3284 struct dm_target *tgt = &dmd.segment;
3286 if (dm_query_device(cd, name, DM_ACTIVE_CRYPT_KEY | DM_ACTIVE_CRYPT_KEYSIZE, &dmd) < 0)
/* Only single-segment dm-crypt with a keyring key has a description. */
3289 if (single_segment(&dmd) && tgt->type == DM_CRYPT &&
3290 (dmd.flags & CRYPT_ACTIVATE_KEYRING_KEY) && tgt->u.crypt.vk->key_description)
3291 desc = strdup(tgt->u.crypt.vk->key_description);
3293 dm_targets_free(cd, &dmd);
/*
 * Suspend an active LUKS device and wipe its volume key from dm-crypt
 * (and from the kernel keyring when applicable). Wrapped keys cannot be
 * wiped, so the wipe flag is dropped for wrapped-key ciphers.
 * NOTE(review): intermediate lines are elided in this extract.
 */
3298 int crypt_suspend(struct crypt_device *cd,
3302 crypt_status_info ci;
3304 uint32_t dmflags = DM_SUSPEND_WIPE_KEY;
3306 /* FIXME: check context uuid matches the dm-crypt device uuid (onlyLUKS branching) */
3311 log_dbg(cd, "Suspending volume %s.", name);
/* Accept either LUKS1 or LUKS2 context (by uuid type comparison). */
3316 r = crypt_uuid_type_cmp(cd, CRYPT_LUKS1);
3318 r = crypt_uuid_type_cmp(cd, CRYPT_LUKS2);
3320 log_err(cd, _("This operation is supported only for LUKS device."));
3326 ci = crypt_status(NULL, name);
3327 if (ci < CRYPT_ACTIVE) {
3328 log_err(cd, _("Volume %s is not active."), name);
3332 dm_backend_init(cd);
3334 r = dm_status_suspended(cd, name);
3339 log_err(cd, _("Volume %s is already suspended."), name);
/* Remember the keyring key description so it can be dropped after suspend. */
3344 key_desc = crypt_get_device_key_description(cd, name);
3346 /* we can't simply wipe wrapped keys */
3347 if (crypt_cipher_wrapped_key(crypt_get_cipher(cd), crypt_get_cipher_mode(cd)))
3348 dmflags &= ~DM_SUSPEND_WIPE_KEY;
3350 r = dm_suspend_device(cd, name, dmflags);
3352 log_err(cd, _("Suspend is not supported for device %s."), name);
3354 log_err(cd, _("Error during suspending device %s."), name);
3356 crypt_drop_keyring_key_by_description(cd, key_desc, LOGON_KEY);
3359 dm_backend_exit(cd);
3363 /* key must be properly verified */
/*
 * Resume a suspended device using an already-verified volume key.
 * Handles the cipher_null special case (zero-length key) and uploads the
 * key to the kernel keyring when the context requires it (LUKS2 only).
 * NOTE(review): intermediate lines are elided in this extract.
 */
3364 static int resume_by_volume_key(struct crypt_device *cd,
3365 struct volume_key *vk,
3369 struct volume_key *zerokey = NULL;
3371 if (crypt_is_cipher_null(crypt_get_cipher_spec(cd))) {
3372 zerokey = crypt_alloc_volume_key(0, NULL);
3376 } else if (crypt_use_keyring_for_vk(cd)) {
3377 /* LUKS2 path only */
3378 digest = LUKS2_digest_by_segment(&cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT);
3381 r = LUKS2_volume_key_load_in_keyring_by_digest(cd, vk, digest);
3386 r = dm_resume_and_reinstate_key(cd, name, vk);
3389 log_err(cd, _("Resume is not supported for device %s."), name);
3391 log_err(cd, _("Error during resuming device %s."), name);
/* On failure, remove the key we may have uploaded to the keyring. */
3394 crypt_drop_keyring_key(cd, vk);
3396 crypt_free_volume_key(zerokey);
/*
 * Resume a suspended LUKS device by unlocking a keyslot with a passphrase.
 * Returns the keyslot number used, or negative errno on error.
 * NOTE(review): intermediate lines are elided in this extract.
 */
3401 int crypt_resume_by_passphrase(struct crypt_device *cd,
3404 const char *passphrase,
3405 size_t passphrase_size)
3407 struct volume_key *vk = NULL;
3410 /* FIXME: check context uuid matches the dm-crypt device uuid */
3412 if (!passphrase || !name)
3415 log_dbg(cd, "Resuming volume %s.", name);
3417 if ((r = onlyLUKS(cd)))
3420 r = dm_status_suspended(cd, name);
3425 log_err(cd, _("Volume %s is not suspended."), name);
/* Unlock the volume key with the version-specific keyslot open. */
3429 if (isLUKS1(cd->type))
3430 r = LUKS_open_key_with_hdr(keyslot, passphrase, passphrase_size,
3431 &cd->u.luks1.hdr, &vk, cd);
3433 r = LUKS2_keyslot_open(cd, keyslot, CRYPT_DEFAULT_SEGMENT, passphrase, passphrase_size, &vk);
3440 r = resume_by_volume_key(cd, vk, name);
3442 crypt_free_volume_key(vk);
3443 return r < 0 ? r : keyslot;
/*
 * Resume a suspended LUKS device with a passphrase read from a keyfile
 * (with byte offset/size limits). Returns the keyslot number used, or
 * negative errno on error.
 * NOTE(review): intermediate lines are elided in this extract.
 */
3446 int crypt_resume_by_keyfile_device_offset(struct crypt_device *cd,
3449 const char *keyfile,
3450 size_t keyfile_size,
3451 uint64_t keyfile_offset)
3453 struct volume_key *vk = NULL;
3454 char *passphrase_read = NULL;
3455 size_t passphrase_size_read;
3458 /* FIXME: check context uuid matches the dm-crypt device uuid */
3460 if (!name || !keyfile)
3463 log_dbg(cd, "Resuming volume %s.", name);
3465 if ((r = onlyLUKS(cd)))
3468 r = dm_status_suspended(cd, name);
3473 log_err(cd, _("Volume %s is not suspended."), name);
/* Read the passphrase material from the keyfile. */
3477 r = crypt_keyfile_device_read(cd, keyfile,
3478 &passphrase_read, &passphrase_size_read,
3479 keyfile_offset, keyfile_size, 0);
3483 if (isLUKS1(cd->type))
3484 r = LUKS_open_key_with_hdr(keyslot, passphrase_read, passphrase_size_read,
3485 &cd->u.luks1.hdr, &vk, cd);
3487 r = LUKS2_keyslot_open(cd, keyslot, CRYPT_DEFAULT_SEGMENT,
3488 passphrase_read, passphrase_size_read, &vk);
/* Secure-free the passphrase buffer as soon as it is no longer needed. */
3490 crypt_safe_free(passphrase_read);
3496 r = resume_by_volume_key(cd, vk, name);
3498 crypt_free_volume_key(vk);
3499 return r < 0 ? r : keyslot;
/* Convenience wrapper: resume by keyfile with no offset. */
3502 int crypt_resume_by_keyfile(struct crypt_device *cd,
3505 const char *keyfile,
3506 size_t keyfile_size)
3508 return crypt_resume_by_keyfile_device_offset(cd, name, keyslot,
3509 keyfile, keyfile_size, 0);
/* Legacy wrapper: same as the device_offset variant but with size_t offset. */
3512 int crypt_resume_by_keyfile_offset(struct crypt_device *cd,
3515 const char *keyfile,
3516 size_t keyfile_size,
3517 size_t keyfile_offset)
3519 return crypt_resume_by_keyfile_device_offset(cd, name, keyslot,
3520 keyfile, keyfile_size, keyfile_offset);
/*
 * Resume a suspended LUKS device using a raw volume key supplied by the
 * caller. The key is verified against the header digest before use.
 * NOTE(review): intermediate lines are elided in this extract.
 */
3523 int crypt_resume_by_volume_key(struct crypt_device *cd,
3525 const char *volume_key,
3526 size_t volume_key_size)
3528 struct volume_key *vk = NULL;
3531 if (!name || !volume_key)
3534 log_dbg(cd, "Resuming volume %s by volume key.", name);
3536 if ((r = onlyLUKS(cd)))
3539 r = dm_status_suspended(cd, name);
3544 log_err(cd, _("Volume %s is not suspended."), name);
3548 vk = crypt_alloc_volume_key(volume_key_size, volume_key);
/* Verify the supplied key against the on-disk header before resuming. */
3552 if (isLUKS1(cd->type))
3553 r = LUKS_verify_volume_key(&cd->u.luks1.hdr, vk);
3554 else if (isLUKS2(cd->type))
3555 r = LUKS2_digest_verify_by_segment(cd, &cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT, vk);
3558 if (r == -EPERM || r == -ENOENT)
3559 log_err(cd, _("Volume key does not match the volume."));
3562 r = resume_by_volume_key(cd, vk, name);
3564 crypt_free_volume_key(vk);
/*
 * Resume a suspended LUKS2 device by unlocking via a token (optionally a
 * specific token id/type, with an optional PIN). Returns the keyslot number
 * used, or negative errno on error.
 * NOTE(review): intermediate lines are elided in this extract.
 */
3568 int crypt_resume_by_token_pin(struct crypt_device *cd, const char *name,
3569 const char *type, int token, const char *pin, size_t pin_size,
3572 struct volume_key *vk = NULL;
3578 log_dbg(cd, "Resuming volume %s by token (%s type) %d.",
3579 name, type ?: "any", token);
3581 if ((r = _onlyLUKS2(cd, CRYPT_CD_QUIET, 0)))
3584 r = dm_status_suspended(cd, name);
3589 log_err(cd, _("Volume %s is not suspended."), name);
/* Token machinery unlocks the volume key for the default segment. */
3593 r = LUKS2_token_unlock_key(cd, &cd->u.luks2.hdr, token, type,
3594 pin, pin_size, CRYPT_DEFAULT_SEGMENT, usrptr, &vk);
3597 r = resume_by_volume_key(cd, vk, name);
3599 crypt_free_volume_key(vk);
3600 return r < 0 ? r : keyslot;
3604 * Keyslot manipulation
/*
 * Add a new keyslot protected by new_passphrase, unlocking the volume key
 * with an existing passphrase. Thin wrapper over the keyslot-context API.
 * Returns the new keyslot number or negative errno.
 */
3606 int crypt_keyslot_add_by_passphrase(struct crypt_device *cd,
3607 int keyslot, // -1 any
3608 const char *passphrase,
3609 size_t passphrase_size,
3610 const char *new_passphrase,
3611 size_t new_passphrase_size)
3614 struct crypt_keyslot_context kc, new_kc;
3616 if (!passphrase || !new_passphrase)
/* Build stack-allocated unlock contexts for old and new credentials. */
3619 crypt_keyslot_unlock_by_passphrase_init_internal(&kc, passphrase, passphrase_size);
3620 crypt_keyslot_unlock_by_passphrase_init_internal(&new_kc, new_passphrase, new_passphrase_size);
3622 r = crypt_keyslot_add_by_keyslot_context(cd, CRYPT_ANY_SLOT, &kc, keyslot, &new_kc, 0);
3624 crypt_keyslot_context_destroy_internal(&kc);
3625 crypt_keyslot_context_destroy_internal(&new_kc);
/*
 * Change the passphrase of a keyslot, optionally moving it to a different
 * slot number. Unlocks the volume key with the old passphrase, stores it
 * under the new passphrase, then destroys/wipes the old slot. For LUKS2 the
 * digest and token assignments are carried over to the new slot.
 * NOTE(review): intermediate lines are elided in this extract.
 */
3630 int crypt_keyslot_change_by_passphrase(struct crypt_device *cd,
3633 const char *passphrase,
3634 size_t passphrase_size,
3635 const char *new_passphrase,
3636 size_t new_passphrase_size)
3638 int digest = -1, r, keyslot_new_orig = keyslot_new;
3639 struct luks2_keyslot_params params;
3640 struct volume_key *vk = NULL;
3642 if (!passphrase || !new_passphrase)
3645 log_dbg(cd, "Changing passphrase from old keyslot %d to new %d.",
3646 keyslot_old, keyslot_new);
3648 if ((r = onlyLUKS(cd)))
/* Unlock the volume key with the old passphrase. */
3651 if (isLUKS1(cd->type))
3652 r = LUKS_open_key_with_hdr(keyslot_old, passphrase, passphrase_size,
3653 &cd->u.luks1.hdr, &vk, cd);
3654 else if (isLUKS2(cd->type)) {
3655 r = LUKS2_keyslot_open(cd, keyslot_old, CRYPT_ANY_SEGMENT, passphrase, passphrase_size, &vk);
3656 /* will fail for keyslots w/o digest. fix if supported in a future */
3658 digest = LUKS2_digest_by_keyslot(&cd->u.luks2.hdr, r);
3667 if (keyslot_old != CRYPT_ANY_SLOT && keyslot_old != r) {
3668 log_dbg(cd, "Keyslot mismatch.");
/* Pick a new slot; fall back to overwriting the old one if none is free. */
3673 if (keyslot_new == CRYPT_ANY_SLOT) {
3674 if (isLUKS1(cd->type))
3675 keyslot_new = LUKS_keyslot_find_empty(&cd->u.luks1.hdr);
3676 else if (isLUKS2(cd->type))
3677 keyslot_new = LUKS2_keyslot_find_empty(cd, &cd->u.luks2.hdr, vk->keylength);
3678 if (keyslot_new < 0)
3679 keyslot_new = keyslot_old;
3681 log_dbg(cd, "Key change, old slot %d, new slot %d.", keyslot_old, keyslot_new);
3683 if (isLUKS1(cd->type)) {
3684 if (keyslot_old == keyslot_new) {
3685 log_dbg(cd, "Key slot %d is going to be overwritten.", keyslot_old);
3686 (void)crypt_keyslot_destroy(cd, keyslot_old);
3688 r = LUKS_set_key(keyslot_new, new_passphrase, new_passphrase_size,
3689 &cd->u.luks1.hdr, vk, cd);
3690 } else if (isLUKS2(cd->type)) {
3691 r = LUKS2_keyslot_params_default(cd, &cd->u.luks2.hdr, &params);
/* Moving slots: bind digest and copy token assignments to the new slot. */
3695 if (keyslot_old != keyslot_new) {
3696 r = LUKS2_digest_assign(cd, &cd->u.luks2.hdr, keyslot_new, digest, 1, 0);
3699 r = LUKS2_token_assignment_copy(cd, &cd->u.luks2.hdr, keyslot_old, keyslot_new, 0);
3703 log_dbg(cd, "Key slot %d is going to be overwritten.", keyslot_old);
3704 /* FIXME: improve return code so that we can detect area is damaged */
3705 r = LUKS2_keyslot_wipe(cd, &cd->u.luks2.hdr, keyslot_old, 1);
3707 /* (void)crypt_keyslot_destroy(cd, keyslot_old); */
3713 r = LUKS2_keyslot_store(cd, &cd->u.luks2.hdr,
3714 keyslot_new, new_passphrase,
3715 new_passphrase_size, vk, &params);
3719 /* Swap old & new so the final keyslot number remains */
3720 if (keyslot_new_orig == CRYPT_ANY_SLOT && keyslot_old != keyslot_new) {
3721 r = LUKS2_keyslot_swap(cd, &cd->u.luks2.hdr, keyslot_old, keyslot_new);
3727 keyslot_old = keyslot_new;
/* Remove the superseded slot once the new one is safely in place. */
3733 if (r >= 0 && keyslot_old != keyslot_new)
3734 r = crypt_keyslot_destroy(cd, keyslot_old);
3737 log_err(cd, _("Failed to swap new key slot."));
3739 crypt_free_volume_key(vk);
3741 _luks2_rollback(cd);
/*
 * Add a new keyslot protected by the contents of new_keyfile, unlocking the
 * volume key with an existing keyfile. Thin wrapper over the
 * keyslot-context API. Returns the new keyslot number or negative errno.
 */
3747 int crypt_keyslot_add_by_keyfile_device_offset(struct crypt_device *cd,
3749 const char *keyfile,
3750 size_t keyfile_size,
3751 uint64_t keyfile_offset,
3752 const char *new_keyfile,
3753 size_t new_keyfile_size,
3754 uint64_t new_keyfile_offset)
3757 struct crypt_keyslot_context kc, new_kc;
3759 if (!keyfile || !new_keyfile)
/* Build stack-allocated unlock contexts for old and new keyfiles. */
3762 crypt_keyslot_unlock_by_keyfile_init_internal(&kc, keyfile, keyfile_size, keyfile_offset);
3763 crypt_keyslot_unlock_by_keyfile_init_internal(&new_kc, new_keyfile, new_keyfile_size, new_keyfile_offset);
3765 r = crypt_keyslot_add_by_keyslot_context(cd, CRYPT_ANY_SLOT, &kc, keyslot, &new_kc, 0);
3767 crypt_keyslot_context_destroy_internal(&kc);
3768 crypt_keyslot_context_destroy_internal(&new_kc);
/* Convenience wrapper: add keyslot by keyfiles with no offsets. */
3773 int crypt_keyslot_add_by_keyfile(struct crypt_device *cd,
3775 const char *keyfile,
3776 size_t keyfile_size,
3777 const char *new_keyfile,
3778 size_t new_keyfile_size)
3780 return crypt_keyslot_add_by_keyfile_device_offset(cd, keyslot,
3781 keyfile, keyfile_size, 0,
3782 new_keyfile, new_keyfile_size, 0);
/* Legacy wrapper: same as the device_offset variant but with size_t offsets. */
3785 int crypt_keyslot_add_by_keyfile_offset(struct crypt_device *cd,
3787 const char *keyfile,
3788 size_t keyfile_size,
3789 size_t keyfile_offset,
3790 const char *new_keyfile,
3791 size_t new_keyfile_size,
3792 size_t new_keyfile_offset)
3794 return crypt_keyslot_add_by_keyfile_device_offset(cd, keyslot,
3795 keyfile, keyfile_size, keyfile_offset,
3796 new_keyfile, new_keyfile_size, new_keyfile_offset);
/*
 * Add a new passphrase-protected keyslot using a caller-supplied raw volume
 * key. Thin wrapper over the keyslot-context API. Returns the new keyslot
 * number or negative errno.
 */
3799 int crypt_keyslot_add_by_volume_key(struct crypt_device *cd,
3801 const char *volume_key,
3802 size_t volume_key_size,
3803 const char *passphrase,
3804 size_t passphrase_size)
3807 struct crypt_keyslot_context kc, new_kc;
/* Unlock context carries the raw key; new context the new passphrase. */
3812 crypt_keyslot_unlock_by_key_init_internal(&kc, volume_key, volume_key_size);
3813 crypt_keyslot_unlock_by_passphrase_init_internal(&new_kc, passphrase, passphrase_size);
3815 r = crypt_keyslot_add_by_keyslot_context(cd, CRYPT_ANY_SLOT, &kc, keyslot, &new_kc, 0);
3817 crypt_keyslot_context_destroy_internal(&kc);
3818 crypt_keyslot_context_destroy_internal(&new_kc);
/*
 * Destroy (wipe) a keyslot. LUKS1 requires the slot to be active; LUKS2
 * wipes the slot area unconditionally via LUKS2_keyslot_wipe.
 * NOTE(review): intermediate lines are elided in this extract.
 */
3823 int crypt_keyslot_destroy(struct crypt_device *cd, int keyslot)
3825 crypt_keyslot_info ki;
3828 log_dbg(cd, "Destroying keyslot %d.", keyslot);
3830 if ((r = _onlyLUKS(cd, CRYPT_CD_UNRESTRICTED)))
3833 ki = crypt_keyslot_status(cd, keyslot);
3834 if (ki == CRYPT_SLOT_INVALID) {
3835 log_err(cd, _("Key slot %d is invalid."), keyslot);
3839 if (isLUKS1(cd->type)) {
3840 if (ki == CRYPT_SLOT_INACTIVE) {
3841 log_err(cd, _("Keyslot %d is not active."), keyslot);
3844 return LUKS_del_key(keyslot, &cd->u.luks1.hdr, cd);
3847 return LUKS2_keyslot_wipe(cd, &cd->u.luks2.hdr, keyslot, 0);
/*
 * Refuse activation when the LUKS header and the data area share the same
 * device with data offset 0 (header would be overwritten by data).
 */
3850 static int _check_header_data_overlap(struct crypt_device *cd, const char *name)
3852 if (!name || !isLUKS(cd->type))
/* Detached header: data and metadata on different devices — no overlap. */
3855 if (device_is_identical(crypt_data_device(cd), crypt_metadata_device(cd)) <= 0)
3858 /* FIXME: check real header size */
3859 if (crypt_get_data_offset(cd) == 0) {
3860 log_err(cd, _("Device header overlaps with data area."));
/*
 * Verify activation preconditions for device "name" (and optional
 * integrity device "iname"): an already-active device is only allowed
 * with CRYPT_ACTIVATE_REFRESH; the refresh flag is dropped when a device
 * is not active. NOTE(review): intermediate lines are elided.
 */
3867 static int check_devices(struct crypt_device *cd, const char *name, const char *iname, uint32_t *flags)
3871 if (!flags || !name)
3875 r = dm_status_device(cd, iname);
3876 if (r >= 0 && !(*flags & CRYPT_ACTIVATE_REFRESH))
3878 if (r < 0 && r != -ENODEV)
3881 *flags &= ~CRYPT_ACTIVATE_REFRESH;
3884 r = dm_status_device(cd, name);
3885 if (r >= 0 && !(*flags & CRYPT_ACTIVATE_REFRESH))
3887 if (r < 0 && r != -ENODEV)
3890 *flags &= ~CRYPT_ACTIVATE_REFRESH;
/*
 * First-time activation of a dm-crypt device stacked on dm-integrity:
 * activate the integrity device, point the crypt target's data device to
 * its node (ipath), then create the crypt device. On crypt creation
 * failure the integrity device is removed again.
 * NOTE(review): intermediate lines are elided in this extract.
 */
3895 static int _create_device_with_integrity(struct crypt_device *cd,
3896 const char *type, const char *name, const char *iname,
3897 const char *ipath, struct crypt_dm_active_device *dmd,
3898 struct crypt_dm_active_device *dmdi)
3901 enum devcheck device_check;
3902 struct dm_target *tgt;
3903 struct device *device = NULL;
3905 if (!single_segment(dmd))
3908 tgt = &dmd->segment;
3909 if (tgt->type != DM_CRYPT)
/* Shared activation allows non-exclusive open of the data device. */
3912 device_check = dmd->flags & CRYPT_ACTIVATE_SHARED ? DEV_OK : DEV_EXCL;
3914 r = INTEGRITY_activate_dmd_device(cd, iname, CRYPT_INTEGRITY, dmdi, 0);
3918 r = device_alloc(cd, &device, ipath);
3921 tgt->data_device = device;
3923 r = device_block_adjust(cd, tgt->data_device, device_check,
3924 tgt->u.crypt.offset, &dmd->size, &dmd->flags);
3927 r = dm_create_device(cd, name, type, dmd);
/* Roll back the integrity activation if crypt creation failed. */
3930 dm_remove_device(cd, iname, 0);
3932 device_free(cd, device);
/*
 * Lazily probe and cache whether the kernel keyring is available;
 * result is checked only once per process.
 * NOTE(review): intermediate lines are elided in this extract.
 */
3936 static int kernel_keyring_support(void)
3938 static unsigned _checked = 0;
3941 _kernel_keyring_supported = keyring_check();
3945 return _kernel_keyring_supported;
/*
 * Detect kernels older than 4.15 which have a known dm-crypt keyring bug;
 * returns nonzero on affected (or undeterminable) kernel versions.
 */
3948 static int dmcrypt_keyring_bug(void)
3952 if (kernel_version(&kversion))
3954 return kversion < compact_version(4,15,0,0);
/*
 * Activate a single-segment dm-crypt or dm-integrity device, or refresh it
 * in place when CRYPT_ACTIVATE_REFRESH is set and the device is active.
 * NOTE(review): intermediate lines are elided in this extract.
 */
3957 int create_or_reload_device(struct crypt_device *cd, const char *name,
3958 const char *type, struct crypt_dm_active_device *dmd)
3961 enum devcheck device_check;
3962 struct dm_target *tgt;
3964 if (!type || !name || !single_segment(dmd))
3967 tgt = &dmd->segment;
3968 if (tgt->type != DM_CRYPT && tgt->type != DM_INTEGRITY)
3971 /* drop CRYPT_ACTIVATE_REFRESH flag if any device is inactive */
3972 r = check_devices(cd, name, NULL, &dmd->flags);
3976 if (dmd->flags & CRYPT_ACTIVATE_REFRESH)
3977 r = _reload_device(cd, name, dmd);
3979 if (tgt->type == DM_CRYPT) {
3980 device_check = dmd->flags & CRYPT_ACTIVATE_SHARED ? DEV_OK : DEV_EXCL;
3982 r = device_block_adjust(cd, tgt->data_device, device_check,
3983 tgt->u.crypt.offset, &dmd->size, &dmd->flags);
3985 tgt->size = dmd->size;
3986 r = dm_create_device(cd, name, type, dmd);
3988 } else if (tgt->type == DM_INTEGRITY) {
/* Integrity data (and optional meta) devices are opened exclusively. */
3989 r = device_block_adjust(cd, tgt->data_device, DEV_EXCL,
3990 tgt->u.integrity.offset, NULL, &dmd->flags);
3994 if (tgt->u.integrity.meta_device) {
3995 r = device_block_adjust(cd, tgt->u.integrity.meta_device, DEV_EXCL, 0, NULL, NULL);
4000 r = dm_create_device(cd, name, type, dmd);
/*
 * Activate or refresh a dm-crypt-over-dm-integrity stack. The integrity
 * device is named "<name>_dif" under the dm directory; iname points into
 * the allocated ipath string.
 * NOTE(review): intermediate lines are elided in this extract.
 */
4007 int create_or_reload_device_with_integrity(struct crypt_device *cd, const char *name,
4008 const char *type, struct crypt_dm_active_device *dmd,
4009 struct crypt_dm_active_device *dmdi)
4012 const char *iname = NULL;
4015 if (!type || !name || !dmd || !dmdi)
4018 if (asprintf(&ipath, "%s/%s_dif", dm_get_dir(), name) < 0)
/* iname is the basename portion of ipath (skip "<dm_dir>/"). */
4020 iname = ipath + strlen(dm_get_dir()) + 1;
4022 /* drop CRYPT_ACTIVATE_REFRESH flag if any device is inactive */
4023 r = check_devices(cd, name, iname, &dmd->flags);
4027 if (dmd->flags & CRYPT_ACTIVATE_REFRESH)
4028 r = _reload_device_with_integrity(cd, name, iname, ipath, dmd, dmdi);
4030 r = _create_device_with_integrity(cd, type, name, iname, ipath, dmd, dmdi);
/*
 * Unlock a LUKS2 keyslot with a passphrase and activate the device,
 * optionally placing the volume key into the kernel keyring first.
 * Returns the keyslot number used, or negative errno.
 * NOTE(review): intermediate lines are elided in this extract.
 */
4037 static int _open_and_activate(struct crypt_device *cd,
4040 const char *passphrase,
4041 size_t passphrase_size,
4046 struct volume_key *vk = NULL;
/* Unbound keys are only acceptable when explicitly allowed by flags. */
4048 r = LUKS2_keyslot_open(cd, keyslot,
4049 (flags & CRYPT_ACTIVATE_ALLOW_UNBOUND_KEY) ?
4050 CRYPT_ANY_SEGMENT : CRYPT_DEFAULT_SEGMENT,
4051 passphrase, passphrase_size, &vk);
4056 if (!crypt_use_keyring_for_vk(cd))
4057 use_keyring = false;
/* Keyring is used for real activations (except cipher_null) or when
 * the caller explicitly requested a keyring key. */
4059 use_keyring = ((name && !crypt_is_cipher_null(crypt_get_cipher(cd))) ||
4060 (flags & CRYPT_ACTIVATE_KEYRING_KEY));
4063 r = LUKS2_volume_key_load_in_keyring_by_keyslot(cd,
4064 &cd->u.luks2.hdr, vk, keyslot);
4067 flags |= CRYPT_ACTIVATE_KEYRING_KEY;
4071 r = LUKS2_activate(cd, name, vk, flags);
/* On failure, drop any key uploaded to the keyring above. */
4074 crypt_drop_keyring_key(cd, vk);
4075 crypt_free_volume_key(vk);
4077 return r < 0 ? r : keyslot;
4080 #if USE_LUKS2_REENCRYPTION
/*
 * Walk the linked list of volume keys and load each one into the kernel
 * keyring, identified by its digest id. Used during reencryption where
 * multiple keys are active at once.
 */
4081 static int load_all_keys(struct crypt_device *cd, struct luks2_hdr *hdr, struct volume_key *vks)
4084 struct volume_key *vk = vks;
4087 r = LUKS2_volume_key_load_in_keyring_by_digest(cd, vk, crypt_volume_key_get_id(vk));
4090 vk = crypt_volume_key_next(vk);
/*
 * Unlock the volume key(s) required for activation, honouring the LUKS2
 * reencryption state: with reencryption in progress (clean or crashed)
 * keys for all segments may be needed. On success the key list is moved
 * to *vks and the unlocked keyslot number is returned.
 */
4096 static int _open_all_keys(struct crypt_device *cd,
4097 struct luks2_hdr *hdr,
4099 const char *passphrase,
4100 size_t passphrase_size,
4102 struct volume_key **vks)
4105 struct volume_key *_vks = NULL;
4106 crypt_reencrypt_info ri = LUKS2_reencrypt_status(hdr);
4108 segment = (flags & CRYPT_ACTIVATE_ALLOW_UNBOUND_KEY) ? CRYPT_ANY_SEGMENT : CRYPT_DEFAULT_SEGMENT;
4111 case CRYPT_REENCRYPT_NONE:
4112 r = LUKS2_keyslot_open(cd, keyslot, segment, passphrase, passphrase_size, &_vks);
4114 case CRYPT_REENCRYPT_CLEAN:
4115 case CRYPT_REENCRYPT_CRASH:
4116 if (segment == CRYPT_ANY_SEGMENT)
4117 r = LUKS2_keyslot_open(cd, keyslot, segment, passphrase,
4118 passphrase_size, &_vks);
4120 r = LUKS2_keyslot_open_all_segments(cd, keyslot,
4121 keyslot, passphrase, passphrase_size,
4128 if (keyslot == CRYPT_ANY_SLOT)
/* load keys into kernel keyring only when the caller asked for it */
4131 if (r >= 0 && (flags & CRYPT_ACTIVATE_KEYRING_KEY))
4132 r = load_all_keys(cd, hdr, _vks);
4135 MOVE_REF(*vks, _vks);
/* on failure make sure no key material stays in keyring or memory */
4138 crypt_drop_keyring_key(cd, _vks);
4139 crypt_free_volume_key(_vks);
4141 return r < 0 ? r : keyslot;
/*
 * Activate a LUKS2 device that has reencryption in progress. Takes the
 * reencryption lock, re-reads metadata, runs crash recovery if needed,
 * verifies key digests and activates via the multi-segment path.
 * Returns the unlocked keyslot number on success.
 */
4144 static int _open_and_activate_reencrypt_device(struct crypt_device *cd,
4145 struct luks2_hdr *hdr,
4148 const char *passphrase,
4149 size_t passphrase_size,
4153 crypt_reencrypt_info ri;
4154 uint64_t minimal_size, device_size;
4155 struct volume_key *vks = NULL;
4157 struct crypt_lock_handle *reencrypt_lock = NULL;
4159 if (crypt_use_keyring_for_vk(cd))
4160 flags |= CRYPT_ACTIVATE_KEYRING_KEY;
/* serialize against a concurrent reencryption process */
4162 r = LUKS2_reencrypt_lock(cd, &reencrypt_lock);
4165 log_err(cd, _("Reencryption in-progress. Cannot activate device."));
4167 log_err(cd, _("Failed to get reencryption lock."));
/* re-read metadata under the lock; it may have changed meanwhile */
4171 if ((r = crypt_load(cd, CRYPT_LUKS2, NULL)))
4174 ri = LUKS2_reencrypt_status(hdr);
4176 if (ri == CRYPT_REENCRYPT_CRASH) {
4177 r = LUKS2_reencrypt_locked_recovery_by_passphrase(cd, keyslot,
4178 keyslot, passphrase, passphrase_size, &vks);
4180 log_err(cd, _("LUKS2 reencryption recovery failed."));
/* recovery may have changed the state; query it again */
4185 ri = LUKS2_reencrypt_status(hdr);
4188 /* recovery finished reencryption or it's already finished */
4189 if (ri == CRYPT_REENCRYPT_NONE) {
4190 crypt_drop_keyring_key(cd, vks);
4191 crypt_free_volume_key(vks);
4192 LUKS2_reencrypt_unlock(cd, reencrypt_lock);
4193 return _open_and_activate(cd, keyslot, name, passphrase, passphrase_size, flags);
4196 if (ri > CRYPT_REENCRYPT_CLEAN) {
4201 if (LUKS2_get_data_size(hdr, &minimal_size, &dynamic_size))
4205 r = _open_all_keys(cd, hdr, keyslot, passphrase, passphrase_size, flags, &vks);
4211 r = LUKS2_reencrypt_digest_verify(cd, hdr, vks);
4216 log_dbg(cd, "Entering clean reencryption state mode.");
4219 r = LUKS2_reencrypt_check_device_size(cd, hdr, minimal_size, &device_size, true, dynamic_size);
4222 r = LUKS2_activate_multi(cd, name, vks, device_size >> SECTOR_SHIFT, flags);
4224 LUKS2_reencrypt_unlock(cd, reencrypt_lock);
/* wipe all unlocked key material regardless of the result */
4226 crypt_drop_keyring_key(cd, vks);
4227 crypt_free_volume_key(vks);
4229 return r < 0 ? r : keyslot;
4233 * Activation/deactivation of a device
/*
 * LUKS2 activation dispatcher (build with reencryption support):
 * route to the reencryption-aware path when reencryption is in
 * progress, otherwise to the plain keyslot open + activate path.
 */
4235 static int _open_and_activate_luks2(struct crypt_device *cd,
4238 const char *passphrase,
4239 size_t passphrase_size,
4242 crypt_reencrypt_info ri;
4244 struct luks2_hdr *hdr = &cd->u.luks2.hdr;
4245 struct volume_key *vks = NULL;
4247 ri = LUKS2_reencrypt_status(hdr);
4248 if (ri == CRYPT_REENCRYPT_INVALID)
4251 if (ri > CRYPT_REENCRYPT_NONE) {
4253 r = _open_and_activate_reencrypt_device(cd, hdr, keyslot, name, passphrase,
4254 passphrase_size, flags);
/* check-only path (no name): still verify reencryption digests */
4256 r = _open_all_keys(cd, hdr, keyslot, passphrase,
4257 passphrase_size, flags, &vks);
4261 rv = LUKS2_reencrypt_digest_verify(cd, hdr, vks);
4262 crypt_free_volume_key(vks);
4267 r = _open_and_activate(cd, keyslot, name, passphrase,
4268 passphrase_size, flags);
/*
 * LUKS2 activation dispatcher for builds without reencryption support:
 * refuse to touch a device with reencryption metadata present.
 */
4273 static int _open_and_activate_luks2(struct crypt_device *cd,
4276 const char *passphrase,
4277 size_t passphrase_size,
4280 crypt_reencrypt_info ri;
4282 ri = LUKS2_reencrypt_status(&cd->u.luks2.hdr);
4283 if (ri == CRYPT_REENCRYPT_INVALID)
4286 if (ri > CRYPT_REENCRYPT_NONE) {
4287 log_err(cd, _("This operation is not supported for this device type."));
4291 return _open_and_activate(cd, keyslot, name, passphrase, passphrase_size, flags);
/*
 * Common passphrase activation backend: dispatch to the format-specific
 * activation routine (plain, LUKS1, LUKS2, BITLK, FVAULT2). A NULL name
 * means "check passphrase only". Returns the keyslot number (or 0 for
 * formats without keyslots) on success.
 */
4295 static int _activate_by_passphrase(struct crypt_device *cd,
4298 const char *passphrase,
4299 size_t passphrase_size,
4303 struct volume_key *vk = NULL;
4305 if ((flags & CRYPT_ACTIVATE_KEYRING_KEY) && !crypt_use_keyring_for_vk(cd))
/* unbound keys cannot be used to activate a real device mapping */
4308 if ((flags & CRYPT_ACTIVATE_ALLOW_UNBOUND_KEY) && name)
4311 r = _check_header_data_overlap(cd, name);
/* see struct crypt_device: OOM workaround for parallel PBKDF runs */
4315 if (flags & CRYPT_ACTIVATE_SERIALIZE_MEMORY_HARD_PBKDF)
4316 cd->memory_hard_pbkdf_lock_enabled = true;
4318 /* plain, use hashed passphrase */
4319 if (isPLAIN(cd->type)) {
4324 r = process_key(cd, cd->u.plain.hdr.hash,
4325 cd->u.plain.key_size,
4326 passphrase, passphrase_size, &vk);
4330 r = PLAIN_activate(cd, name, vk, cd->u.plain.hdr.size, flags);
4332 } else if (isLUKS1(cd->type)) {
4333 r = LUKS_open_key_with_hdr(keyslot, passphrase,
4334 passphrase_size, &cd->u.luks1.hdr, &vk, cd);
4338 r = LUKS1_activate(cd, name, vk, flags);
4340 } else if (isLUKS2(cd->type)) {
4341 r = _open_and_activate_luks2(cd, keyslot, name, passphrase, passphrase_size, flags);
4343 } else if (isBITLK(cd->type)) {
4344 r = BITLK_activate_by_passphrase(cd, name, passphrase, passphrase_size,
4345 &cd->u.bitlk.params, flags);
4347 } else if (isFVAULT2(cd->type)) {
4348 r = FVAULT2_activate_by_passphrase(cd, name, passphrase, passphrase_size,
4349 &cd->u.fvault2.params, flags);
4352 log_err(cd, _("Device type is not properly initialized."));
/* wipe any key material unlocked on this path */
4357 crypt_drop_keyring_key(cd, vk);
4358 crypt_free_volume_key(vk);
4360 cd->memory_hard_pbkdf_lock_enabled = false;
4362 return r < 0 ? r : keyslot;
/*
 * Activate a loop-AES device: parse the multi-key keyfile buffer into a
 * volume key set, then activate with the detected key count.
 */
4365 static int _activate_loopaes(struct crypt_device *cd,
4372 unsigned int key_count = 0;
4373 struct volume_key *vk = NULL;
4375 r = LOOPAES_parse_keyfile(cd, &vk, cd->u.loopaes.hdr.hash, &key_count,
4376 buffer, buffer_size);
4379 r = LOOPAES_activate(cd, name, cd->u.loopaes.cipher, key_count,
4382 crypt_free_volume_key(vk);
/*
 * Pre-activation sanity check of the dm device name: an existing active
 * device is only acceptable when a reload (refresh) was requested.
 */
4387 static int _activate_check_status(struct crypt_device *cd, const char *name, unsigned reload)
4394 r = dm_status_device(cd, name);
4396 if (r >= 0 && reload)
4399 if (r >= 0 || r == -EEXIST) {
4400 log_err(cd, _("Device %s already exists."), name)
4407 log_err(cd, _("Cannot use device %s, name is invalid or still in use."), name);
4411 // activation/deactivation of device mapping
/*
 * Public API: activate a device (or verify a passphrase when name is
 * NULL) using a passphrase. CRYPT_ACTIVATE_REFRESH requires a name.
 */
4412 int crypt_activate_by_passphrase(struct crypt_device *cd,
4415 const char *passphrase,
4416 size_t passphrase_size,
4421 if (!cd || !passphrase || (!name && (flags & CRYPT_ACTIVATE_REFRESH)))
4424 log_dbg(cd, "%s volume %s [keyslot %d] using passphrase.",
4425 name ? "Activating" : "Checking", name ?: "passphrase",
4428 r = _activate_check_status(cd, name, flags & CRYPT_ACTIVATE_REFRESH);
4432 return _activate_by_passphrase(cd, name, keyslot, passphrase, passphrase_size, flags);
/*
 * Public API: activate (or check) using a passphrase read from a
 * keyfile, starting at keyfile_offset. Loop-AES keyfiles take their
 * own multi-key parsing path.
 */
4435 int crypt_activate_by_keyfile_device_offset(struct crypt_device *cd,
4438 const char *keyfile,
4439 size_t keyfile_size,
4440 uint64_t keyfile_offset,
4443 char *passphrase_read = NULL;
4444 size_t passphrase_size_read;
4447 if (!cd || !keyfile ||
4448 ((flags & CRYPT_ACTIVATE_KEYRING_KEY) && !crypt_use_keyring_for_vk(cd)))
4451 log_dbg(cd, "%s volume %s [keyslot %d] using keyfile %s.",
4452 name ? "Activating" : "Checking", name ?: "passphrase", keyslot, keyfile);
4454 r = _activate_check_status(cd, name, flags & CRYPT_ACTIVATE_REFRESH);
4458 r = crypt_keyfile_device_read(cd, keyfile,
4459 &passphrase_read, &passphrase_size_read,
4460 keyfile_offset, keyfile_size, 0);
4464 if (isLOOPAES(cd->type))
4465 r = _activate_loopaes(cd, name, passphrase_read, passphrase_size_read, flags);
4467 r = _activate_by_passphrase(cd, name, keyslot, passphrase_read, passphrase_size_read, flags);
/* crypt_safe_free wipes the passphrase buffer before releasing it */
4470 crypt_safe_free(passphrase_read);
/* Convenience wrapper: keyfile activation with zero keyfile offset. */
4474 int crypt_activate_by_keyfile(struct crypt_device *cd,
4477 const char *keyfile,
4478 size_t keyfile_size,
4481 return crypt_activate_by_keyfile_device_offset(cd, name, keyslot, keyfile,
4482 keyfile_size, 0, flags);
/*
 * Legacy wrapper: same as the device_offset variant but with a size_t
 * offset parameter (narrower than the uint64_t used internally).
 */
4485 int crypt_activate_by_keyfile_offset(struct crypt_device *cd,
4488 const char *keyfile,
4489 size_t keyfile_size,
4490 size_t keyfile_offset,
4493 return crypt_activate_by_keyfile_device_offset(cd, name, keyslot, keyfile,
4494 keyfile_size, keyfile_offset, flags);
/*
 * Public API: activate (or verify when name is NULL) using a raw volume
 * key. For LUKS types a NULL key falls back to the cached internal key.
 * Dispatches per format: plain, LUKS1/2, VERITY, TCRYPT, INTEGRITY,
 * BITLK. Returns 0 on success, negative error code otherwise.
 */
4496 int crypt_activate_by_volume_key(struct crypt_device *cd,
4498 const char *volume_key,
4499 size_t volume_key_size,
4503 struct volume_key *vk = NULL;
4507 ((flags & CRYPT_ACTIVATE_KEYRING_KEY) && !crypt_use_keyring_for_vk(cd)))
4510 log_dbg(cd, "%s volume %s by volume key.", name ? "Activating" : "Checking",
4513 r = _activate_check_status(cd, name, flags & CRYPT_ACTIVATE_REFRESH);
4517 r = _check_header_data_overlap(cd, name);
4521 /* use key directly, no hash */
4522 if (isPLAIN(cd->type)) {
4526 if (!volume_key || !volume_key_size || volume_key_size != cd->u.plain.key_size) {
4527 log_err(cd, _("Incorrect volume key specified for plain device."));
4531 vk = crypt_alloc_volume_key(volume_key_size, volume_key);
4535 r = PLAIN_activate(cd, name, vk, cd->u.plain.hdr.size, flags);
4536 } else if (isLUKS1(cd->type)) {
4537 /* If key is not provided, try to use internal key */
4539 if (!cd->volume_key) {
4540 log_err(cd, _("Volume key does not match the volume."));
4543 volume_key_size = cd->volume_key->keylength;
4544 volume_key = cd->volume_key->key;
4547 vk = crypt_alloc_volume_key(volume_key_size, volume_key);
/* verify key against the LUKS1 header digest before activation */
4550 r = LUKS_verify_volume_key(&cd->u.luks1.hdr, vk);
4553 log_err(cd, _("Volume key does not match the volume."));
4556 r = LUKS1_activate(cd, name, vk, flags);
4557 } else if (isLUKS2(cd->type)) {
4558 /* If key is not provided, try to use internal key */
4560 if (!cd->volume_key) {
4561 log_err(cd, _("Volume key does not match the volume."));
4564 volume_key_size = cd->volume_key->keylength;
4565 volume_key = cd->volume_key->key;
4568 vk = crypt_alloc_volume_key(volume_key_size, volume_key);
4572 r = LUKS2_digest_verify_by_segment(cd, &cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT, vk);
4573 if (r == -EPERM || r == -ENOENT)
4574 log_err(cd, _("Volume key does not match the volume."));
4578 if (!crypt_use_keyring_for_vk(cd))
4579 use_keyring = false;
/* cipher_null devices skip the kernel keyring */
4581 use_keyring = (name && !crypt_is_cipher_null(crypt_get_cipher(cd))) ||
4582 (flags & CRYPT_ACTIVATE_KEYRING_KEY);
4584 if (!r && use_keyring) {
4585 r = LUKS2_key_description_by_segment(cd,
4586 &cd->u.luks2.hdr, vk, CRYPT_DEFAULT_SEGMENT);
4588 r = crypt_volume_key_load_in_keyring(cd, vk);
4590 flags |= CRYPT_ACTIVATE_KEYRING_KEY;
4594 r = LUKS2_activate(cd, name, vk, flags);
4595 } else if (isVERITY(cd->type)) {
/* for verity the "volume key" is the root hash */
4596 r = crypt_activate_by_signed_key(cd, name, volume_key, volume_key_size, NULL, 0, flags);
4597 } else if (isTCRYPT(cd->type)) {
4600 r = TCRYPT_activate(cd, name, &cd->u.tcrypt.hdr,
4601 &cd->u.tcrypt.params, flags);
4602 } else if (isINTEGRITY(cd->type)) {
4606 vk = crypt_alloc_volume_key(volume_key_size, volume_key);
4610 r = INTEGRITY_activate(cd, name, &cd->u.integrity.params, vk,
4611 cd->u.integrity.journal_crypt_key,
4612 cd->u.integrity.journal_mac_key, flags,
4613 cd->u.integrity.sb_flags);
4614 } else if (isBITLK(cd->type)) {
4615 r = BITLK_activate_by_volume_key(cd, name, volume_key, volume_key_size,
4616 &cd->u.bitlk.params, flags);
4618 log_err(cd, _("Device type is not properly initialized."));
/* wipe the local volume key copy in all cases */
4623 crypt_drop_keyring_key(cd, vk);
4624 crypt_free_volume_key(vk);
/*
 * Public API (VERITY only): activate using a root hash, optionally with
 * a signature that is placed in the thread keyring for in-kernel
 * verification. volume_key here is the verity root hash. A NULL name
 * means check-only; signatures require a named activation.
 */
4629 int crypt_activate_by_signed_key(struct crypt_device *cd,
4631 const char *volume_key,
4632 size_t volume_key_size,
4633 const char *signature,
4634 size_t signature_size,
4637 char description[512];
4640 if (!cd || !isVERITY(cd->type))
4643 if (!volume_key || !volume_key_size || (!name && signature)) {
4644 log_err(cd, _("Incorrect root hash specified for verity device."));
4649 log_dbg(cd, "Activating volume %s by %skey.", name, signature ? "signed " : "");
4651 log_dbg(cd, "Checking volume by key.");
4653 if (cd->u.verity.hdr.flags & CRYPT_VERITY_ROOT_HASH_SIGNATURE && !signature) {
4654 log_err(cd, _("Root hash signature required."));
4658 r = _activate_check_status(cd, name, flags & CRYPT_ACTIVATE_REFRESH);
/* signature verification happens in-kernel and needs keyring support */
4662 if (signature && !kernel_keyring_support()) {
4663 log_err(cd, _("Kernel keyring missing: required for passing signature to kernel."));
4667 /* volume_key == root hash */
4668 free(CONST_CAST(void*)cd->u.verity.root_hash);
4669 cd->u.verity.root_hash = NULL;
/* key description: "cryptsetup:<uuid>-<name>" (uuid part optional) */
4672 r = snprintf(description, sizeof(description)-1, "cryptsetup:%s%s%s",
4673 crypt_get_uuid(cd) ?: "", crypt_get_uuid(cd) ? "-" : "", name);
4677 log_dbg(cd, "Adding signature into keyring %s", description);
4678 r = keyring_add_key_in_thread_keyring(USER_KEY, description, signature, signature_size);
4680 log_err(cd, _("Failed to load key in kernel keyring."));
/* verity devices are always activated read-only */
4685 r = VERITY_activate(cd, name, volume_key, volume_key_size,
4686 signature ? description : NULL,
4687 cd->u.verity.fec_device,
4688 &cd->u.verity.hdr, flags | CRYPT_ACTIVATE_READONLY);
/* cache root hash in the context for later queries */
4691 cd->u.verity.root_hash_size = volume_key_size;
4692 cd->u.verity.root_hash = malloc(volume_key_size);
4693 if (cd->u.verity.root_hash)
4694 memcpy(CONST_CAST(void*)cd->u.verity.root_hash, volume_key, volume_key_size);
4698 crypt_drop_keyring_key_by_description(cd, description, USER_KEY);
/*
 * Public API: deactivate (remove) an active device mapping by name.
 * Supports deferred removal and its cancellation; LUKS2 and TCRYPT
 * mappings take format-specific teardown paths. When cd is NULL a
 * temporary context is created from the active device.
 */
4703 int crypt_deactivate_by_name(struct crypt_device *cd, const char *name, uint32_t flags)
4705 struct crypt_device *fake_cd = NULL;
4706 struct luks2_hdr *hdr2 = NULL;
4707 struct crypt_dm_active_device dmd = {};
4709 uint32_t get_flags = DM_ACTIVE_DEVICE | DM_ACTIVE_UUID | DM_ACTIVE_HOLDERS;
/* deferred removal and its cancellation are mutually exclusive */
4714 if ((flags & CRYPT_DEACTIVATE_DEFERRED) && (flags & CRYPT_DEACTIVATE_DEFERRED_CANCEL))
4717 log_dbg(cd, "Deactivating volume %s.", name);
4720 r = crypt_init_by_name(&fake_cd, name);
4726 /* skip holders detection and early abort when some flags raised */
4727 if (flags & (CRYPT_DEACTIVATE_FORCE | CRYPT_DEACTIVATE_DEFERRED | CRYPT_DEACTIVATE_DEFERRED_CANCEL))
4728 get_flags &= ~DM_ACTIVE_HOLDERS;
4730 switch (crypt_status(cd, name)) {
4733 if (flags & CRYPT_DEACTIVATE_DEFERRED_CANCEL) {
4734 r = dm_cancel_deferred_removal(name);
4736 log_err(cd, _("Could not cancel deferred remove from device %s."), name);
4740 r = dm_query_device(cd, name, get_flags, &dmd);
4743 log_err(cd, _("Device %s is still in use."), name);
4749 if (isLUKS2(cd->type))
4750 hdr2 = crypt_get_hdr(cd, CRYPT_LUKS2);
/* LUKS2 detected either via context type or via the dm UUID prefix */
4752 if ((dmd.uuid && !strncmp(CRYPT_LUKS2, dmd.uuid, sizeof(CRYPT_LUKS2)-1)) || hdr2)
4753 r = LUKS2_deactivate(cd, name, hdr2, &dmd, flags);
4754 else if (isTCRYPT(cd->type))
4755 r = TCRYPT_deactivate(cd, name, flags);
4757 r = dm_remove_device(cd, name, flags);
4758 if (r < 0 && crypt_status(cd, name) == CRYPT_BUSY) {
4759 log_err(cd, _("Device %s is still in use."), name);
4763 case CRYPT_INACTIVE:
4764 log_err(cd, _("Device %s is not active."), name);
4768 log_err(cd, _("Invalid device %s."), name);
4772 dm_targets_free(cd, &dmd);
4773 free(CONST_CAST(void*)dmd.uuid);
4774 crypt_free(fake_cd);
/* Convenience wrapper: deactivate with no extra flags. */
4779 int crypt_deactivate(struct crypt_device *cd, const char *name)
4781 return crypt_deactivate_by_name(cd, name, 0);
/*
 * Public API: fill struct crypt_active_device (offset, iv_offset, size,
 * flags) for an active mapping. For multi-segment devices the segment
 * with the smallest data offset is reported; for LUKS2-with-integrity
 * the flags of the underlying dm-integrity device are merged in.
 */
4784 int crypt_get_active_device(struct crypt_device *cd, const char *name,
4785 struct crypt_active_device *cad)
4788 struct crypt_dm_active_device dmd, dmdi = {};
4789 const char *namei = NULL;
4790 struct dm_target *tgt = &dmd.segment;
4791 uint64_t min_offset = UINT64_MAX;
4793 if (!cd || !name || !cad)
4796 r = dm_query_device(cd, name, DM_ACTIVE_DEVICE, &dmd);
4800 /* For LUKS2 with integrity we need flags from underlying dm-integrity */
4801 if (isLUKS2(cd->type) && crypt_get_integrity_tag_size(cd) && single_segment(&dmd)) {
4802 namei = device_dm_name(tgt->data_device);
4803 if (namei && dm_query_device(cd, namei, 0, &dmdi) >= 0)
4804 dmd.flags |= dmdi.flags;
/* TCRYPT offsets come from the header, not the dm table */
4807 if (cd && isTCRYPT(cd->type)) {
4808 cad->offset = TCRYPT_get_data_offset(cd, &cd->u.tcrypt.hdr, &cd->u.tcrypt.params);
4809 cad->iv_offset = TCRYPT_get_iv_offset(cd, &cd->u.tcrypt.hdr, &cd->u.tcrypt.params);
/* track the minimal offset across crypt/integrity/linear targets */
4812 if (tgt->type == DM_CRYPT && (min_offset > tgt->u.crypt.offset)) {
4813 min_offset = tgt->u.crypt.offset;
4814 cad->iv_offset = tgt->u.crypt.iv_offset;
4815 } else if (tgt->type == DM_INTEGRITY && (min_offset > tgt->u.integrity.offset)) {
4816 min_offset = tgt->u.integrity.offset;
4818 } else if (tgt->type == DM_LINEAR && (min_offset > tgt->u.linear.offset)) {
4819 min_offset = tgt->u.linear.offset;
4826 if (min_offset != UINT64_MAX)
4827 cad->offset = min_offset;
4829 cad->size = dmd.size;
4830 cad->flags = dmd.flags;
4833 dm_targets_free(cd, &dmd);
4834 dm_targets_free(cd, &dmdi);
/*
 * Public API: return the integrity-failure counter of an active
 * standalone dm-integrity device; 0 when unavailable.
 */
4839 uint64_t crypt_get_active_integrity_failures(struct crypt_device *cd, const char *name)
4841 struct crypt_dm_active_device dmd;
4842 uint64_t failures = 0;
4847 /* LUKS2 / dm-crypt does not provide this count. */
4848 if (dm_query_device(cd, name, 0, &dmd) < 0)
4851 if (single_segment(&dmd) && dmd.segment.type == DM_INTEGRITY)
4852 (void)dm_status_integrity_failures(cd, name, &failures);
4854 dm_targets_free(cd, &dmd);
4860 * Volume key handling
/*
 * Public API: retrieve a volume key using a passphrase. Thin wrapper
 * that builds a passphrase keyslot context (or none, when no
 * passphrase is given) and delegates to the keyslot-context variant.
 */
4862 int crypt_volume_key_get(struct crypt_device *cd,
4865 size_t *volume_key_size,
4866 const char *passphrase,
4867 size_t passphrase_size)
4870 struct crypt_keyslot_context kc;
4873 return crypt_volume_key_get_by_keyslot_context(cd, keyslot, volume_key, volume_key_size, NULL);
4875 crypt_keyslot_unlock_by_passphrase_init_internal(&kc, passphrase, passphrase_size);
4877 r = crypt_volume_key_get_by_keyslot_context(cd, keyslot, volume_key, volume_key_size, &kc);
4879 crypt_keyslot_context_destroy_internal(&kc);
/*
 * Public API: retrieve the volume key into a caller-provided buffer,
 * unlocking via the given keyslot context where required. The buffer
 * must be at least the key size for the chosen slot/segment; on success
 * *volume_key_size is set to the actual key length. For VERITY the
 * cached root hash is returned instead of a key.
 */
4884 int crypt_volume_key_get_by_keyslot_context(struct crypt_device *cd,
4887 size_t *volume_key_size,
4888 struct crypt_keyslot_context *kc)
4890 size_t passphrase_size;
4892 const char *passphrase = NULL;
4893 struct volume_key *vk = NULL;
4895 if (!cd || !volume_key || !volume_key_size ||
4896 (!kc && !isLUKS(cd->type) && !isTCRYPT(cd->type) && !isVERITY(cd->type)))
/* determine expected key length to validate the output buffer */
4899 if (isLUKS2(cd->type) && keyslot != CRYPT_ANY_SLOT)
4900 key_len = LUKS2_get_keyslot_stored_key_size(&cd->u.luks2.hdr, keyslot);
4902 key_len = crypt_get_volume_key_size(cd);
4907 if (key_len > (int)*volume_key_size) {
4908 log_err(cd, _("Volume key buffer too small."));
4912 if (kc && (!kc->get_passphrase || kc->type == CRYPT_KC_TYPE_KEY))
4916 r = kc->get_passphrase(cd, kc, &passphrase, &passphrase_size);
4923 if (isLUKS2(cd->type)) {
4924 if (kc && !kc->get_luks2_key)
4925 log_err(cd, _("Cannot retrieve volume key for LUKS2 device."));
4929 r = kc->get_luks2_key(cd, kc, keyslot,
4930 keyslot == CRYPT_ANY_SLOT ? CRYPT_DEFAULT_SEGMENT : CRYPT_ANY_SEGMENT,
4932 } else if (isLUKS1(cd->type)) {
4933 if (kc && !kc->get_luks1_volume_key)
4934 log_err(cd, _("Cannot retrieve volume key for LUKS1 device."));
4938 r = kc->get_luks1_volume_key(cd, kc, keyslot, &vk);
4939 } else if (isPLAIN(cd->type)) {
/* plain devices derive the key by hashing the passphrase */
4940 if (passphrase && cd->u.plain.hdr.hash)
4941 r = process_key(cd, cd->u.plain.hdr.hash, key_len,
4942 passphrase, passphrase_size, &vk);
4944 log_err(cd, _("Cannot retrieve volume key for plain device."));
4945 } else if (isVERITY(cd->type)) {
4946 /* volume_key == root hash */
4947 if (cd->u.verity.root_hash) {
4948 memcpy(volume_key, cd->u.verity.root_hash, cd->u.verity.root_hash_size);
4949 *volume_key_size = cd->u.verity.root_hash_size;
4952 log_err(cd, _("Cannot retrieve root hash for verity device."));
4953 } else if (isTCRYPT(cd->type)) {
4954 r = TCRYPT_get_volume_key(cd, &cd->u.tcrypt.hdr, &cd->u.tcrypt.params, &vk);
4955 } else if (isBITLK(cd->type)) {
4957 r = BITLK_get_volume_key(cd, passphrase, passphrase_size, &cd->u.bitlk.params, &vk);
4959 log_err(cd, _("Cannot retrieve volume key for BITLK device."));
4960 } else if (isFVAULT2(cd->type)) {
4962 r = FVAULT2_get_volume_key(cd, passphrase, passphrase_size, &cd->u.fvault2.params, &vk);
4964 log_err(cd, _("Cannot retrieve volume key for FVAULT2 device."));
4966 log_err(cd, _("This operation is not supported for %s crypt device."), cd->type ?: "(none)");
/* fall back to the cached context key when no keyslot matched */
4968 if (r == -ENOENT && isLUKS(cd->type) && cd->volume_key) {
4969 vk = crypt_alloc_volume_key(cd->volume_key->keylength, cd->volume_key->key);
4970 r = vk ? 0 : -ENOMEM;
4974 memcpy(volume_key, vk->key, vk->keylength);
4975 *volume_key_size = vk->keylength;
4978 crypt_free_volume_key(vk);
/*
 * Public API (LUKS only): verify that a raw volume key matches the
 * header digest. Returns 0 when the key matches, negative otherwise.
 */
4982 int crypt_volume_key_verify(struct crypt_device *cd,
4983 const char *volume_key,
4984 size_t volume_key_size)
4986 struct volume_key *vk;
4989 if ((r = _onlyLUKS(cd, CRYPT_CD_UNRESTRICTED)))
4992 vk = crypt_alloc_volume_key(volume_key_size, volume_key);
4996 if (isLUKS1(cd->type))
4997 r = LUKS_verify_volume_key(&cd->u.luks1.hdr, vk);
4998 else if (isLUKS2(cd->type))
4999 r = LUKS2_digest_verify_by_segment(cd, &cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT, vk);
5003 crypt_free_volume_key(vk);
5005 return r >= 0 ? 0 : r;
5009 * RNG and memory locking
/*
 * Public API: select the RNG source (CRYPT_RNG_URANDOM or
 * CRYPT_RNG_RANDOM); other values are silently ignored.
 */
5011 void crypt_set_rng_type(struct crypt_device *cd, int rng_type)
5017 case CRYPT_RNG_URANDOM:
5018 case CRYPT_RNG_RANDOM:
5019 log_dbg(cd, "RNG set to %d (%s).", rng_type, rng_type ? "random" : "urandom");
5020 cd->rng_type = rng_type;
/* Public API: return the RNG type configured by crypt_set_rng_type(). */
5024 int crypt_get_rng_type(struct crypt_device *cd)
5029 return cd->rng_type;
/*
 * Public API: legacy memory-locking call. Body not visible here;
 * NOTE(review): appears to be kept only for ABI compatibility —
 * confirm against the full source.
 */
5032 int crypt_memory_lock(struct crypt_device *cd, int lock)
/* Public API: store compatibility flags in the context (see CRYPT_COMPAT_* flags). */
5037 void crypt_set_compatibility(struct crypt_device *cd, uint32_t flags)
5040 cd->compatibility = flags;
/* Public API: return compatibility flags set via crypt_set_compatibility(). */
5043 uint32_t crypt_get_compatibility(struct crypt_device *cd)
5046 return cd->compatibility;
/*
 * Public API: query activation status of a dm device name; returns
 * CRYPT_ACTIVE / CRYPT_INACTIVE (incl. ENODEV) / CRYPT_INVALID.
 * Works with a NULL context by initializing the dm backend locally.
 */
5054 crypt_status_info crypt_status(struct crypt_device *cd, const char *name)
5059 return CRYPT_INVALID;
5062 dm_backend_init(cd);
5064 r = dm_status_device(cd, name);
5067 dm_backend_exit(cd);
5069 if (r < 0 && r != -ENODEV)
5070 return CRYPT_INVALID;
5073 return CRYPT_ACTIVE;
5078 return CRYPT_INACTIVE;
/*
 * Print a human-readable dump of the LUKS1 header: cipher parameters,
 * master-key digest/salt/iterations and the state of all keyslots.
 */
5081 static int _luks_dump(struct crypt_device *cd)
5085 log_std(cd, "LUKS header information for %s\n\n", mdata_device_path(cd));
5086 log_std(cd, "Version: \t%" PRIu16 "\n", cd->u.luks1.hdr.version);
5087 log_std(cd, "Cipher name: \t%s\n", cd->u.luks1.hdr.cipherName);
5088 log_std(cd, "Cipher mode: \t%s\n", cd->u.luks1.hdr.cipherMode);
5089 log_std(cd, "Hash spec: \t%s\n", cd->u.luks1.hdr.hashSpec);
5090 log_std(cd, "Payload offset:\t%" PRIu32 "\n", cd->u.luks1.hdr.payloadOffset);
5091 log_std(cd, "MK bits: \t%" PRIu32 "\n", cd->u.luks1.hdr.keyBytes * 8);
5092 log_std(cd, "MK digest: \t");
5093 crypt_log_hex(cd, cd->u.luks1.hdr.mkDigest, LUKS_DIGESTSIZE, " ", 0, NULL);
/* salt is printed in two halves on separate lines */
5095 log_std(cd, "MK salt: \t");
5096 crypt_log_hex(cd, cd->u.luks1.hdr.mkDigestSalt, LUKS_SALTSIZE/2, " ", 0, NULL);
5097 log_std(cd, "\n \t");
5098 crypt_log_hex(cd, cd->u.luks1.hdr.mkDigestSalt+LUKS_SALTSIZE/2, LUKS_SALTSIZE/2, " ", 0, NULL);
5100 log_std(cd, "MK iterations: \t%" PRIu32 "\n", cd->u.luks1.hdr.mkDigestIterations);
5101 log_std(cd, "UUID: \t%s\n\n", cd->u.luks1.hdr.uuid);
5102 for(i = 0; i < LUKS_NUMKEYS; i++) {
5103 if(cd->u.luks1.hdr.keyblock[i].active == LUKS_KEY_ENABLED) {
5104 log_std(cd, "Key Slot %d: ENABLED\n",i);
5105 log_std(cd, "\tIterations: \t%" PRIu32 "\n",
5106 cd->u.luks1.hdr.keyblock[i].passwordIterations);
5107 log_std(cd, "\tSalt: \t");
5108 crypt_log_hex(cd, cd->u.luks1.hdr.keyblock[i].passwordSalt,
5109 LUKS_SALTSIZE/2, " ", 0, NULL);
5110 log_std(cd, "\n\t \t");
5111 crypt_log_hex(cd, cd->u.luks1.hdr.keyblock[i].passwordSalt +
5112 LUKS_SALTSIZE/2, LUKS_SALTSIZE/2, " ", 0, NULL);
5115 log_std(cd, "\tKey material offset:\t%" PRIu32 "\n",
5116 cd->u.luks1.hdr.keyblock[i].keyMaterialOffset);
5117 log_std(cd, "\tAF stripes: \t%" PRIu32 "\n",
5118 cd->u.luks1.hdr.keyblock[i].stripes);
5121 log_std(cd, "Key Slot %d: DISABLED\n", i);
/*
 * Public API: dump header information for the current device type;
 * returns -EINVAL-style error for types without dump support.
 */
5126 int crypt_dump(struct crypt_device *cd)
5130 if (isLUKS1(cd->type))
5131 return _luks_dump(cd);
5132 else if (isLUKS2(cd->type))
5133 return LUKS2_hdr_dump(cd, &cd->u.luks2.hdr);
5134 else if (isVERITY(cd->type))
5135 return VERITY_dump(cd, &cd->u.verity.hdr,
5136 cd->u.verity.root_hash, cd->u.verity.root_hash_size,
5137 cd->u.verity.fec_device);
5138 else if (isTCRYPT(cd->type))
5139 return TCRYPT_dump(cd, &cd->u.tcrypt.hdr, &cd->u.tcrypt.params);
5140 else if (isINTEGRITY(cd->type))
5141 return INTEGRITY_dump(cd, crypt_data_device(cd), 0);
5142 else if (isBITLK(cd->type))
5143 return BITLK_dump(cd, crypt_data_device(cd), &cd->u.bitlk.params);
5144 else if (isFVAULT2(cd->type))
5145 return FVAULT2_dump(cd, crypt_data_device(cd), &cd->u.fvault2.params);
5147 log_err(cd, _("Dump operation is not supported for this device type."));
/* Public API: JSON header dump — supported for LUKS2 only. */
5151 int crypt_dump_json(struct crypt_device *cd, const char **json, uint32_t flags)
5155 if (isLUKS2(cd->type))
5156 return LUKS2_hdr_dump_json(cd, &cd->u.luks2.hdr, json);
5158 log_err(cd, _("Dump operation is not supported for this device type."));
/*
 * Public API: return the full cipher specification string
 * ("cipher-mode") for the device type, or NULL when not applicable.
 */
5163 const char *crypt_get_cipher_spec(struct crypt_device *cd)
5167 else if (isLUKS2(cd->type))
5168 return LUKS2_get_cipher(&cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT);
5169 else if (isLUKS1(cd->type))
5170 return cd->u.luks1.cipher_spec;
5171 else if (isPLAIN(cd->type))
5172 return cd->u.plain.cipher_spec;
5173 else if (isLOOPAES(cd->type))
5174 return cd->u.loopaes.cipher_spec;
5175 else if (isBITLK(cd->type))
5176 return cd->u.bitlk.cipher_spec;
/* context without a type: try initialization from the active dm table */
5177 else if (!cd->type && !_init_by_name_crypt_none(cd))
5178 return cd->u.none.cipher_spec;
/*
 * Public API: return the cipher name (without mode) for the device
 * type, or NULL when unknown.
 */
5183 const char *crypt_get_cipher(struct crypt_device *cd)
5188 if (isPLAIN(cd->type))
5189 return cd->u.plain.cipher;
5191 if (isLUKS1(cd->type))
5192 return cd->u.luks1.hdr.cipherName;
5194 if (isLUKS2(cd->type)) {
/* LUKS2 stores the full spec; split it into cached name/mode buffers */
5195 if (crypt_parse_name_and_mode(LUKS2_get_cipher(&cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT),
5196 cd->u.luks2.cipher, NULL, cd->u.luks2.cipher_mode))
5198 return cd->u.luks2.cipher;
5201 if (isLOOPAES(cd->type))
5202 return cd->u.loopaes.cipher;
5204 if (isTCRYPT(cd->type))
5205 return cd->u.tcrypt.params.cipher;
5207 if (isBITLK(cd->type))
5208 return cd->u.bitlk.params.cipher;
5210 if (isFVAULT2(cd->type))
5211 return cd->u.fvault2.params.cipher;
5213 if (!cd->type && !_init_by_name_crypt_none(cd))
5214 return cd->u.none.cipher;
/*
 * Public API: return the cipher mode (e.g. IV specification part) for
 * the device type, or NULL when unknown.
 */
5219 const char *crypt_get_cipher_mode(struct crypt_device *cd)
5224 if (isPLAIN(cd->type))
5225 return cd->u.plain.cipher_mode;
5227 if (isLUKS1(cd->type))
5228 return cd->u.luks1.hdr.cipherMode;
5230 if (isLUKS2(cd->type)) {
/* LUKS2 stores the full spec; split it into cached name/mode buffers */
5231 if (crypt_parse_name_and_mode(LUKS2_get_cipher(&cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT),
5232 cd->u.luks2.cipher, NULL, cd->u.luks2.cipher_mode))
5234 return cd->u.luks2.cipher_mode;
5237 if (isLOOPAES(cd->type))
5238 return cd->u.loopaes.cipher_mode;
5240 if (isTCRYPT(cd->type))
5241 return cd->u.tcrypt.params.mode;
5243 if (isBITLK(cd->type))
5244 return cd->u.bitlk.params.cipher_mode;
5246 if (isFVAULT2(cd->type))
5247 return cd->u.fvault2.params.cipher_mode;
5249 if (!cd->type && !_init_by_name_crypt_none(cd))
5250 return cd->u.none.cipher_mode;
/* Public API: return the integrity algorithm spec (INTEGRITY or LUKS2 only). */
5256 const char *crypt_get_integrity(struct crypt_device *cd)
5261 if (isINTEGRITY(cd->type))
5262 return cd->u.integrity.params.integrity;
5264 if (isLUKS2(cd->type))
5265 return LUKS2_get_integrity(&cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT);
/*
 * Public API: return the integrity key size in bytes; 0 when the
 * device type has no integrity key or the size is unknown.
 */
5271 int crypt_get_integrity_key_size(struct crypt_device *cd)
5275 if (isINTEGRITY(cd->type))
5276 key_size = INTEGRITY_key_size(crypt_get_integrity(cd));
5278 if (isLUKS2(cd->type))
5279 key_size = INTEGRITY_key_size(crypt_get_integrity(cd));
/* never report negative sizes to callers */
5281 return key_size > 0 ? key_size : 0;
/* Public API: return per-sector integrity tag size in bytes (0 if none). */
5285 int crypt_get_integrity_tag_size(struct crypt_device *cd)
5287 if (isINTEGRITY(cd->type))
5288 return cd->u.integrity.params.tag_size;
5290 if (isLUKS2(cd->type))
5291 return INTEGRITY_tag_size(crypt_get_integrity(cd),
5292 crypt_get_cipher(cd),
5293 crypt_get_cipher_mode(cd));
/* Public API: return the encryption sector size for plain/INTEGRITY/LUKS2 devices. */
5297 int crypt_get_sector_size(struct crypt_device *cd)
5302 if (isPLAIN(cd->type))
5303 return cd->u.plain.hdr.sector_size;
5305 if (isINTEGRITY(cd->type))
5306 return cd->u.integrity.params.sector_size;
5308 if (isLUKS2(cd->type))
5309 return LUKS2_get_sector_size(&cd->u.luks2.hdr);
/*
 * Public API: return the device UUID (or the closest equivalent —
 * GUID for BITLK, family UUID for FVAULT2), NULL when unavailable.
 */
5314 const char *crypt_get_uuid(struct crypt_device *cd)
5319 if (isLUKS1(cd->type))
5320 return cd->u.luks1.hdr.uuid;
5322 if (isLUKS2(cd->type))
5323 return cd->u.luks2.hdr.uuid;
5325 if (isVERITY(cd->type))
5326 return cd->u.verity.uuid;
5328 if (isBITLK(cd->type))
5329 return cd->u.bitlk.params.guid;
5331 if (isFVAULT2(cd->type))
5332 return cd->u.fvault2.params.family_uuid;
/* Public API: return the data device path, preferring the block-device path. */
5337 const char *crypt_get_device_name(struct crypt_device *cd)
5344 path = device_block_path(cd->device);
5346 path = device_path(cd->device);
/* Public API: return the detached metadata device path, or NULL when none is set. */
5351 const char *crypt_get_metadata_device_name(struct crypt_device *cd)
5355 if (!cd || !cd->metadata_device)
5358 path = device_block_path(cd->metadata_device);
5360 path = device_path(cd->metadata_device);
/*
 * Public API: return the volume key size in bytes for the device type
 * (root hash size for VERITY); 0 when unknown.
 */
5365 int crypt_get_volume_key_size(struct crypt_device *cd)
5372 if (isPLAIN(cd->type))
5373 return cd->u.plain.key_size;
5375 if (isLUKS1(cd->type))
5376 return cd->u.luks1.hdr.keyBytes;
5378 if (isLUKS2(cd->type)) {
5379 r = LUKS2_get_volume_key_size(&cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT);
/* fall back to the cached context key when the header has no answer */
5380 if (r < 0 && cd->volume_key)
5381 r = cd->volume_key->keylength;
5382 return r < 0 ? 0 : r;
5385 if (isLOOPAES(cd->type))
5386 return cd->u.loopaes.key_size;
5388 if (isVERITY(cd->type))
5389 return cd->u.verity.root_hash_size;
5391 if (isTCRYPT(cd->type))
5392 return cd->u.tcrypt.params.key_size;
/* BITLK stores the key size in bits */
5394 if (isBITLK(cd->type))
5395 return cd->u.bitlk.params.key_size / 8;
5397 if (isFVAULT2(cd->type))
5398 return cd->u.fvault2.params.key_size;
5400 if (!cd->type && !_init_by_name_crypt_none(cd))
5401 return cd->u.none.key_size;
/*
 * Public API (LUKS only): return the key size stored in a specific
 * keyslot; negative error for invalid context or slot number.
 */
5408 int crypt_keyslot_get_key_size(struct crypt_device *cd, int keyslot)
5411 if (keyslot < 0 || keyslot >= crypt_keyslot_max(cd->type))
5414 if (isLUKS1(cd->type))
5415 return cd->u.luks1.hdr.keyBytes;
5417 if (isLUKS2(cd->type))
5418 return LUKS2_get_keyslot_stored_key_size(&cd->u.luks2.hdr, keyslot);
/*
 * Public API (LUKS2 only): override the cipher and key size used to
 * encrypt keyslot material for subsequently created keyslots.
 */
5423 int crypt_keyslot_set_encryption(struct crypt_device *cd,
5429 if (!cd || !cipher || !key_size || !isLUKS2(cd->type))
5432 if (LUKS2_keyslot_cipher_incompatible(cd, cipher))
5435 if (!(tmp = strdup(cipher)))
/* replace any previously stored override */
5438 free(cd->u.luks2.keyslot_cipher);
5439 cd->u.luks2.keyslot_cipher = tmp;
5440 cd->u.luks2.keyslot_key_size = key_size;
/*
 * Public API (LUKS only): return the cipher used for keyslot-material
 * encryption and store its key size in *key_size. Resolution order for
 * LUKS2 with CRYPT_ANY_SLOT: explicit override, then the volume cipher
 * when compatible, then the built-in default.
 */
5445 const char *crypt_keyslot_get_encryption(struct crypt_device *cd, int keyslot, size_t *key_size)
5449 if (!cd || !isLUKS(cd->type) || !key_size)
5452 if (isLUKS1(cd->type)) {
5453 if (keyslot != CRYPT_ANY_SLOT &&
5454 LUKS_keyslot_info(&cd->u.luks1.hdr, keyslot) < CRYPT_SLOT_ACTIVE)
5456 *key_size = crypt_get_volume_key_size(cd);
5457 return cd->u.luks1.cipher_spec;
5460 if (keyslot != CRYPT_ANY_SLOT)
5461 return LUKS2_get_keyslot_cipher(&cd->u.luks2.hdr, keyslot, key_size);
5463 /* Keyslot encryption was set through crypt_keyslot_set_encryption() */
5464 if (cd->u.luks2.keyslot_cipher) {
5465 *key_size = cd->u.luks2.keyslot_key_size;
5466 return cd->u.luks2.keyslot_cipher;
5469 /* Try to reuse volume encryption parameters */
5470 cipher = LUKS2_get_cipher(&cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT);
5471 if (!LUKS2_keyslot_cipher_incompatible(cd, cipher)) {
5472 *key_size = crypt_get_volume_key_size(cd);
5477 /* Fallback to default LUKS2 keyslot encryption */
5478 *key_size = DEFAULT_LUKS2_KEYSLOT_KEYBITS / 8;
5479 return DEFAULT_LUKS2_KEYSLOT_CIPHER;
/* Public API (LUKS only): fill *pbkdf with the PBKDF parameters of one keyslot. */
5482 int crypt_keyslot_get_pbkdf(struct crypt_device *cd, int keyslot, struct crypt_pbkdf_type *pbkdf)
5484 if (!cd || !pbkdf || keyslot == CRYPT_ANY_SLOT)
5487 if (isLUKS1(cd->type))
5488 return LUKS_keyslot_pbkdf(&cd->u.luks1.hdr, keyslot, pbkdf);
5489 else if (isLUKS2(cd->type))
5490 return LUKS2_keyslot_pbkdf(&cd->u.luks2.hdr, keyslot, pbkdf);
5495 int crypt_set_data_offset(struct crypt_device *cd, uint64_t data_offset)
5499 if (data_offset % (MAX_SECTOR_SIZE >> SECTOR_SHIFT)) {
5500 log_err(cd, _("Data offset is not multiple of %u bytes."), MAX_SECTOR_SIZE);
5504 cd->data_offset = data_offset;
5505 log_dbg(cd, "Data offset set to %" PRIu64 " (512-byte) sectors.", data_offset);
/*
 * crypt_set_metadata_size - request specific LUKS2 header/keyslot area
 * sizes for a subsequent format. Only meaningful before/for LUKS2 (a
 * device already typed as something else is rejected); zero means "use
 * default" and skips the validity check.
 * NOTE(review): elided source view; comments only.
 */
5510 int crypt_set_metadata_size(struct crypt_device *cd,
5511 uint64_t metadata_size,
5512 uint64_t keyslots_size)
5517 if (cd->type && !isLUKS2(cd->type))
5520 if (metadata_size && LUKS2_check_metadata_area_size(metadata_size))
5523 if (keyslots_size && LUKS2_check_keyslots_area_size(keyslots_size))
5526 cd->metadata_size = metadata_size;
5527 cd->keyslots_size = keyslots_size;
/*
 * crypt_get_metadata_size - report metadata and keyslot area sizes in
 * bytes. With no loaded header it returns the values stashed by
 * crypt_set_metadata_size(); for LUKS1 the keyslot area is derived from
 * total header sectors minus the aligned metadata prefix; for LUKS2 the
 * sizes come straight from the header.
 */
5532 int crypt_get_metadata_size(struct crypt_device *cd,
5533 uint64_t *metadata_size,
5534 uint64_t *keyslots_size)
5536 uint64_t msize, ksize;
5542 msize = cd->metadata_size;
5543 ksize = cd->keyslots_size;
5544 } else if (isLUKS1(cd->type)) {
5545 msize = LUKS_ALIGN_KEYSLOTS;
5546 ksize = LUKS_device_sectors(&cd->u.luks1.hdr) * SECTOR_SIZE - msize;
5547 } else if (isLUKS2(cd->type)) {
5548 msize = LUKS2_metadata_size(&cd->u.luks2.hdr);
5549 ksize = LUKS2_keyslots_size(&cd->u.luks2.hdr);
/* Output pointers are optional; each is filled only when provided
 * (elided NULL checks presumably guard these stores — confirm). */
5554 *metadata_size = msize;
5556 *keyslots_size = ksize;
/*
 * crypt_get_data_offset - start of the encrypted payload in 512-byte
 * sectors, dispatched per device type. BITLK and FVAULT2 store the offset
 * in bytes, hence the division by SECTOR_SIZE. Falls through to the
 * manually configured cd->data_offset when no type-specific source applies.
 * NOTE(review): elided source view; comments only.
 */
5561 uint64_t crypt_get_data_offset(struct crypt_device *cd)
5566 if (isPLAIN(cd->type))
5567 return cd->u.plain.hdr.offset;
5569 if (isLUKS1(cd->type))
5570 return cd->u.luks1.hdr.payloadOffset;
5572 if (isLUKS2(cd->type))
5573 return LUKS2_get_data_offset(&cd->u.luks2.hdr);
5575 if (isLOOPAES(cd->type))
5576 return cd->u.loopaes.hdr.offset;
5578 if (isTCRYPT(cd->type))
5579 return TCRYPT_get_data_offset(cd, &cd->u.tcrypt.hdr, &cd->u.tcrypt.params);
5581 if (isBITLK(cd->type))
5582 return cd->u.bitlk.params.volume_header_size / SECTOR_SIZE;
5584 if (isFVAULT2(cd->type))
5585 return cd->u.fvault2.params.log_vol_off / SECTOR_SIZE;
5587 return cd->data_offset;
/*
 * crypt_get_iv_offset - IV offset ("skip") in sectors for formats that
 * support shifting the IV sequence; only PLAIN, LOOPAES and TCRYPT carry
 * such a value (default return for other types is elided from this view).
 */
5590 uint64_t crypt_get_iv_offset(struct crypt_device *cd)
5595 if (isPLAIN(cd->type))
5596 return cd->u.plain.hdr.skip;
5598 if (isLOOPAES(cd->type))
5599 return cd->u.loopaes.hdr.skip;
5601 if (isTCRYPT(cd->type))
5602 return TCRYPT_get_iv_offset(cd, &cd->u.tcrypt.hdr, &cd->u.tcrypt.params);
/*
 * Keyslot introspection group. All entry points first validate the device
 * is LUKS via _onlyLUKS(); QUIET suppresses error logging and UNRESTRICTED
 * allows headers with unmet requirements, since these are read-only queries.
 * NOTE(review): elided source view; comments only.
 */
/* crypt_keyslot_status - state of one keyslot (invalid/inactive/active/last). */
5607 crypt_keyslot_info crypt_keyslot_status(struct crypt_device *cd, int keyslot)
5609 if (_onlyLUKS(cd, CRYPT_CD_QUIET | CRYPT_CD_UNRESTRICTED) < 0)
5610 return CRYPT_SLOT_INVALID;
5612 if (isLUKS1(cd->type))
5613 return LUKS_keyslot_info(&cd->u.luks1.hdr, keyslot);
5614 else if(isLUKS2(cd->type))
5615 return LUKS2_keyslot_info(&cd->u.luks2.hdr, keyslot);
5617 return CRYPT_SLOT_INVALID;
/* crypt_keyslot_max - highest keyslot count for a given type string. */
5620 int crypt_keyslot_max(const char *type)
5623 return LUKS_NUMKEYS;
5626 return LUKS2_KEYSLOTS_MAX;
/* crypt_keyslot_area - byte offset/length of the keyslot's binary area. */
5631 int crypt_keyslot_area(struct crypt_device *cd,
5636 if (_onlyLUKS(cd, CRYPT_CD_QUIET | CRYPT_CD_UNRESTRICTED) || !offset || !length)
5639 if (isLUKS2(cd->type))
5640 return LUKS2_keyslot_area(&cd->u.luks2.hdr, keyslot, offset, length);
5642 return LUKS_keyslot_area(&cd->u.luks1.hdr, keyslot, offset, length);
/* crypt_keyslot_get_priority - LUKS2 keyslot priority; LUKS1 has no
 * priorities, so anything valid reports NORMAL. */
5645 crypt_keyslot_priority crypt_keyslot_get_priority(struct crypt_device *cd, int keyslot)
5647 if (_onlyLUKS(cd, CRYPT_CD_QUIET | CRYPT_CD_UNRESTRICTED))
5648 return CRYPT_SLOT_PRIORITY_INVALID;
5650 if (keyslot < 0 || keyslot >= crypt_keyslot_max(cd->type))
5651 return CRYPT_SLOT_PRIORITY_INVALID;
5653 if (isLUKS2(cd->type))
5654 return LUKS2_keyslot_priority_get(&cd->u.luks2.hdr, keyslot);
5656 return CRYPT_SLOT_PRIORITY_NORMAL;
/* crypt_keyslot_set_priority - persist a new priority (LUKS2 only; the
 * trailing 1 requests an immediate header commit). */
5659 int crypt_keyslot_set_priority(struct crypt_device *cd, int keyslot, crypt_keyslot_priority priority)
5663 log_dbg(cd, "Setting keyslot %d to priority %d.", keyslot, priority);
5665 if (priority == CRYPT_SLOT_PRIORITY_INVALID)
5668 if (keyslot < 0 || keyslot >= crypt_keyslot_max(cd->type))
5671 if ((r = onlyLUKS2(cd)))
5674 return LUKS2_keyslot_priority_set(cd, &cd->u.luks2.hdr, keyslot, priority, 1);
/* crypt_get_type - device type string ("LUKS2", "PLAIN", ...) or NULL.
 * NOTE(review): elided source view; comments only. */
5677 const char *crypt_get_type(struct crypt_device *cd)
5679 return cd ? cd->type : NULL;
/* crypt_get_default_type - compile-time default LUKS format string. */
5682 const char *crypt_get_default_type(void)
5684 return DEFAULT_LUKS_FORMAT;
/*
 * crypt_get_verity_info - export dm-verity parameters from the in-memory
 * header into caller-provided *vp. The returned pointers (paths, hash
 * name, salt) alias context-owned data and stay valid only while cd lives.
 * Flags are masked to the two externally meaningful bits.
 */
5687 int crypt_get_verity_info(struct crypt_device *cd,
5688 struct crypt_params_verity *vp)
5690 if (!cd || !isVERITY(cd->type) || !vp)
5693 vp->data_device = device_path(cd->device);
5694 vp->hash_device = mdata_device_path(cd);
5695 vp->fec_device = device_path(cd->u.verity.fec_device);
5696 vp->fec_area_offset = cd->u.verity.hdr.fec_area_offset;
5697 vp->fec_roots = cd->u.verity.hdr.fec_roots;
5698 vp->hash_name = cd->u.verity.hdr.hash_name;
5699 vp->salt = cd->u.verity.hdr.salt;
5700 vp->salt_size = cd->u.verity.hdr.salt_size;
5701 vp->data_block_size = cd->u.verity.hdr.data_block_size;
5702 vp->hash_block_size = cd->u.verity.hdr.hash_block_size;
5703 vp->data_size = cd->u.verity.hdr.data_size;
5704 vp->hash_area_offset = cd->u.verity.hdr.hash_area_offset;
5705 vp->hash_type = cd->u.verity.hdr.hash_type;
5706 vp->flags = cd->u.verity.hdr.flags & (CRYPT_VERITY_NO_HEADER | CRYPT_VERITY_ROOT_HASH_SIGNATURE);
/*
 * crypt_get_integrity_info - export dm-integrity parameters into *ip.
 * Standalone INTEGRITY devices copy the stored parameters; key pointers
 * are deliberately NULLed so raw key material is never handed out here.
 * LUKS2 devices with authenticated encryption synthesize what they can
 * (integrity spec, key size, tag size) and zero the journal fields that
 * LUKS2 metadata does not track (FIXME markers in the code).
 * NOTE(review): elided source view; comments only.
 */
5710 int crypt_get_integrity_info(struct crypt_device *cd,
5711 struct crypt_params_integrity *ip)
5716 if (isINTEGRITY(cd->type)) {
5717 ip->journal_size = cd->u.integrity.params.journal_size;
5718 ip->journal_watermark = cd->u.integrity.params.journal_watermark;
5719 ip->journal_commit_time = cd->u.integrity.params.journal_commit_time;
5720 ip->interleave_sectors = cd->u.integrity.params.interleave_sectors;
5721 ip->tag_size = cd->u.integrity.params.tag_size;
5722 ip->sector_size = cd->u.integrity.params.sector_size;
5723 ip->buffer_sectors = cd->u.integrity.params.buffer_sectors;
5725 ip->integrity = cd->u.integrity.params.integrity;
5726 ip->integrity_key_size = crypt_get_integrity_key_size(cd);
5728 ip->journal_integrity = cd->u.integrity.params.journal_integrity;
5729 ip->journal_integrity_key_size = cd->u.integrity.params.journal_integrity_key_size;
/* Never expose key material through this query API. */
5730 ip->journal_integrity_key = NULL;
5732 ip->journal_crypt = cd->u.integrity.params.journal_crypt;
5733 ip->journal_crypt_key_size = cd->u.integrity.params.journal_crypt_key_size;
5734 ip->journal_crypt_key = NULL;
5736 } else if (isLUKS2(cd->type)) {
5737 ip->journal_size = 0; // FIXME
5738 ip->journal_watermark = 0; // FIXME
5739 ip->journal_commit_time = 0; // FIXME
5740 ip->interleave_sectors = 0; // FIXME
5741 ip->sector_size = crypt_get_sector_size(cd);
5742 ip->buffer_sectors = 0; // FIXME
5744 ip->integrity = LUKS2_get_integrity(&cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT);
5745 ip->integrity_key_size = crypt_get_integrity_key_size(cd);
/* Tag size is derived from the integrity spec plus cipher/mode. */
5746 ip->tag_size = INTEGRITY_tag_size(ip->integrity, crypt_get_cipher(cd), crypt_get_cipher_mode(cd));
5748 ip->journal_integrity = NULL;
5749 ip->journal_integrity_key_size = 0;
5750 ip->journal_integrity_key = NULL;
5752 ip->journal_crypt = NULL;
5753 ip->journal_crypt_key_size = 0;
5754 ip->journal_crypt_key = NULL;
/*
 * crypt_convert - convert an existing header between LUKS1 and LUKS2
 * in both directions. On failure the in-memory LUKS2 state is rolled
 * back (the header on disk may have been partially touched); -EBUSY is
 * reported as "device in use". On success the context is reset with
 * crypt_free_type() and the new header reloaded via crypt_load() so the
 * context reflects the converted type.
 * NOTE(review): elided source view; comments only.
 */
5761 int crypt_convert(struct crypt_device *cd,
5765 struct luks_phdr hdr1;
5766 struct luks2_hdr hdr2;
5772 log_dbg(cd, "Converting LUKS device to type %s", type);
5774 if ((r = onlyLUKS(cd)))
5777 if (isLUKS1(cd->type) && isLUKS2(type))
5778 r = LUKS2_luks1_to_luks2(cd, &cd->u.luks1.hdr, &hdr2);
5779 else if (isLUKS2(cd->type) && isLUKS1(type))
5780 r = LUKS2_luks2_to_luks1(cd, &cd->u.luks2.hdr, &hdr1);
5785 /* in-memory header may be invalid after failed conversion */
5786 _luks2_rollback(cd);
5788 log_err(cd, _("Cannot convert device %s which is still in use."), mdata_device_path(cd));
5792 crypt_free_type(cd, NULL);
5794 return crypt_load(cd, type, params);
5797 /* Internal access function to header pointer */
/*
 * crypt_get_hdr - return a pointer into the type-specific union member for
 * the requested type, or fall through (NULL return elided from this view)
 * when the context type does not match. Library-internal; callers must
 * cast to the matching header struct.
 * NOTE(review): elided source view; comments only.
 */
5798 void *crypt_get_hdr(struct crypt_device *cd, const char *type)
5800 /* If requested type differs, ignore it */
5801 if (strcmp(cd->type, type))
5804 if (isPLAIN(cd->type))
5805 return &cd->u.plain;
5807 if (isLUKS1(cd->type))
5808 return &cd->u.luks1.hdr;
5810 if (isLUKS2(cd->type))
5811 return &cd->u.luks2.hdr;
5813 if (isLOOPAES(cd->type))
5814 return &cd->u.loopaes;
5816 if (isVERITY(cd->type))
5817 return &cd->u.verity;
5819 if (isTCRYPT(cd->type))
5820 return &cd->u.tcrypt;
/* Internal accessor for the active LUKS2 reencryption context
 * (caller presumably guarantees cd is LUKS2 — no check here). */
5826 struct luks2_reencrypt *crypt_get_luks2_reencrypt(struct crypt_device *cd)
5828 return cd->u.luks2.rh;
/* Internal setter for the LUKS2 reencryption context; does not free any
 * previously stored handle. */
5832 void crypt_set_luks2_reencrypt(struct crypt_device *cd, struct luks2_reencrypt *rh)
5834 cd->u.luks2.rh = rh;
/*
 * crypt_activate_by_token_pin - unlock/activate via a LUKS2 token, with an
 * optional PIN passed through to the token handler. name == NULL means
 * "check the token can unlock" without activating. Token can be a specific
 * id or CRYPT_ANY_TOKEN; type restricts which handler may match.
 * NOTE(review): elided source view; comments only.
 */
5840 int crypt_activate_by_token_pin(struct crypt_device *cd, const char *name,
5841 const char *type, int token, const char *pin, size_t pin_size,
5842 void *usrptr, uint32_t flags)
5846 log_dbg(cd, "%s volume %s using token (%s type) %d.",
5847 name ? "Activating" : "Checking", name ?: "passphrase",
5848 type ?: "any", token);
5850 if ((r = _onlyLUKS2(cd, CRYPT_CD_QUIET | CRYPT_CD_UNRESTRICTED, 0)))
/* Explicit keyring request is invalid when keyring upload is unusable. */
5853 if ((flags & CRYPT_ACTIVATE_KEYRING_KEY) && !crypt_use_keyring_for_vk(cd))
/* Unbound keys can be tested but never activate a named device. */
5856 if ((flags & CRYPT_ACTIVATE_ALLOW_UNBOUND_KEY) && name)
5859 r = _activate_check_status(cd, name, flags & CRYPT_ACTIVATE_REFRESH);
5863 return LUKS2_token_open_and_activate(cd, &cd->u.luks2.hdr, token, name, type,
5864 pin, pin_size, flags, usrptr);
/* crypt_activate_by_token - PIN-less convenience wrapper (any token type). */
5867 int crypt_activate_by_token(struct crypt_device *cd,
5868 const char *name, int token, void *usrptr, uint32_t flags)
5870 return crypt_activate_by_token_pin(cd, name, NULL, token, NULL, 0, usrptr, flags);
/* crypt_token_json_get - expose raw token JSON; returns token id on
 * success via the ?: idiom (json getter returns 0 on success). */
5873 int crypt_token_json_get(struct crypt_device *cd, int token, const char **json)
5880 log_dbg(cd, "Requesting JSON for token %d.", token);
5882 if ((r = _onlyLUKS2(cd, CRYPT_CD_UNRESTRICTED, 0)))
5885 return LUKS2_token_json_get(&cd->u.luks2.hdr, token, json) ?: token;
/* crypt_token_json_set - store/replace token JSON and commit (trailing 1). */
5888 int crypt_token_json_set(struct crypt_device *cd, int token, const char *json)
5892 log_dbg(cd, "Updating JSON for token %d.", token);
5894 if ((r = onlyLUKS2(cd)))
5897 return LUKS2_token_create(cd, &cd->u.luks2.hdr, token, json, 1);
/* crypt_token_status - token state plus optional handler type name. */
5900 crypt_token_info crypt_token_status(struct crypt_device *cd, int token, const char **type)
5902 if (_onlyLUKS2(cd, CRYPT_CD_QUIET | CRYPT_CD_UNRESTRICTED, 0))
5903 return CRYPT_TOKEN_INVALID;
5905 return LUKS2_token_status(cd, &cd->u.luks2.hdr, token, type);
/* crypt_token_max - token slot count for a type string (LUKS2 only). */
5908 int crypt_token_max(const char *type)
5911 return LUKS2_TOKENS_MAX;
/*
 * crypt_token_luks2_keyring_get - read back the parameters of a built-in
 * "luks2-keyring" token. Any other token state/type is rejected after the
 * status lookup (error returns between the logged cases are elided from
 * this view).
 * NOTE(review): elided source view; comments only.
 */
5916 int crypt_token_luks2_keyring_get(struct crypt_device *cd,
5918 struct crypt_token_params_luks2_keyring *params)
5920 crypt_token_info token_info;
5927 log_dbg(cd, "Requesting LUKS2 keyring token %d.", token);
5929 if ((r = _onlyLUKS2(cd, CRYPT_CD_UNRESTRICTED, 0)))
5932 token_info = LUKS2_token_status(cd, &cd->u.luks2.hdr, token, &type);
5933 switch (token_info) {
5934 case CRYPT_TOKEN_INVALID:
5935 log_dbg(cd, "Token %d is invalid.", token);
5937 case CRYPT_TOKEN_INACTIVE:
5938 log_dbg(cd, "Token %d is inactive.", token);
/* Only the internal keyring token type is acceptable here. */
5940 case CRYPT_TOKEN_INTERNAL:
5941 if (!strcmp(type, LUKS2_TOKEN_KEYRING))
5944 case CRYPT_TOKEN_INTERNAL_UNKNOWN:
5945 case CRYPT_TOKEN_EXTERNAL:
5946 case CRYPT_TOKEN_EXTERNAL_UNKNOWN:
5947 log_dbg(cd, "Token %d has unexpected type %s.", token, type);
5951 return LUKS2_token_keyring_get(&cd->u.luks2.hdr, token, params);
/*
 * crypt_token_luks2_keyring_set - build the JSON for a keyring token from
 * params (key_description is mandatory) and store it with commit.
 */
5954 int crypt_token_luks2_keyring_set(struct crypt_device *cd,
5956 const struct crypt_token_params_luks2_keyring *params)
5961 if (!params || !params->key_description)
5964 log_dbg(cd, "Creating new LUKS2 keyring token (%d).", token);
5966 if ((r = onlyLUKS2(cd)))
5969 r = LUKS2_token_keyring_json(json, sizeof(json), params);
5973 return LUKS2_token_create(cd, &cd->u.luks2.hdr, token, json, 1);
/* crypt_token_assign_keyslot - link token to keyslot (assign=1, commit=1). */
5976 int crypt_token_assign_keyslot(struct crypt_device *cd, int token, int keyslot)
5980 if ((r = onlyLUKS2(cd)))
5983 return LUKS2_token_assign(cd, &cd->u.luks2.hdr, keyslot, token, 1, 1);
/* crypt_token_unassign_keyslot - remove the link (assign=0, commit=1). */
5986 int crypt_token_unassign_keyslot(struct crypt_device *cd, int token, int keyslot)
5990 if ((r = onlyLUKS2(cd)))
5993 return LUKS2_token_assign(cd, &cd->u.luks2.hdr, keyslot, token, 0, 1);
/* crypt_token_is_assigned - read-only query of the token/keyslot link. */
5996 int crypt_token_is_assigned(struct crypt_device *cd, int token, int keyslot)
6000 if ((r = _onlyLUKS2(cd, CRYPT_CD_QUIET | CRYPT_CD_UNRESTRICTED, 0)))
6003 return LUKS2_token_is_assigned(&cd->u.luks2.hdr, keyslot, token);
/* crypt_metadata_locking_enabled - current global metadata-locking state.
 * NOTE(review): elided source view; comments only. */
6007 int crypt_metadata_locking_enabled(void)
6009 return _metadata_locking;
/* crypt_metadata_locking - global toggle; re-enabling after it was turned
 * off is refused (the early return for that case is elided here). */
6012 int crypt_metadata_locking(struct crypt_device *cd __attribute__((unused)), int enable)
6014 if (enable && !_metadata_locking)
6017 _metadata_locking = enable ? 1 : 0;
/* crypt_persistent_flags_set - store activation flags or requirements in
 * the LUKS2 header, depending on the flags type selector. */
6021 int crypt_persistent_flags_set(struct crypt_device *cd, crypt_flags_type type, uint32_t flags)
6025 if ((r = onlyLUKS2(cd)))
6028 if (type == CRYPT_FLAGS_ACTIVATION)
6029 return LUKS2_config_set_flags(cd, &cd->u.luks2.hdr, flags);
6031 if (type == CRYPT_FLAGS_REQUIREMENTS)
6032 return LUKS2_config_set_requirements(cd, &cd->u.luks2.hdr, flags, true);
/* crypt_persistent_flags_get - read the same two flag sets; UNRESTRICTED
 * so requirements can be queried even when unmet. */
6037 int crypt_persistent_flags_get(struct crypt_device *cd, crypt_flags_type type, uint32_t *flags)
6044 if ((r = _onlyLUKS2(cd, CRYPT_CD_UNRESTRICTED, 0)))
6047 if (type == CRYPT_FLAGS_ACTIVATION)
6048 return LUKS2_config_get_flags(cd, &cd->u.luks2.hdr, flags);
6050 if (type == CRYPT_FLAGS_REQUIREMENTS)
6051 return LUKS2_config_get_requirements(cd, &cd->u.luks2.hdr, flags);
/*
 * update_volume_key_segment_digest - repoint the default crypt segment to
 * a single digest: first drop every digest assignment for the segment
 * in memory, then assign the given digest (commit flag controls header
 * write-out).
 * NOTE(review): elided source view; comments only.
 */
6056 static int update_volume_key_segment_digest(struct crypt_device *cd, struct luks2_hdr *hdr, int digest, int commit)
6060 /* Remove any assignments in memory */
6061 r = LUKS2_digest_segment_assign(cd, hdr, CRYPT_DEFAULT_SEGMENT, CRYPT_ANY_DIGEST, 0, 0);
6065 /* Assign it to the specific digest */
6066 return LUKS2_digest_segment_assign(cd, hdr, CRYPT_DEFAULT_SEGMENT, digest, 1, commit);
/*
 * verify_and_update_segment_digest - fetch the key stored in a specific
 * keyslot via the keyslot context, verify it against that keyslot's
 * digest, and if it is not already the default-segment key, make its
 * digest the default-segment digest (committed). Returns the keyslot id
 * on success, negative error otherwise; always frees the temporary key.
 */
6069 static int verify_and_update_segment_digest(struct crypt_device *cd,
6070 struct luks2_hdr *hdr, int keyslot, struct crypt_keyslot_context *kc)
6073 struct volume_key *vk = NULL;
6076 assert(kc->get_luks2_key);
6077 assert(keyslot >= 0);
/* CRYPT_ANY_SEGMENT: unlock the keyslot regardless of segment binding. */
6079 r = kc->get_luks2_key(cd, kc, keyslot, CRYPT_ANY_SEGMENT, &vk);
6083 /* check volume_key (param) digest matches keyslot digest */
6084 r = LUKS2_digest_verify(cd, hdr, vk, keyslot);
6089 /* nothing to do, volume key in keyslot is already assigned to default segment */
6090 r = LUKS2_digest_verify_by_segment(cd, hdr, CRYPT_DEFAULT_SEGMENT, vk);
6094 /* FIXME: check new volume key is usable with current default segment */
6096 r = update_volume_key_segment_digest(cd, &cd->u.luks2.hdr, digest, 1);
6098 log_err(cd, _("Failed to assign keyslot %u as the new volume key."), keyslot);
6100 crypt_free_volume_key(vk);
6102 return r < 0 ? r : keyslot;
/*
 * luks2_keyslot_add_by_verified_volume_key - store a new LUKS2 keyslot for
 * an already-verified volume key (vk must carry a valid digest id, see
 * asserts). Builds default keyslot params, assigns the keyslot to the
 * key's digest (uncommitted), then writes the keyslot material encrypted
 * by the new passphrase. Returns the new keyslot id on success.
 * NOTE(review): elided source view; comments only.
 */
6105 static int luks2_keyslot_add_by_verified_volume_key(struct crypt_device *cd,
6107 const char *new_passphrase,
6108 size_t new_passphrase_size,
6109 struct volume_key *vk)
6112 struct luks2_keyslot_params params;
6115 assert(keyslot_new >= 0);
6116 assert(new_passphrase);
6118 assert(crypt_volume_key_get_id(vk) >= 0);
6120 r = LUKS2_keyslot_params_default(cd, &cd->u.luks2.hdr, &params);
6122 log_err(cd, _("Failed to initialize default LUKS2 keyslot parameters."));
6126 r = LUKS2_digest_assign(cd, &cd->u.luks2.hdr, keyslot_new, crypt_volume_key_get_id(vk), 1, 0);
6128 log_err(cd, _("Failed to assign keyslot %d to digest."), keyslot_new);
6132 r = LUKS2_keyslot_store(cd, &cd->u.luks2.hdr, keyslot_new,
6133 CONST_CAST(char*)new_passphrase,
6134 new_passphrase_size, vk, &params);
6136 return r < 0 ? r : keyslot_new;
/*
 * luks2_keyslot_add_by_volume_key - verify the provided key against the
 * default segment digest first (tagging vk with the digest id), then
 * delegate to the verified-key path above.
 */
6139 static int luks2_keyslot_add_by_volume_key(struct crypt_device *cd,
6141 const char *new_passphrase,
6142 size_t new_passphrase_size,
6143 struct volume_key *vk)
6148 assert(keyslot_new >= 0);
6149 assert(new_passphrase);
6152 r = LUKS2_digest_verify_by_segment(cd, &cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT, vk);
6154 crypt_volume_key_set_id(vk, r);
6157 log_err(cd, _("Volume key does not match the volume."));
6161 return luks2_keyslot_add_by_verified_volume_key(cd, keyslot_new, new_passphrase, new_passphrase_size, vk);
/*
 * luks1_keyslot_add_by_volume_key - LUKS1 counterpart: verify the key
 * against the header's MK digest, then write the keyslot via LUKS_set_key.
 */
6164 static int luks1_keyslot_add_by_volume_key(struct crypt_device *cd,
6166 const char *new_passphrase,
6167 size_t new_passphrase_size,
6168 struct volume_key *vk)
6173 assert(keyslot_new >= 0);
6174 assert(new_passphrase);
6177 r = LUKS_verify_volume_key(&cd->u.luks1.hdr, vk);
6179 log_err(cd, _("Volume key does not match the volume."));
6183 r = LUKS_set_key(keyslot_new, CONST_CAST(char*)new_passphrase,
6184 new_passphrase_size, &cd->u.luks1.hdr, vk, cd);
6186 return r < 0 ? r : keyslot_new;
/*
 * keyslot_add_by_key - common dispatcher for adding a keyslot from a raw
 * volume key. With no special flags it takes the simple LUKS1/LUKS2 verify
 * and-store path. With flags (LUKS2 only) it resolves which digest the key
 * should use: reuse the segment digest if the key matches it (clearing
 * VOLUME_KEY_SET), optionally reuse any matching digest, or create a fresh
 * pbkdf2 digest for unbound/new volume keys; VOLUME_KEY_SET additionally
 * repoints the default segment to the new digest (uncommitted, committed
 * later by the keyslot store path).
 * NOTE(review): elided source view; comments only.
 */
6189 static int keyslot_add_by_key(struct crypt_device *cd,
6192 const char *new_passphrase,
6193 size_t new_passphrase_size,
6194 struct volume_key *vk,
6200 assert(keyslot_new >= 0);
6201 assert(new_passphrase);
/* Fast path: no flags, plain keyslot add for either LUKS version. */
6205 return is_luks1 ? luks1_keyslot_add_by_volume_key(cd, keyslot_new, new_passphrase, new_passphrase_size, vk) :
6206 luks2_keyslot_add_by_volume_key(cd, keyslot_new, new_passphrase, new_passphrase_size, vk);
6211 digest = LUKS2_digest_verify_by_segment(cd, &cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT, vk);
6212 if (digest >= 0) /* if key matches volume key digest tear down new vk flag */
6213 flags &= ~CRYPT_VOLUME_KEY_SET;
6215 /* if key matches any existing digest, do not create new digest */
6216 if ((flags & CRYPT_VOLUME_KEY_DIGEST_REUSE))
6217 digest = LUKS2_digest_any_matching(cd, &cd->u.luks2.hdr, vk);
6219 /* no segment flag or new vk flag requires new key digest */
6220 if (flags & (CRYPT_VOLUME_KEY_NO_SEGMENT | CRYPT_VOLUME_KEY_SET)) {
6221 if (digest < 0 || !(flags & CRYPT_VOLUME_KEY_DIGEST_REUSE))
6222 digest = LUKS2_digest_create(cd, "pbkdf2", &cd->u.luks2.hdr, vk);
6228 log_err(cd, _("Volume key does not match the volume."));
6232 crypt_volume_key_set_id(vk, digest);
6234 if (flags & CRYPT_VOLUME_KEY_SET) {
6235 r = update_volume_key_segment_digest(cd, &cd->u.luks2.hdr, digest, 0);
6237 log_err(cd, _("Failed to assign keyslot %u as the new volume key."), keyslot_new);
6241 r = luks2_keyslot_add_by_verified_volume_key(cd, keyslot_new, new_passphrase, new_passphrase_size, vk);
6243 return r < 0 ? r : keyslot_new;
/*
 * crypt_keyslot_add_by_key - public wrapper: add a keyslot unlocked by
 * `passphrase`, using `volume_key` (or the context/stored key when NULL —
 * the NULL branch selection is elided in this view). NO_SEGMENT together
 * with SET is contradictory and rejected. For VOLUME_KEY_SET on an
 * already-active LUKS2 keyslot it short-circuits into
 * verify_and_update_segment_digest() (change which key is "the" volume
 * key); otherwise it builds stack keyslot contexts for the key and the
 * new passphrase and defers to crypt_keyslot_add_by_keyslot_context().
 * Contexts are destroyed on all paths.
 * NOTE(review): elided source view; comments only.
 */
6246 int crypt_keyslot_add_by_key(struct crypt_device *cd,
6248 const char *volume_key,
6249 size_t volume_key_size,
6250 const char *passphrase,
6251 size_t passphrase_size,
6255 struct crypt_keyslot_context kc, new_kc;
6257 if (!passphrase || ((flags & CRYPT_VOLUME_KEY_NO_SEGMENT) &&
6258 (flags & CRYPT_VOLUME_KEY_SET)))
6261 if ((r = onlyLUKS(cd)) < 0)
6264 if ((flags & CRYPT_VOLUME_KEY_SET) && crypt_keyslot_status(cd, keyslot) > CRYPT_SLOT_INACTIVE &&
6265 isLUKS2(cd->type)) {
/* Unlock source: explicit key if given, else the passphrase itself
 * (the selecting if/else lines are elided here). */
6267 crypt_keyslot_unlock_by_key_init_internal(&kc, volume_key, volume_key_size);
6269 crypt_keyslot_unlock_by_passphrase_init_internal(&kc, passphrase, passphrase_size);
6271 r = verify_and_update_segment_digest(cd, &cd->u.luks2.hdr, keyslot, &kc);
6273 crypt_keyslot_context_destroy_internal(&kc);
6278 crypt_keyslot_unlock_by_key_init_internal(&kc, volume_key, volume_key_size);
6279 crypt_keyslot_unlock_by_passphrase_init_internal(&new_kc, passphrase, passphrase_size);
6281 r = crypt_keyslot_add_by_keyslot_context(cd, CRYPT_ANY_SLOT, &kc, keyslot, &new_kc, flags);
6283 crypt_keyslot_context_destroy_internal(&kc);
6284 crypt_keyslot_context_destroy_internal(&new_kc);
/*
 * crypt_keyslot_add_by_keyslot_context - generic "add keyslot" entry:
 * obtain the volume key via the existing context kc (existing keyslot,
 * raw key, token, ...), obtain the new passphrase via new_kc, and store a
 * new keyslot. Flags are LUKS2-only (hence flags ? onlyLUKS2 : onlyLUKS).
 * Key acquisition order: with no active slots a raw-key context is
 * mandatory; otherwise the per-type volume-key getter; NO_SEGMENT with a
 * raw-key context of unknown content generates a fresh random key;
 * otherwise a cached cd->volume_key is copied. A token-type new_kc is
 * additionally pre-assigned to the new slot. On error after metadata was
 * touched, in-memory LUKS2 state is rolled back. Returns new keyslot id
 * or negative error.
 * NOTE(review): elided source view; comments only.
 */
6289 int crypt_keyslot_add_by_keyslot_context(struct crypt_device *cd,
6290 int keyslot_existing,
6291 struct crypt_keyslot_context *kc,
6293 struct crypt_keyslot_context *new_kc,
6297 int active_slots, r;
6298 const char *new_passphrase;
6299 size_t new_passphrase_size;
6300 struct volume_key *vk = NULL;
6302 if (!kc || ((flags & CRYPT_VOLUME_KEY_NO_SEGMENT) &&
6303 (flags & CRYPT_VOLUME_KEY_SET)))
6306 r = flags ? onlyLUKS2(cd) : onlyLUKS(cd);
/* "Set new volume key" on an active slot is a digest update, not an add. */
6310 if ((flags & CRYPT_VOLUME_KEY_SET) && crypt_keyslot_status(cd, keyslot_existing) > CRYPT_SLOT_INACTIVE)
6311 return verify_and_update_segment_digest(cd, &cd->u.luks2.hdr, keyslot_existing, kc);
6313 if (!new_kc || !new_kc->get_passphrase)
6316 log_dbg(cd, "Adding new keyslot %d by %s%s, volume key provided by %s (%d).",
6317 keyslot_new, keyslot_context_type_string(new_kc),
6318 (flags & CRYPT_VOLUME_KEY_NO_SEGMENT) ? " unassigned to a crypt segment" : "",
6319 keyslot_context_type_string(kc), keyslot_existing);
/* Validates keyslot_new or picks a free slot when CRYPT_ANY_SLOT. */
6321 r = keyslot_verify_or_find_empty(cd, &keyslot_new);
6325 is_luks1 = isLUKS1(cd->type);
6327 active_slots = LUKS_keyslot_active_count(&cd->u.luks1.hdr);
6329 active_slots = LUKS2_keyslot_active_count(&cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT);
6331 if (active_slots < 0)
/* No active slot means nothing can unlock the key — only an explicit
 * raw-key context can supply it. */
6334 if (active_slots == 0 && kc->type != CRYPT_KC_TYPE_KEY)
6336 else if (is_luks1 && kc->get_luks1_volume_key)
6337 r = kc->get_luks1_volume_key(cd, kc, keyslot_existing, &vk);
6338 else if (!is_luks1 && kc->get_luks2_volume_key)
6339 r = kc->get_luks2_volume_key(cd, kc, keyslot_existing, &vk);
6344 if ((flags & CRYPT_VOLUME_KEY_NO_SEGMENT) && kc->type == CRYPT_KC_TYPE_KEY) {
/* Unbound keyslot with no key supplied: generate a random one. */
6345 if (!(vk = crypt_generate_volume_key(cd, kc->u.k.volume_key_size)))
6348 } else if (cd->volume_key) {
/* Reuse the volume key cached in the context (deep copy). */
6349 if (!(vk = crypt_alloc_volume_key(cd->volume_key->keylength, cd->volume_key->key)))
6352 } else if (active_slots == 0) {
6353 log_err(cd, _("Cannot add key slot, all slots disabled and no volume key provided."));
6361 r = new_kc->get_passphrase(cd, new_kc, &new_passphrase, &new_passphrase_size);
6362 /* If new keyslot context is token just assign it to new keyslot */
6363 if (r >= 0 && new_kc->type == CRYPT_KC_TYPE_TOKEN && !is_luks1)
6364 r = LUKS2_token_assign(cd, &cd->u.luks2.hdr, keyslot_new, new_kc->u.t.id, 1, 0);
6366 r = keyslot_add_by_key(cd, is_luks1, keyslot_new, new_passphrase, new_passphrase_size, vk, flags);
6368 crypt_free_volume_key(vk);
6371 _luks2_rollback(cd);
/*
 * crypt_use_keyring_for_vk - decide whether the volume key may be passed
 * to dm-crypt through the kernel keyring: requires a LUKS2 device, the
 * global toggle on, userspace keyring support, and dm-crypt advertising
 * keyring capability (with a fallback heuristic when dm flags cannot be
 * queried). Returns boolean-ish 0/nonzero.
 * NOTE(review): elided source view; comments only.
 */
6381 int crypt_use_keyring_for_vk(struct crypt_device *cd)
6385 /* dm backend must be initialized */
6386 if (!cd || !isLUKS2(cd->type))
6389 if (!_vk_via_keyring || !kernel_keyring_support())
/* dm flag query failed: fall back to the known dm-crypt keyring bug check. */
6392 if (dm_flags(cd, DM_CRYPT, &dmc_flags))
6393 return dmcrypt_keyring_bug() ? 0 : 1;
6395 return (dmc_flags & DM_KERNEL_KEYRING_SUPPORTED);
/* crypt_volume_key_keyring - global on/off switch for keyring key upload. */
6398 int crypt_volume_key_keyring(struct crypt_device *cd __attribute__((unused)), int enable)
6400 _vk_via_keyring = enable ? 1 : 0;
/*
 * crypt_volume_key_load_in_keyring - upload vk into the thread keyring as
 * a logon key (not readable back by userspace) under vk->key_description;
 * marks the context as having a key in the keyring on success.
 */
6405 int crypt_volume_key_load_in_keyring(struct crypt_device *cd, struct volume_key *vk)
6408 const char *type_name = key_type_name(LOGON_KEY);
6410 if (!vk || !cd || !type_name)
6413 if (!vk->key_description) {
6414 log_dbg(cd, "Invalid key description");
6418 log_dbg(cd, "Loading key (%zu bytes, type %s) in thread keyring.", vk->keylength, type_name);
6420 r = keyring_add_key_in_thread_keyring(LOGON_KEY, vk->key_description, vk->key, vk->keylength);
6422 log_dbg(cd, "keyring_add_key_in_thread_keyring failed (error %d)", r);
6423 log_err(cd, _("Failed to load key in kernel keyring."));
6425 crypt_set_key_in_keyring(cd, 1);
/* crypt_key_in_keyring - query the context's keyring flag. */
6431 int crypt_key_in_keyring(struct crypt_device *cd)
6433 return cd ? cd->key_in_keyring : 0;
/* crypt_set_key_in_keyring - set/clear the context's keyring flag. */
6437 void crypt_set_key_in_keyring(struct crypt_device *cd, unsigned key_in_keyring)
6442 cd->key_in_keyring = key_in_keyring;
/*
 * crypt_drop_keyring_key_by_description - revoke and unlink one kernel
 * keyring key by description/type; best-effort (failure only logged at
 * debug level), then clears the context's key-in-keyring flag.
 * NOTE(review): elided source view; comments only.
 */
6446 void crypt_drop_keyring_key_by_description(struct crypt_device *cd, const char *key_description, key_type_t ktype)
6449 const char *type_name = key_type_name(ktype);
6451 if (!key_description || !type_name)
6454 log_dbg(cd, "Requesting keyring %s key for revoke and unlink.", type_name);
6456 r = keyring_revoke_and_unlink_key(ktype, key_description);
6458 log_dbg(cd, "keyring_revoke_and_unlink_key failed (error %d)", r);
6459 crypt_set_key_in_keyring(cd, 0);
/* crypt_drop_keyring_key - walk a volume-key list and drop each key's
 * keyring entry (logon type). */
6463 void crypt_drop_keyring_key(struct crypt_device *cd, struct volume_key *vks)
6465 struct volume_key *vk = vks;
6468 crypt_drop_keyring_key_by_description(cd, vk->key_description, LOGON_KEY);
6469 vk = crypt_volume_key_next(vk);
/*
 * crypt_activate_by_keyring - fetch a passphrase stored in the kernel
 * keyring under key_description and activate/check with it like
 * crypt_activate_by_passphrase. The retrieved passphrase is wiped and
 * freed (crypt_safe_free) after use.
 */
6473 int crypt_activate_by_keyring(struct crypt_device *cd,
6475 const char *key_description,
6480 size_t passphrase_size;
6483 if (!cd || !key_description)
6486 log_dbg(cd, "%s volume %s [keyslot %d] using passphrase in keyring.",
6487 name ? "Activating" : "Checking", name ?: "passphrase", keyslot);
6489 if (!kernel_keyring_support()) {
6490 log_err(cd, _("Kernel keyring is not supported by the kernel."));
6494 r = _activate_check_status(cd, name, flags & CRYPT_ACTIVATE_REFRESH);
6498 r = keyring_get_passphrase(key_description, &passphrase, &passphrase_size);
6500 log_err(cd, _("Failed to read passphrase from keyring (error %d)."), r);
6504 r = _activate_by_passphrase(cd, name, keyslot, passphrase, passphrase_size, flags);
6506 crypt_safe_free(passphrase);
/* NOTE(review): elided source view; comments only. */
6512 * Workaround for serialization of parallel activation and memory-hard PBKDF
6513 * In specific situation (systemd activation) this causes OOM killer activation.
6514 * For now, let's provide this ugly way to serialize unlocking of devices.
/* crypt_serialize_lock - take the global "memory-hard-access" write lock
 * (no-op unless serialization was enabled on this context); on failure the
 * handle is cleared so unlock stays safe. */
6516 int crypt_serialize_lock(struct crypt_device *cd)
6518 if (!cd->memory_hard_pbkdf_lock_enabled)
6521 log_dbg(cd, "Taking global memory-hard access serialization lock.");
6522 if (crypt_write_lock(cd, "memory-hard-access", true, &cd->pbkdf_memory_hard_lock)) {
6523 log_err(cd, _("Failed to acquire global memory-hard access serialization lock."));
6524 cd->pbkdf_memory_hard_lock = NULL;
/* crypt_serialize_unlock - release the serialization lock and drop the
 * handle; mirrors the enable check above. */
6531 void crypt_serialize_unlock(struct crypt_device *cd)
6533 if (!cd->memory_hard_pbkdf_lock_enabled)
6536 crypt_unlock_internal(cd, cd->pbkdf_memory_hard_lock);
6537 cd->pbkdf_memory_hard_lock = NULL;
/*
 * crypt_reencrypt_status - reencryption state of the device; params (when
 * non-NULL) is zeroed first and filled from LUKS2 metadata. LUKS1 never
 * reencrypts online (NONE); non-LUKS or restricted LUKS2 headers report
 * INVALID. The online-reencrypt requirement is explicitly tolerated.
 */
6540 crypt_reencrypt_info crypt_reencrypt_status(struct crypt_device *cd,
6541 struct crypt_params_reencrypt *params)
6544 memset(params, 0, sizeof(*params));
6546 if (!cd || !isLUKS(cd->type))
6547 return CRYPT_REENCRYPT_INVALID;
6549 if (isLUKS1(cd->type))
6550 return CRYPT_REENCRYPT_NONE;
6552 if (_onlyLUKS2(cd, CRYPT_CD_QUIET, CRYPT_REQUIREMENT_ONLINE_REENCRYPT))
6553 return CRYPT_REENCRYPT_INVALID;
6555 return LUKS2_reencrypt_get_params(&cd->u.luks2.hdr, params);
6558 static void __attribute__((destructor)) libcryptsetup_exit(void)
6560 crypt_token_unload_external_all(NULL);
6562 crypt_backend_destroy();
6563 crypt_random_exit();