2 * libcryptsetup - cryptsetup library
4 * Copyright (C) 2004 Jana Saout <jana@saout.de>
5 * Copyright (C) 2004-2007 Clemens Fruhwirth <clemens@endorphin.org>
6 * Copyright (C) 2009-2021 Red Hat, Inc. All rights reserved.
7 * Copyright (C) 2009-2021 Milan Broz
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version 2
12 * of the License, or (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
28 #include <sys/utsname.h>
31 #include "libcryptsetup.h"
37 #include "integrity.h"
39 #include "utils_device_locking.h"
42 #define CRYPT_CD_UNRESTRICTED (1 << 0)
43 #define CRYPT_CD_QUIET (1 << 1)
48 struct device *device;
49 struct device *metadata_device;
51 struct volume_key *volume_key;
53 uint32_t compatibility;
54 struct crypt_pbkdf_type pbkdf;
56 /* global context scope settings */
57 unsigned key_in_keyring:1;
60 uint64_t metadata_size; /* Used in LUKS2 format */
61 uint64_t keyslots_size; /* Used in LUKS2 format */
63 /* Workaround for OOM during parallel activation (like in systemd) */
64 bool memory_hard_pbkdf_lock_enabled;
65 struct crypt_lock_handle *pbkdf_memory_hard_lock;
67 // FIXME: private binary headers and access it properly
68 // through sub-library (LUKS1, TCRYPT)
71 struct { /* used in CRYPT_LUKS1 */
75 struct { /* used in CRYPT_LUKS2 */
77 char cipher[MAX_CIPHER_LEN]; /* only for compatibility */
78 char cipher_mode[MAX_CIPHER_LEN]; /* only for compatibility */
80 unsigned int keyslot_key_size;
81 struct luks2_reencrypt *rh;
83 struct { /* used in CRYPT_PLAIN */
84 struct crypt_params_plain hdr;
87 const char *cipher_mode;
88 unsigned int key_size;
90 struct { /* used in CRYPT_LOOPAES */
91 struct crypt_params_loopaes hdr;
94 const char *cipher_mode;
95 unsigned int key_size;
97 struct { /* used in CRYPT_VERITY */
98 struct crypt_params_verity hdr;
99 const char *root_hash;
100 unsigned int root_hash_size;
102 struct device *fec_device;
104 struct { /* used in CRYPT_TCRYPT */
105 struct crypt_params_tcrypt params;
106 struct tcrypt_phdr hdr;
108 struct { /* used in CRYPT_INTEGRITY */
109 struct crypt_params_integrity params;
110 struct volume_key *journal_mac_key;
111 struct volume_key *journal_crypt_key;
114 struct { /* used in CRYPT_BITLK */
115 struct bitlk_metadata params;
118 struct { /* used if initialized without header by name */
120 /* buffers, must refresh from kernel on every query */
121 char cipher_spec[MAX_CIPHER_LEN*2+1];
122 char cipher[MAX_CIPHER_LEN];
123 const char *cipher_mode;
124 unsigned int key_size;
128 /* callbacks definitions */
129 void (*log)(int level, const char *msg, void *usrptr);
131 int (*confirm)(const char *msg, void *usrptr);
132 void *confirm_usrptr;
135 /* Just to suppress redundant messages about crypto backend */
136 static int _crypto_logged = 0;
139 static void (*_default_log)(int level, const char *msg, void *usrptr) = NULL;
140 static int _debug_level = 0;
142 /* Library can do metadata locking */
143 static int _metadata_locking = 1;
145 /* Library scope detection for kernel keyring support */
146 static int _kernel_keyring_supported;
148 /* Library allowed to use kernel keyring for loading VK in kernel crypto layer */
149 static int _vk_via_keyring = 1;
151 void crypt_set_debug_level(int level)
153 _debug_level = level;
156 int crypt_get_debug_level(void)
161 void crypt_log(struct crypt_device *cd, int level, const char *msg)
166 if (level < _debug_level)
170 cd->log(level, msg, cd->log_usrptr);
171 else if (_default_log)
172 _default_log(level, msg, NULL);
173 /* Default to stdout/stderr if there is no callback. */
175 fprintf(level == CRYPT_LOG_ERROR ? stderr : stdout, "%s", msg);
178 __attribute__((format(printf, 5, 6)))
179 void logger(struct crypt_device *cd, int level, const char *file,
180 int line, const char *format, ...)
183 char target[LOG_MAX_LEN + 2];
186 va_start(argp, format);
188 len = vsnprintf(&target[0], LOG_MAX_LEN, format, argp);
189 if (len > 0 && len < LOG_MAX_LEN) {
190 /* All verbose and error messages in tools end with EOL. */
191 if (level == CRYPT_LOG_VERBOSE || level == CRYPT_LOG_ERROR ||
192 level == CRYPT_LOG_DEBUG || level == CRYPT_LOG_DEBUG_JSON)
193 strncat(target, "\n", LOG_MAX_LEN);
195 crypt_log(cd, level, target);
201 static const char *mdata_device_path(struct crypt_device *cd)
203 return device_path(cd->metadata_device ?: cd->device);
206 static const char *data_device_path(struct crypt_device *cd)
208 return device_path(cd->device);
212 struct device *crypt_metadata_device(struct crypt_device *cd)
214 return cd->metadata_device ?: cd->device;
217 struct device *crypt_data_device(struct crypt_device *cd)
222 int init_crypto(struct crypt_device *ctx)
227 r = crypt_random_init(ctx);
229 log_err(ctx, _("Cannot initialize crypto RNG backend."));
233 r = crypt_backend_init();
235 log_err(ctx, _("Cannot initialize crypto backend."));
237 if (!r && !_crypto_logged) {
238 log_dbg(ctx, "Crypto backend (%s) initialized in cryptsetup library version %s.",
239 crypt_backend_version(), PACKAGE_VERSION);
241 log_dbg(ctx, "Detected kernel %s %s %s.",
242 uts.sysname, uts.release, uts.machine);
249 static int process_key(struct crypt_device *cd, const char *hash_name,
250 size_t key_size, const char *pass, size_t passLen,
251 struct volume_key **vk)
258 *vk = crypt_alloc_volume_key(key_size, NULL);
263 r = crypt_plain_hash(cd, hash_name, (*vk)->key, key_size, pass, passLen);
266 log_err(cd, _("Hash algorithm %s not supported."),
269 log_err(cd, _("Key processing error (using hash %s)."),
271 crypt_free_volume_key(*vk);
275 } else if (passLen > key_size) {
276 memcpy((*vk)->key, pass, key_size);
278 memcpy((*vk)->key, pass, passLen);
284 static int isPLAIN(const char *type)
286 return (type && !strcmp(CRYPT_PLAIN, type));
289 static int isLUKS1(const char *type)
291 return (type && !strcmp(CRYPT_LUKS1, type));
294 static int isLUKS2(const char *type)
296 return (type && !strcmp(CRYPT_LUKS2, type));
299 static int isLUKS(const char *type)
301 return (isLUKS2(type) || isLUKS1(type));
304 static int isLOOPAES(const char *type)
306 return (type && !strcmp(CRYPT_LOOPAES, type));
309 static int isVERITY(const char *type)
311 return (type && !strcmp(CRYPT_VERITY, type));
314 static int isTCRYPT(const char *type)
316 return (type && !strcmp(CRYPT_TCRYPT, type));
319 static int isINTEGRITY(const char *type)
321 return (type && !strcmp(CRYPT_INTEGRITY, type));
324 static int isBITLK(const char *type)
326 return (type && !strcmp(CRYPT_BITLK, type));
329 static int _onlyLUKS(struct crypt_device *cd, uint32_t cdflags)
333 if (cd && !cd->type) {
334 if (!(cdflags & CRYPT_CD_QUIET))
335 log_err(cd, _("Cannot determine device type. Incompatible activation of device?"));
339 if (!cd || !isLUKS(cd->type)) {
340 if (!(cdflags & CRYPT_CD_QUIET))
341 log_err(cd, _("This operation is supported only for LUKS device."));
345 if (r || (cdflags & CRYPT_CD_UNRESTRICTED) || isLUKS1(cd->type))
348 return LUKS2_unmet_requirements(cd, &cd->u.luks2.hdr, 0, cdflags & CRYPT_CD_QUIET);
351 static int onlyLUKS(struct crypt_device *cd)
353 return _onlyLUKS(cd, 0);
356 static int _onlyLUKS2(struct crypt_device *cd, uint32_t cdflags, uint32_t mask)
360 if (cd && !cd->type) {
361 if (!(cdflags & CRYPT_CD_QUIET))
362 log_err(cd, _("Cannot determine device type. Incompatible activation of device?"));
366 if (!cd || !isLUKS2(cd->type)) {
367 if (!(cdflags & CRYPT_CD_QUIET))
368 log_err(cd, _("This operation is supported only for LUKS2 device."));
372 if (r || (cdflags & CRYPT_CD_UNRESTRICTED))
375 return LUKS2_unmet_requirements(cd, &cd->u.luks2.hdr, mask, cdflags & CRYPT_CD_QUIET);
379 int onlyLUKS2(struct crypt_device *cd)
381 return _onlyLUKS2(cd, 0, 0);
385 int onlyLUKS2mask(struct crypt_device *cd, uint32_t mask)
387 return _onlyLUKS2(cd, 0, mask);
390 static void crypt_set_null_type(struct crypt_device *cd)
397 cd->u.none.active_name = NULL;
399 cd->metadata_size = 0;
400 cd->keyslots_size = 0;
403 static void crypt_reset_null_type(struct crypt_device *cd)
408 free(cd->u.none.active_name);
409 cd->u.none.active_name = NULL;
412 /* keyslot helpers */
413 static int keyslot_verify_or_find_empty(struct crypt_device *cd, int *keyslot)
415 crypt_keyslot_info ki;
417 if (*keyslot == CRYPT_ANY_SLOT) {
418 if (isLUKS1(cd->type))
419 *keyslot = LUKS_keyslot_find_empty(&cd->u.luks1.hdr);
421 *keyslot = LUKS2_keyslot_find_empty(&cd->u.luks2.hdr);
423 log_err(cd, _("All key slots full."));
428 if (isLUKS1(cd->type))
429 ki = LUKS_keyslot_info(&cd->u.luks1.hdr, *keyslot);
431 ki = LUKS2_keyslot_info(&cd->u.luks2.hdr, *keyslot);
433 case CRYPT_SLOT_INVALID:
434 log_err(cd, _("Key slot %d is invalid, please select between 0 and %d."),
435 *keyslot, LUKS_NUMKEYS - 1);
437 case CRYPT_SLOT_INACTIVE:
440 log_err(cd, _("Key slot %d is full, please select another one."),
445 log_dbg(cd, "Selected keyslot %d.", *keyslot);
450 * compares UUIDs returned by device-mapper (striped by cryptsetup) and uuid in header
452 int crypt_uuid_cmp(const char *dm_uuid, const char *hdr_uuid)
457 if (!dm_uuid || !hdr_uuid)
460 str = strchr(dm_uuid, '-');
464 for (i = 0, j = 1; hdr_uuid[i]; i++) {
465 if (hdr_uuid[i] == '-')
468 if (!str[j] || str[j] == '-')
471 if (str[j] != hdr_uuid[i])
480 * compares type of active device to provided string (only if there is no explicit type)
482 static int crypt_uuid_type_cmp(struct crypt_device *cd, const char *type)
484 struct crypt_dm_active_device dmd;
488 /* Must user header-on-disk if we know type here */
489 if (cd->type || !cd->u.none.active_name)
492 log_dbg(cd, "Checking if active device %s without header has UUID type %s.",
493 cd->u.none.active_name, type);
495 r = dm_query_device(cd, cd->u.none.active_name, DM_ACTIVE_UUID, &dmd);
501 if (dmd.uuid && strlen(dmd.uuid) > len &&
502 !strncmp(dmd.uuid, type, len) && dmd.uuid[len] == '-')
505 free(CONST_CAST(void*)dmd.uuid);
509 int PLAIN_activate(struct crypt_device *cd,
511 struct volume_key *vk,
516 struct crypt_dm_active_device dmd = {
521 log_dbg(cd, "Trying to activate PLAIN device %s using cipher %s.",
522 name, crypt_get_cipher_spec(cd));
524 if (MISALIGNED(size, device_block_size(cd, crypt_data_device(cd)) >> SECTOR_SHIFT)) {
525 log_err(cd, _("Device size is not aligned to device logical block size."));
529 r = dm_crypt_target_set(&dmd.segment, 0, dmd.size, crypt_data_device(cd),
530 vk, crypt_get_cipher_spec(cd), crypt_get_iv_offset(cd),
531 crypt_get_data_offset(cd), crypt_get_integrity(cd),
532 crypt_get_integrity_tag_size(cd), crypt_get_sector_size(cd));
536 r = create_or_reload_device(cd, name, CRYPT_PLAIN, &dmd);
538 dm_targets_free(cd, &dmd);
542 int crypt_confirm(struct crypt_device *cd, const char *msg)
544 if (!cd || !cd->confirm)
547 return cd->confirm(msg, cd->confirm_usrptr);
550 void crypt_set_log_callback(struct crypt_device *cd,
551 void (*log)(int level, const char *msg, void *usrptr),
558 cd->log_usrptr = usrptr;
562 void crypt_set_confirm_callback(struct crypt_device *cd,
563 int (*confirm)(const char *msg, void *usrptr),
567 cd->confirm = confirm;
568 cd->confirm_usrptr = usrptr;
572 const char *crypt_get_dir(void)
577 int crypt_init(struct crypt_device **cd, const char *device)
579 struct crypt_device *h = NULL;
585 log_dbg(NULL, "Allocating context for crypt device %s.", device ?: "(none)");
586 #if !HAVE_DECL_O_CLOEXEC
587 log_dbg(NULL, "Running without O_CLOEXEC.");
590 if (!(h = malloc(sizeof(struct crypt_device))))
593 memset(h, 0, sizeof(*h));
595 r = device_alloc(NULL, &h->device, device);
599 dm_backend_init(NULL);
601 h->rng_type = crypt_random_default_key_rng();
606 device_free(NULL, h->device);
611 static int crypt_check_data_device_size(struct crypt_device *cd)
614 uint64_t size, size_min;
616 /* Check data device size, require at least header or one sector */
617 size_min = crypt_get_data_offset(cd) << SECTOR_SHIFT ?: SECTOR_SIZE;
619 r = device_size(cd->device, &size);
623 if (size < size_min) {
624 log_err(cd, _("Header detected but device %s is too small."),
625 device_path(cd->device));
632 static int _crypt_set_data_device(struct crypt_device *cd, const char *device)
634 struct device *dev = NULL;
637 r = device_alloc(cd, &dev, device);
641 if (!cd->metadata_device) {
642 cd->metadata_device = cd->device;
644 device_free(cd, cd->device);
648 return crypt_check_data_device_size(cd);
651 int crypt_set_data_device(struct crypt_device *cd, const char *device)
653 /* metadata device must be set */
654 if (!cd || !cd->device || !device)
657 log_dbg(cd, "Setting ciphertext data device to %s.", device ?: "(none)");
659 if (!isLUKS1(cd->type) && !isLUKS2(cd->type) && !isVERITY(cd->type) &&
660 !isINTEGRITY(cd->type) && !isTCRYPT(cd->type)) {
661 log_err(cd, _("This operation is not supported for this device type."));
665 if (isLUKS2(cd->type) && crypt_get_luks2_reencrypt(cd)) {
666 log_err(cd, _("Illegal operation with reencryption in-progress."));
670 return _crypt_set_data_device(cd, device);
673 int crypt_init_data_device(struct crypt_device **cd, const char *device, const char *data_device)
680 r = crypt_init(cd, device);
681 if (r || !data_device || !strcmp(device, data_device))
684 log_dbg(NULL, "Setting ciphertext data device to %s.", data_device);
685 r = _crypt_set_data_device(*cd, data_device);
696 struct crypt_pbkdf_type *crypt_get_pbkdf(struct crypt_device *cd)
702 * crypt_load() helpers
704 static int _crypt_load_luks2(struct crypt_device *cd, int reload, int repair)
708 struct luks2_hdr hdr2 = {};
710 log_dbg(cd, "%soading LUKS2 header (repair %sabled).", reload ? "Rel" : "L", repair ? "en" : "dis");
712 r = LUKS2_hdr_read(cd, &hdr2, repair);
716 if (!reload && !(type = strdup(CRYPT_LUKS2))) {
721 if (verify_pbkdf_params(cd, &cd->pbkdf)) {
722 r = init_pbkdf_type(cd, NULL, CRYPT_LUKS2);
728 LUKS2_hdr_free(cd, &cd->u.luks2.hdr);
729 free(cd->u.luks2.keyslot_cipher);
734 memcpy(&cd->u.luks2.hdr, &hdr2, sizeof(hdr2));
735 cd->u.luks2.keyslot_cipher = NULL;
736 cd->u.luks2.rh = NULL;
741 LUKS2_hdr_free(cd, &hdr2);
746 static void _luks2_reload(struct crypt_device *cd)
748 if (!cd || !isLUKS2(cd->type))
751 (void) _crypt_load_luks2(cd, 1, 0);
754 static int _crypt_load_luks(struct crypt_device *cd, const char *requested_type,
755 int require_header, int repair)
758 struct luks_phdr hdr = {};
765 /* This will return 0 if primary LUKS2 header is damaged */
766 version = LUKS2_hdr_version_unlocked(cd, NULL);
768 if ((isLUKS1(requested_type) && version == 2) ||
769 (isLUKS2(requested_type) && version == 1))
775 if (isLUKS1(requested_type) || version == 1) {
776 if (cd->type && isLUKS2(cd->type)) {
777 log_dbg(cd, "Context is already initialized to type %s", cd->type);
781 if (verify_pbkdf_params(cd, &cd->pbkdf)) {
782 r = init_pbkdf_type(cd, NULL, CRYPT_LUKS1);
787 r = LUKS_read_phdr(&hdr, require_header, repair, cd);
791 if (!cd->type && !(cd->type = strdup(CRYPT_LUKS1))) {
796 /* Set hash to the same as in the loaded header */
797 if (!cd->pbkdf.hash || strcmp(cd->pbkdf.hash, hdr.hashSpec)) {
798 free(CONST_CAST(void*)cd->pbkdf.hash);
799 cd->pbkdf.hash = strdup(hdr.hashSpec);
800 if (!cd->pbkdf.hash) {
806 if (asprintf(&cipher_spec, "%s-%s", hdr.cipherName, hdr.cipherMode) < 0) {
811 free(cd->u.luks1.cipher_spec);
812 cd->u.luks1.cipher_spec = cipher_spec;
814 memcpy(&cd->u.luks1.hdr, &hdr, sizeof(hdr));
815 } else if (isLUKS2(requested_type) || version == 2 || version == 0) {
816 if (cd->type && isLUKS1(cd->type)) {
817 log_dbg(cd, "Context is already initialized to type %s", cd->type);
822 * Current LUKS2 repair just overrides blkid probes
823 * and perform auto-recovery if possible. This is safe
824 * unless future LUKS2 repair code do something more
825 * sophisticated. In such case we would need to check
826 * for LUKS2 requirements and decide if it's safe to
829 r = _crypt_load_luks2(cd, cd->type != NULL, repair);
832 log_err(cd, _("Unsupported LUKS version %d."), version);
836 crypt_safe_memzero(&hdr, sizeof(hdr));
841 static int _crypt_load_tcrypt(struct crypt_device *cd, struct crypt_params_tcrypt *params)
852 memcpy(&cd->u.tcrypt.params, params, sizeof(*params));
854 r = TCRYPT_read_phdr(cd, &cd->u.tcrypt.hdr, &cd->u.tcrypt.params);
856 cd->u.tcrypt.params.passphrase = NULL;
857 cd->u.tcrypt.params.passphrase_size = 0;
858 cd->u.tcrypt.params.keyfiles = NULL;
859 cd->u.tcrypt.params.keyfiles_count = 0;
860 cd->u.tcrypt.params.veracrypt_pim = 0;
865 if (!cd->type && !(cd->type = strdup(CRYPT_TCRYPT)))
871 static int _crypt_load_verity(struct crypt_device *cd, struct crypt_params_verity *params)
874 size_t sb_offset = 0;
880 if (params && params->flags & CRYPT_VERITY_NO_HEADER)
884 sb_offset = params->hash_area_offset;
886 r = VERITY_read_sb(cd, sb_offset, &cd->u.verity.uuid, &cd->u.verity.hdr);
890 //FIXME: use crypt_free
891 if (!cd->type && !(cd->type = strdup(CRYPT_VERITY))) {
892 free(CONST_CAST(void*)cd->u.verity.hdr.hash_name);
893 free(CONST_CAST(void*)cd->u.verity.hdr.salt);
894 free(cd->u.verity.uuid);
895 crypt_safe_memzero(&cd->u.verity.hdr, sizeof(cd->u.verity.hdr));
900 cd->u.verity.hdr.flags = params->flags;
902 /* Hash availability checked in sb load */
903 cd->u.verity.root_hash_size = crypt_hash_size(cd->u.verity.hdr.hash_name);
904 if (cd->u.verity.root_hash_size > 4096)
907 if (params && params->data_device &&
908 (r = crypt_set_data_device(cd, params->data_device)) < 0)
911 if (params && params->fec_device) {
912 r = device_alloc(cd, &cd->u.verity.fec_device, params->fec_device);
915 cd->u.verity.hdr.fec_area_offset = params->fec_area_offset;
916 cd->u.verity.hdr.fec_roots = params->fec_roots;
922 static int _crypt_load_integrity(struct crypt_device *cd,
923 struct crypt_params_integrity *params)
931 r = INTEGRITY_read_sb(cd, &cd->u.integrity.params, &cd->u.integrity.sb_flags);
935 // FIXME: add checks for fields in integrity sb vs params
938 cd->u.integrity.params.journal_watermark = params->journal_watermark;
939 cd->u.integrity.params.journal_commit_time = params->journal_commit_time;
940 cd->u.integrity.params.buffer_sectors = params->buffer_sectors;
941 // FIXME: check ENOMEM
942 if (params->integrity)
943 cd->u.integrity.params.integrity = strdup(params->integrity);
944 cd->u.integrity.params.integrity_key_size = params->integrity_key_size;
945 if (params->journal_integrity)
946 cd->u.integrity.params.journal_integrity = strdup(params->journal_integrity);
947 if (params->journal_crypt)
948 cd->u.integrity.params.journal_crypt = strdup(params->journal_crypt);
950 if (params->journal_crypt_key) {
951 cd->u.integrity.journal_crypt_key =
952 crypt_alloc_volume_key(params->journal_crypt_key_size,
953 params->journal_crypt_key);
954 if (!cd->u.integrity.journal_crypt_key)
957 if (params->journal_integrity_key) {
958 cd->u.integrity.journal_mac_key =
959 crypt_alloc_volume_key(params->journal_integrity_key_size,
960 params->journal_integrity_key);
961 if (!cd->u.integrity.journal_mac_key)
966 if (!cd->type && !(cd->type = strdup(CRYPT_INTEGRITY))) {
967 free(CONST_CAST(void*)cd->u.integrity.params.integrity);
974 static int _crypt_load_bitlk(struct crypt_device *cd,
975 struct bitlk_metadata *params)
983 r = BITLK_read_sb(cd, &cd->u.bitlk.params);
987 if (asprintf(&cd->u.bitlk.cipher_spec, "%s-%s",
988 cd->u.bitlk.params.cipher, cd->u.bitlk.params.cipher_mode) < 0) {
989 cd->u.bitlk.cipher_spec = NULL;
993 if (!cd->type && !(cd->type = strdup(CRYPT_BITLK)))
999 int crypt_load(struct crypt_device *cd,
1000 const char *requested_type,
1008 log_dbg(cd, "Trying to load %s crypt type from device %s.",
1009 requested_type ?: "any", mdata_device_path(cd) ?: "(none)");
1011 if (!crypt_metadata_device(cd))
1014 crypt_reset_null_type(cd);
1015 cd->data_offset = 0;
1016 cd->metadata_size = 0;
1017 cd->keyslots_size = 0;
1019 if (!requested_type || isLUKS1(requested_type) || isLUKS2(requested_type)) {
1020 if (cd->type && !isLUKS1(cd->type) && !isLUKS2(cd->type)) {
1021 log_dbg(cd, "Context is already initialized to type %s", cd->type);
1025 r = _crypt_load_luks(cd, requested_type, 1, 0);
1026 } else if (isVERITY(requested_type)) {
1027 if (cd->type && !isVERITY(cd->type)) {
1028 log_dbg(cd, "Context is already initialized to type %s", cd->type);
1031 r = _crypt_load_verity(cd, params);
1032 } else if (isTCRYPT(requested_type)) {
1033 if (cd->type && !isTCRYPT(cd->type)) {
1034 log_dbg(cd, "Context is already initialized to type %s", cd->type);
1037 r = _crypt_load_tcrypt(cd, params);
1038 } else if (isINTEGRITY(requested_type)) {
1039 if (cd->type && !isINTEGRITY(cd->type)) {
1040 log_dbg(cd, "Context is already initialized to type %s", cd->type);
1043 r = _crypt_load_integrity(cd, params);
1044 } else if (isBITLK(requested_type)) {
1045 if (cd->type && !isBITLK(cd->type)) {
1046 log_dbg(cd, "Context is already initialized to type %s", cd->type);
1049 r = _crypt_load_bitlk(cd, params);
1057 * crypt_init() helpers
1059 static int _init_by_name_crypt_none(struct crypt_device *cd)
1062 char _mode[MAX_CIPHER_LEN];
1063 struct crypt_dm_active_device dmd;
1064 struct dm_target *tgt = &dmd.segment;
1066 if (cd->type || !cd->u.none.active_name)
1069 r = dm_query_device(cd, cd->u.none.active_name,
1070 DM_ACTIVE_CRYPT_CIPHER |
1071 DM_ACTIVE_CRYPT_KEYSIZE, &dmd);
1074 if (!single_segment(&dmd) || tgt->type != DM_CRYPT)
1077 r = crypt_parse_name_and_mode(tgt->u.crypt.cipher,
1078 cd->u.none.cipher, NULL,
1082 r = snprintf(cd->u.none.cipher_spec, sizeof(cd->u.none.cipher_spec),
1083 "%s-%s", cd->u.none.cipher, _mode);
1084 if (r < 0 || (size_t)r >= sizeof(cd->u.none.cipher_spec))
1087 cd->u.none.cipher_mode = cd->u.none.cipher_spec + strlen(cd->u.none.cipher) + 1;
1088 cd->u.none.key_size = tgt->u.crypt.vk->keylength;
1093 dm_targets_free(cd, &dmd);
1097 static const char *LUKS_UUID(struct crypt_device *cd)
1101 else if (isLUKS1(cd->type))
1102 return cd->u.luks1.hdr.uuid;
1103 else if (isLUKS2(cd->type))
1104 return cd->u.luks2.hdr.uuid;
1109 static void crypt_free_type(struct crypt_device *cd)
1111 if (isPLAIN(cd->type)) {
1112 free(CONST_CAST(void*)cd->u.plain.hdr.hash);
1113 free(cd->u.plain.cipher);
1114 free(cd->u.plain.cipher_spec);
1115 } else if (isLUKS2(cd->type)) {
1116 LUKS2_reencrypt_free(cd, cd->u.luks2.rh);
1117 LUKS2_hdr_free(cd, &cd->u.luks2.hdr);
1118 free(cd->u.luks2.keyslot_cipher);
1119 } else if (isLUKS1(cd->type)) {
1120 free(cd->u.luks1.cipher_spec);
1121 } else if (isLOOPAES(cd->type)) {
1122 free(CONST_CAST(void*)cd->u.loopaes.hdr.hash);
1123 free(cd->u.loopaes.cipher);
1124 free(cd->u.loopaes.cipher_spec);
1125 } else if (isVERITY(cd->type)) {
1126 free(CONST_CAST(void*)cd->u.verity.hdr.hash_name);
1127 free(CONST_CAST(void*)cd->u.verity.hdr.data_device);
1128 free(CONST_CAST(void*)cd->u.verity.hdr.hash_device);
1129 free(CONST_CAST(void*)cd->u.verity.hdr.fec_device);
1130 free(CONST_CAST(void*)cd->u.verity.hdr.salt);
1131 free(CONST_CAST(void*)cd->u.verity.root_hash);
1132 free(cd->u.verity.uuid);
1133 device_free(cd, cd->u.verity.fec_device);
1134 } else if (isINTEGRITY(cd->type)) {
1135 free(CONST_CAST(void*)cd->u.integrity.params.integrity);
1136 free(CONST_CAST(void*)cd->u.integrity.params.journal_integrity);
1137 free(CONST_CAST(void*)cd->u.integrity.params.journal_crypt);
1138 crypt_free_volume_key(cd->u.integrity.journal_crypt_key);
1139 crypt_free_volume_key(cd->u.integrity.journal_mac_key);
1140 } else if (isBITLK(cd->type)) {
1141 free(cd->u.bitlk.cipher_spec);
1142 BITLK_bitlk_metadata_free(&cd->u.bitlk.params);
1143 } else if (!cd->type) {
1144 free(cd->u.none.active_name);
1145 cd->u.none.active_name = NULL;
1148 crypt_set_null_type(cd);
1151 static int _init_by_name_crypt(struct crypt_device *cd, const char *name)
1154 char **dep, *cipher_spec = NULL, cipher[MAX_CIPHER_LEN], cipher_mode[MAX_CIPHER_LEN], deps_uuid_prefix[40], *deps[MAX_DM_DEPS+1] = {};
1155 const char *dev, *namei;
1157 struct crypt_dm_active_device dmd, dmdi = {}, dmdep = {};
1158 struct dm_target *tgt = &dmd.segment, *tgti = &dmdi.segment;
1160 r = dm_query_device(cd, name,
1163 DM_ACTIVE_CRYPT_CIPHER |
1164 DM_ACTIVE_CRYPT_KEYSIZE, &dmd);
1168 if (tgt->type != DM_CRYPT && tgt->type != DM_LINEAR) {
1169 log_dbg(cd, "Unsupported device table detected in %s.", name);
1177 r = snprintf(deps_uuid_prefix, sizeof(deps_uuid_prefix), CRYPT_SUBDEV "-%.32s", dmd.uuid + 6);
1178 if (r < 0 || (size_t)r != (sizeof(deps_uuid_prefix) - 1))
1183 r = dm_device_deps(cd, name, deps_uuid_prefix, deps, ARRAY_SIZE(deps));
1188 r = crypt_parse_name_and_mode(tgt->type == DM_LINEAR ? "null" : tgt->u.crypt.cipher, cipher,
1189 &key_nums, cipher_mode);
1191 log_dbg(cd, "Cannot parse cipher and mode from active device.");
1197 if (tgt->type == DM_CRYPT && tgt->u.crypt.integrity && (namei = device_dm_name(tgt->data_device))) {
1198 r = dm_query_device(cd, namei, DM_ACTIVE_DEVICE, &dmdi);
1201 if (!single_segment(&dmdi) || tgti->type != DM_INTEGRITY) {
1202 log_dbg(cd, "Unsupported device table detected in %s.", namei);
1206 if (!cd->metadata_device) {
1207 device_free(cd, cd->device);
1208 MOVE_REF(cd->device, tgti->data_device);
1212 /* do not try to lookup LUKS2 header in detached header mode */
1213 if (dmd.uuid && !cd->metadata_device && !found) {
1214 while (*dep && !found) {
1215 r = dm_query_device(cd, *dep, DM_ACTIVE_DEVICE, &dmdep);
1219 tgt = &dmdep.segment;
1221 while (tgt && !found) {
1222 dev = device_path(tgt->data_device);
1227 if (!strstr(dev, dm_get_dir()) ||
1228 !crypt_string_in(dev + strlen(dm_get_dir()) + 1, deps, ARRAY_SIZE(deps))) {
1229 device_free(cd, cd->device);
1230 MOVE_REF(cd->device, tgt->data_device);
1236 dm_targets_free(cd, &dmdep);
1240 if (asprintf(&cipher_spec, "%s-%s", cipher, cipher_mode) < 0) {
1249 if (isPLAIN(cd->type) && single_segment(&dmd) && tgt->type == DM_CRYPT) {
1250 cd->u.plain.hdr.hash = NULL; /* no way to get this */
1251 cd->u.plain.hdr.offset = tgt->u.crypt.offset;
1252 cd->u.plain.hdr.skip = tgt->u.crypt.iv_offset;
1253 cd->u.plain.hdr.sector_size = tgt->u.crypt.sector_size;
1254 cd->u.plain.key_size = tgt->u.crypt.vk->keylength;
1255 cd->u.plain.cipher = strdup(cipher);
1256 MOVE_REF(cd->u.plain.cipher_spec, cipher_spec);
1257 cd->u.plain.cipher_mode = cd->u.plain.cipher_spec + strlen(cipher) + 1;
1258 } else if (isLOOPAES(cd->type) && single_segment(&dmd) && tgt->type == DM_CRYPT) {
1259 cd->u.loopaes.hdr.offset = tgt->u.crypt.offset;
1260 cd->u.loopaes.cipher = strdup(cipher);
1261 MOVE_REF(cd->u.loopaes.cipher_spec, cipher_spec);
1262 cd->u.loopaes.cipher_mode = cd->u.loopaes.cipher_spec + strlen(cipher) + 1;
1263 /* version 3 uses last key for IV */
1264 if (tgt->u.crypt.vk->keylength % key_nums)
1266 cd->u.loopaes.key_size = tgt->u.crypt.vk->keylength / key_nums;
1267 } else if (isLUKS1(cd->type) || isLUKS2(cd->type)) {
1268 if (crypt_metadata_device(cd)) {
1269 r = _crypt_load_luks(cd, cd->type, 0, 0);
1271 log_dbg(cd, "LUKS device header does not match active device.");
1272 crypt_set_null_type(cd);
1273 device_close(cd, cd->metadata_device);
1274 device_close(cd, cd->device);
1278 /* check whether UUIDs match each other */
1279 r = crypt_uuid_cmp(dmd.uuid, LUKS_UUID(cd));
1281 log_dbg(cd, "LUKS device header uuid: %s mismatches DM returned uuid %s",
1282 LUKS_UUID(cd), dmd.uuid);
1283 crypt_free_type(cd);
1288 log_dbg(cd, "LUKS device header not available.");
1289 crypt_set_null_type(cd);
1292 } else if (isTCRYPT(cd->type) && single_segment(&dmd) && tgt->type == DM_CRYPT) {
1293 r = TCRYPT_init_by_name(cd, name, dmd.uuid, tgt, &cd->device,
1294 &cd->u.tcrypt.params, &cd->u.tcrypt.hdr);
1295 } else if (isBITLK(cd->type)) {
1296 r = _crypt_load_bitlk(cd, NULL);
1298 log_dbg(cd, "BITLK device header not available.");
1299 crypt_set_null_type(cd);
1304 dm_targets_free(cd, &dmd);
1305 dm_targets_free(cd, &dmdi);
1306 dm_targets_free(cd, &dmdep);
1307 free(CONST_CAST(void*)dmd.uuid);
1315 static int _init_by_name_verity(struct crypt_device *cd, const char *name)
1317 struct crypt_dm_active_device dmd;
1318 struct dm_target *tgt = &dmd.segment;
1321 r = dm_query_device(cd, name,
1323 DM_ACTIVE_VERITY_HASH_DEVICE |
1324 DM_ACTIVE_VERITY_ROOT_HASH |
1325 DM_ACTIVE_VERITY_PARAMS, &dmd);
1328 if (!single_segment(&dmd) || tgt->type != DM_VERITY) {
1329 log_dbg(cd, "Unsupported device table detected in %s.", name);
1336 if (isVERITY(cd->type)) {
1337 cd->u.verity.uuid = NULL; // FIXME
1338 cd->u.verity.hdr.flags = CRYPT_VERITY_NO_HEADER; //FIXME
1339 cd->u.verity.hdr.data_size = tgt->u.verity.vp->data_size;
1340 cd->u.verity.root_hash_size = tgt->u.verity.root_hash_size;
1341 MOVE_REF(cd->u.verity.hdr.hash_name, tgt->u.verity.vp->hash_name);
1342 cd->u.verity.hdr.data_device = NULL;
1343 cd->u.verity.hdr.hash_device = NULL;
1344 cd->u.verity.hdr.data_block_size = tgt->u.verity.vp->data_block_size;
1345 cd->u.verity.hdr.hash_block_size = tgt->u.verity.vp->hash_block_size;
1346 cd->u.verity.hdr.hash_area_offset = tgt->u.verity.hash_offset;
1347 cd->u.verity.hdr.fec_area_offset = tgt->u.verity.fec_offset;
1348 cd->u.verity.hdr.hash_type = tgt->u.verity.vp->hash_type;
1349 cd->u.verity.hdr.flags = tgt->u.verity.vp->flags;
1350 cd->u.verity.hdr.salt_size = tgt->u.verity.vp->salt_size;
1351 MOVE_REF(cd->u.verity.hdr.salt, tgt->u.verity.vp->salt);
1352 MOVE_REF(cd->u.verity.hdr.fec_device, tgt->u.verity.vp->fec_device);
1353 cd->u.verity.hdr.fec_roots = tgt->u.verity.vp->fec_roots;
1354 MOVE_REF(cd->u.verity.fec_device, tgt->u.verity.fec_device);
1355 MOVE_REF(cd->metadata_device, tgt->u.verity.hash_device);
1356 MOVE_REF(cd->u.verity.root_hash, tgt->u.verity.root_hash);
1359 dm_targets_free(cd, &dmd);
1363 static int _init_by_name_integrity(struct crypt_device *cd, const char *name)
1365 struct crypt_dm_active_device dmd;
1366 struct dm_target *tgt = &dmd.segment;
1369 r = dm_query_device(cd, name, DM_ACTIVE_DEVICE |
1370 DM_ACTIVE_CRYPT_KEY |
1371 DM_ACTIVE_CRYPT_KEYSIZE |
1372 DM_ACTIVE_INTEGRITY_PARAMS, &dmd);
1375 if (!single_segment(&dmd) || tgt->type != DM_INTEGRITY) {
1376 log_dbg(cd, "Unsupported device table detected in %s.", name);
1383 if (isINTEGRITY(cd->type)) {
1384 cd->u.integrity.params.tag_size = tgt->u.integrity.tag_size;
1385 cd->u.integrity.params.sector_size = tgt->u.integrity.sector_size;
1386 cd->u.integrity.params.journal_size = tgt->u.integrity.journal_size;
1387 cd->u.integrity.params.journal_watermark = tgt->u.integrity.journal_watermark;
1388 cd->u.integrity.params.journal_commit_time = tgt->u.integrity.journal_commit_time;
1389 cd->u.integrity.params.interleave_sectors = tgt->u.integrity.interleave_sectors;
1390 cd->u.integrity.params.buffer_sectors = tgt->u.integrity.buffer_sectors;
1391 MOVE_REF(cd->u.integrity.params.integrity, tgt->u.integrity.integrity);
1392 MOVE_REF(cd->u.integrity.params.journal_integrity, tgt->u.integrity.journal_integrity);
1393 MOVE_REF(cd->u.integrity.params.journal_crypt, tgt->u.integrity.journal_crypt);
1395 if (tgt->u.integrity.vk)
1396 cd->u.integrity.params.integrity_key_size = tgt->u.integrity.vk->keylength;
1397 if (tgt->u.integrity.journal_integrity_key)
1398 cd->u.integrity.params.journal_integrity_key_size = tgt->u.integrity.journal_integrity_key->keylength;
1399 if (tgt->u.integrity.journal_crypt_key)
1400 cd->u.integrity.params.integrity_key_size = tgt->u.integrity.journal_crypt_key->keylength;
1401 MOVE_REF(cd->metadata_device, tgt->u.integrity.meta_device);
1404 dm_targets_free(cd, &dmd);
1408 int crypt_init_by_name_and_header(struct crypt_device **cd,
1410 const char *header_device)
1412 crypt_status_info ci;
1413 struct crypt_dm_active_device dmd;
1414 struct dm_target *tgt = &dmd.segment;
1420 log_dbg(NULL, "Allocating crypt device context by device %s.", name);
1422 ci = crypt_status(NULL, name);
1423 if (ci == CRYPT_INVALID)
1426 if (ci < CRYPT_ACTIVE) {
1427 log_err(NULL, _("Device %s is not active."), name);
1431 r = dm_query_device(NULL, name, DM_ACTIVE_DEVICE | DM_ACTIVE_UUID, &dmd);
1437 if (header_device) {
1438 r = crypt_init(cd, header_device);
1440 r = crypt_init(cd, device_path(tgt->data_device));
1442 /* Underlying device disappeared but mapping still active */
1443 if (!tgt->data_device || r == -ENOTBLK)
1444 log_verbose(NULL, _("Underlying device for crypt device %s disappeared."),
1447 /* Underlying device is not readable but crypt mapping exists */
1449 r = crypt_init(cd, NULL);
1456 if (!strncmp(CRYPT_PLAIN, dmd.uuid, sizeof(CRYPT_PLAIN)-1))
1457 (*cd)->type = strdup(CRYPT_PLAIN);
1458 else if (!strncmp(CRYPT_LOOPAES, dmd.uuid, sizeof(CRYPT_LOOPAES)-1))
1459 (*cd)->type = strdup(CRYPT_LOOPAES);
1460 else if (!strncmp(CRYPT_LUKS1, dmd.uuid, sizeof(CRYPT_LUKS1)-1))
1461 (*cd)->type = strdup(CRYPT_LUKS1);
1462 else if (!strncmp(CRYPT_LUKS2, dmd.uuid, sizeof(CRYPT_LUKS2)-1))
1463 (*cd)->type = strdup(CRYPT_LUKS2);
1464 else if (!strncmp(CRYPT_VERITY, dmd.uuid, sizeof(CRYPT_VERITY)-1))
1465 (*cd)->type = strdup(CRYPT_VERITY);
1466 else if (!strncmp(CRYPT_TCRYPT, dmd.uuid, sizeof(CRYPT_TCRYPT)-1))
1467 (*cd)->type = strdup(CRYPT_TCRYPT);
1468 else if (!strncmp(CRYPT_INTEGRITY, dmd.uuid, sizeof(CRYPT_INTEGRITY)-1))
1469 (*cd)->type = strdup(CRYPT_INTEGRITY);
1470 else if (!strncmp(CRYPT_BITLK, dmd.uuid, sizeof(CRYPT_BITLK)-1))
1471 (*cd)->type = strdup(CRYPT_BITLK);
1473 log_dbg(NULL, "Unknown UUID set, some parameters are not set.");
1475 log_dbg(NULL, "Active device has no UUID set, some parameters are not set.");
1477 if (header_device) {
1478 r = crypt_set_data_device(*cd, device_path(tgt->data_device));
1483 /* Try to initialize basic parameters from active device */
1485 if (tgt->type == DM_CRYPT || tgt->type == DM_LINEAR)
1486 r = _init_by_name_crypt(*cd, name);
1487 else if (tgt->type == DM_VERITY)
1488 r = _init_by_name_verity(*cd, name);
1489 else if (tgt->type == DM_INTEGRITY)
1490 r = _init_by_name_integrity(*cd, name);
1495 } else if (!(*cd)->type) {
1496 /* For anonymous device (no header found) remember initialized name */
1497 (*cd)->u.none.active_name = strdup(name);
1500 free(CONST_CAST(void*)dmd.uuid);
1501 dm_targets_free(NULL, &dmd);
1505 int crypt_init_by_name(struct crypt_device **cd, const char *name)
1507 return crypt_init_by_name_and_header(cd, name, NULL);
1511 * crypt_format() helpers
1513 static int _crypt_format_plain(struct crypt_device *cd,
1515 const char *cipher_mode,
1517 size_t volume_key_size,
1518 struct crypt_params_plain *params)
1520 unsigned int sector_size = params ? params->sector_size : SECTOR_SIZE;
1523 if (!cipher || !cipher_mode) {
1524 log_err(cd, _("Invalid plain crypt parameters."));
1528 if (volume_key_size > 1024) {
1529 log_err(cd, _("Invalid key size."));
1534 log_err(cd, _("UUID is not supported for this crypt type."));
1538 if (cd->metadata_device) {
1539 log_err(cd, _("Detached metadata device is not supported for this crypt type."));
1543 /* For compatibility with old params structure */
1545 sector_size = SECTOR_SIZE;
1547 if (sector_size < SECTOR_SIZE || sector_size > MAX_SECTOR_SIZE ||
1548 NOTPOW2(sector_size)) {
1549 log_err(cd, _("Unsupported encryption sector size."));
1553 if (sector_size > SECTOR_SIZE && !device_size(cd->device, &dev_size)) {
1554 if (params && params->offset)
1555 dev_size -= (params->offset * SECTOR_SIZE);
1556 if (dev_size % sector_size) {
1557 log_err(cd, _("Device size is not aligned to requested sector size."));
1562 if (!(cd->type = strdup(CRYPT_PLAIN)))
1565 cd->u.plain.key_size = volume_key_size;
1566 cd->volume_key = crypt_alloc_volume_key(volume_key_size, NULL);
1567 if (!cd->volume_key)
1570 if (asprintf(&cd->u.plain.cipher_spec, "%s-%s", cipher, cipher_mode) < 0) {
1571 cd->u.plain.cipher_spec = NULL;
1574 cd->u.plain.cipher = strdup(cipher);
1575 cd->u.plain.cipher_mode = cd->u.plain.cipher_spec + strlen(cipher) + 1;
1577 if (params && params->hash)
1578 cd->u.plain.hdr.hash = strdup(params->hash);
1580 cd->u.plain.hdr.offset = params ? params->offset : 0;
1581 cd->u.plain.hdr.skip = params ? params->skip : 0;
1582 cd->u.plain.hdr.size = params ? params->size : 0;
1583 cd->u.plain.hdr.sector_size = sector_size;
1585 if (!cd->u.plain.cipher)
1591 static int _crypt_format_luks1(struct crypt_device *cd,
1593 const char *cipher_mode,
1595 const char *volume_key,
1596 size_t volume_key_size,
1597 struct crypt_params_luks1 *params)
1600 unsigned long required_alignment = DEFAULT_DISK_ALIGNMENT;
1601 unsigned long alignment_offset = 0;
1604 if (!cipher || !cipher_mode)
1607 if (!crypt_metadata_device(cd)) {
1608 log_err(cd, _("Can't format LUKS without device."));
1612 if (params && cd->data_offset && params->data_alignment &&
1613 (cd->data_offset % params->data_alignment)) {
1614 log_err(cd, _("Requested data alignment is not compatible with data offset."));
1618 if (!(cd->type = strdup(CRYPT_LUKS1)))
1622 cd->volume_key = crypt_alloc_volume_key(volume_key_size,
1625 cd->volume_key = crypt_generate_volume_key(cd, volume_key_size);
1627 if (!cd->volume_key)
1630 if (verify_pbkdf_params(cd, &cd->pbkdf)) {
1631 r = init_pbkdf_type(cd, NULL, CRYPT_LUKS1);
1636 if (params && params->hash && strcmp(params->hash, cd->pbkdf.hash)) {
1637 free(CONST_CAST(void*)cd->pbkdf.hash);
1638 cd->pbkdf.hash = strdup(params->hash);
1639 if (!cd->pbkdf.hash)
1643 if (params && params->data_device) {
1644 if (!cd->metadata_device)
1645 cd->metadata_device = cd->device;
1647 device_free(cd, cd->device);
1649 if (device_alloc(cd, &cd->device, params->data_device) < 0)
1653 if (params && cd->metadata_device) {
1654 /* For detached header the alignment is used directly as data offset */
1655 if (!cd->data_offset)
1656 cd->data_offset = params->data_alignment;
1657 required_alignment = params->data_alignment * SECTOR_SIZE;
1658 } else if (params && params->data_alignment) {
1659 required_alignment = params->data_alignment * SECTOR_SIZE;
1661 device_topology_alignment(cd, cd->device,
1662 &required_alignment,
1663 &alignment_offset, DEFAULT_DISK_ALIGNMENT);
1665 r = LUKS_check_cipher(cd, volume_key_size, cipher, cipher_mode);
1669 r = LUKS_generate_phdr(&cd->u.luks1.hdr, cd->volume_key, cipher, cipher_mode,
1670 cd->pbkdf.hash, uuid,
1671 cd->data_offset * SECTOR_SIZE,
1672 alignment_offset, required_alignment, cd);
1676 r = device_check_access(cd, crypt_metadata_device(cd), DEV_EXCL);
1680 if (!device_size(crypt_data_device(cd), &dev_size) &&
1681 dev_size < (crypt_get_data_offset(cd) * SECTOR_SIZE))
1682 log_std(cd, _("WARNING: Data offset is outside of currently available data device.\n"));
1684 if (asprintf(&cd->u.luks1.cipher_spec, "%s-%s", cipher, cipher_mode) < 0) {
1685 cd->u.luks1.cipher_spec = NULL;
1689 r = LUKS_wipe_header_areas(&cd->u.luks1.hdr, cd);
1691 free(cd->u.luks1.cipher_spec);
1692 log_err(cd, _("Cannot wipe header on device %s."),
1693 mdata_device_path(cd));
1697 r = LUKS_write_phdr(&cd->u.luks1.hdr, cd);
1699 free(cd->u.luks1.cipher_spec);
1704 static int _crypt_format_luks2(struct crypt_device *cd,
1706 const char *cipher_mode,
1708 const char *volume_key,
1709 size_t volume_key_size,
1710 struct crypt_params_luks2 *params)
1712 int r, integrity_key_size = 0;
1713 unsigned long required_alignment = DEFAULT_DISK_ALIGNMENT;
1714 unsigned long alignment_offset = 0;
1715 unsigned int sector_size = params ? params->sector_size : SECTOR_SIZE;
1716 const char *integrity = params ? params->integrity : NULL;
1720 cd->u.luks2.hdr.jobj = NULL;
1721 cd->u.luks2.keyslot_cipher = NULL;
1723 if (!cipher || !cipher_mode)
1726 if (!crypt_metadata_device(cd)) {
1727 log_err(cd, _("Can't format LUKS without device."));
1731 if (params && cd->data_offset && params->data_alignment &&
1732 (cd->data_offset % params->data_alignment)) {
1733 log_err(cd, _("Requested data alignment is not compatible with data offset."));
1737 if (sector_size < SECTOR_SIZE || sector_size > MAX_SECTOR_SIZE ||
1738 NOTPOW2(sector_size)) {
1739 log_err(cd, _("Unsupported encryption sector size."));
1742 if (sector_size != SECTOR_SIZE && !dm_flags(cd, DM_CRYPT, &dmc_flags) &&
1743 !(dmc_flags & DM_SECTOR_SIZE_SUPPORTED))
1744 log_std(cd, _("WARNING: The device activation will fail, dm-crypt is missing "
1745 "support for requested encryption sector size.\n"));
1748 if (params->integrity_params) {
1749 /* Standalone dm-integrity must not be used */
1750 if (params->integrity_params->integrity ||
1751 params->integrity_params->integrity_key_size)
1753 /* FIXME: journal encryption and MAC is here not yet supported */
1754 if (params->integrity_params->journal_crypt ||
1755 params->integrity_params->journal_integrity)
1758 if (!INTEGRITY_tag_size(cd, integrity, cipher, cipher_mode)) {
1759 if (!strcmp(integrity, "none"))
1764 integrity_key_size = INTEGRITY_key_size(cd, integrity);
1765 if ((integrity_key_size < 0) || (integrity_key_size >= (int)volume_key_size)) {
1766 log_err(cd, _("Volume key is too small for encryption with integrity extensions."));
1771 r = device_check_access(cd, crypt_metadata_device(cd), DEV_EXCL);
1775 if (!(cd->type = strdup(CRYPT_LUKS2)))
1779 cd->volume_key = crypt_alloc_volume_key(volume_key_size,
1782 cd->volume_key = crypt_generate_volume_key(cd, volume_key_size);
1784 if (!cd->volume_key)
1787 if (params && params->pbkdf)
1788 r = crypt_set_pbkdf_type(cd, params->pbkdf);
1789 else if (verify_pbkdf_params(cd, &cd->pbkdf))
1790 r = init_pbkdf_type(cd, NULL, CRYPT_LUKS2);
1795 if (params && params->data_device) {
1796 if (!cd->metadata_device)
1797 cd->metadata_device = cd->device;
1799 device_free(cd, cd->device);
1801 if (device_alloc(cd, &cd->device, params->data_device) < 0)
1805 if (params && cd->metadata_device) {
1806 /* For detached header the alignment is used directly as data offset */
1807 if (!cd->data_offset)
1808 cd->data_offset = params->data_alignment;
1809 required_alignment = params->data_alignment * SECTOR_SIZE;
1810 } else if (params && params->data_alignment) {
1811 required_alignment = params->data_alignment * SECTOR_SIZE;
1813 device_topology_alignment(cd, cd->device,
1814 &required_alignment,
1815 &alignment_offset, DEFAULT_DISK_ALIGNMENT);
1817 /* FIXME: allow this later also for normal ciphers (check AF_ALG availability. */
1818 if (integrity && !integrity_key_size) {
1819 r = crypt_cipher_check_kernel(cipher, cipher_mode, integrity, volume_key_size);
1821 log_err(cd, _("Cipher %s-%s (key size %zd bits) is not available."),
1822 cipher, cipher_mode, volume_key_size * 8);
1827 if ((!integrity || integrity_key_size) && !crypt_cipher_wrapped_key(cipher, cipher_mode) &&
1828 !INTEGRITY_tag_size(cd, NULL, cipher, cipher_mode)) {
1829 r = LUKS_check_cipher(cd, volume_key_size - integrity_key_size,
1830 cipher, cipher_mode);
1835 r = LUKS2_generate_hdr(cd, &cd->u.luks2.hdr, cd->volume_key,
1836 cipher, cipher_mode,
1839 cd->data_offset * SECTOR_SIZE,
1842 cd->metadata_size, cd->keyslots_size);
1846 r = device_size(crypt_data_device(cd), &dev_size);
1850 if (dev_size < (crypt_get_data_offset(cd) * SECTOR_SIZE))
1851 log_std(cd, _("WARNING: Data offset is outside of currently available data device.\n"));
1853 if (cd->metadata_size && (cd->metadata_size != LUKS2_metadata_size(&cd->u.luks2.hdr)))
1854 log_std(cd, _("WARNING: LUKS2 metadata size changed to %" PRIu64 " bytes.\n"),
1855 LUKS2_metadata_size(&cd->u.luks2.hdr));
1857 if (cd->keyslots_size && (cd->keyslots_size != LUKS2_keyslots_size(&cd->u.luks2.hdr)))
1858 log_std(cd, _("WARNING: LUKS2 keyslots area size changed to %" PRIu64 " bytes.\n"),
1859 LUKS2_keyslots_size(&cd->u.luks2.hdr));
1861 if (!integrity && sector_size > SECTOR_SIZE) {
1862 dev_size -= (crypt_get_data_offset(cd) * SECTOR_SIZE);
1863 if (dev_size % sector_size) {
1864 log_err(cd, _("Device size is not aligned to requested sector size."));
1870 if (params && (params->label || params->subsystem)) {
1871 r = LUKS2_hdr_labels(cd, &cd->u.luks2.hdr,
1872 params->label, params->subsystem, 0);
1877 r = LUKS2_wipe_header_areas(cd, &cd->u.luks2.hdr, cd->metadata_device != NULL);
1879 log_err(cd, _("Cannot wipe header on device %s."),
1880 mdata_device_path(cd));
1881 if (dev_size < LUKS2_hdr_and_areas_size(&cd->u.luks2.hdr))
1882 log_err(cd, _("Device %s is too small."), device_path(crypt_metadata_device(cd)));
1886 /* Wipe integrity superblock and create integrity superblock */
1887 if (crypt_get_integrity_tag_size(cd)) {
1888 r = crypt_wipe_device(cd, crypt_data_device(cd), CRYPT_WIPE_ZERO,
1889 crypt_get_data_offset(cd) * SECTOR_SIZE,
1890 8 * SECTOR_SIZE, 8 * SECTOR_SIZE, NULL, NULL);
1893 log_err(cd, _("Cannot format device %s in use."),
1894 data_device_path(cd));
1895 else if (r == -EACCES) {
1896 log_err(cd, _("Cannot format device %s, permission denied."),
1897 data_device_path(cd));
1900 log_err(cd, _("Cannot wipe header on device %s."),
1901 data_device_path(cd));
1906 r = INTEGRITY_format(cd, params ? params->integrity_params : NULL, NULL, NULL);
1908 log_err(cd, _("Cannot format integrity for device %s."),
1909 data_device_path(cd));
1915 /* override sequence id check with format */
1916 r = LUKS2_hdr_write_force(cd, &cd->u.luks2.hdr);
1919 log_err(cd, _("Cannot format device %s in use."),
1920 mdata_device_path(cd));
1921 else if (r == -EACCES) {
1922 log_err(cd, _("Cannot format device %s, permission denied."),
1923 mdata_device_path(cd));
1926 log_err(cd, _("Cannot format device %s."),
1927 mdata_device_path(cd));
1932 LUKS2_hdr_free(cd, &cd->u.luks2.hdr);
1937 static int _crypt_format_loopaes(struct crypt_device *cd,
1940 size_t volume_key_size,
1941 struct crypt_params_loopaes *params)
1943 if (!crypt_metadata_device(cd)) {
1944 log_err(cd, _("Can't format LOOPAES without device."));
1948 if (volume_key_size > 1024) {
1949 log_err(cd, _("Invalid key size."));
1954 log_err(cd, _("UUID is not supported for this crypt type."));
1958 if (cd->metadata_device) {
1959 log_err(cd, _("Detached metadata device is not supported for this crypt type."));
1963 if (!(cd->type = strdup(CRYPT_LOOPAES)))
1966 cd->u.loopaes.key_size = volume_key_size;
1968 cd->u.loopaes.cipher = strdup(cipher ?: DEFAULT_LOOPAES_CIPHER);
1970 if (params && params->hash)
1971 cd->u.loopaes.hdr.hash = strdup(params->hash);
1973 cd->u.loopaes.hdr.offset = params ? params->offset : 0;
1974 cd->u.loopaes.hdr.skip = params ? params->skip : 0;
1979 static int _crypt_format_verity(struct crypt_device *cd,
1981 struct crypt_params_verity *params)
1983 int r = 0, hash_size;
1984 uint64_t data_device_size, hash_blocks_size;
1985 struct device *fec_device = NULL;
1986 char *fec_device_path = NULL, *hash_name = NULL, *root_hash = NULL, *salt = NULL;
1988 if (!crypt_metadata_device(cd)) {
1989 log_err(cd, _("Can't format VERITY without device."));
1996 if (!params->data_device && !cd->metadata_device)
1999 if (params->hash_type > VERITY_MAX_HASH_TYPE) {
2000 log_err(cd, _("Unsupported VERITY hash type %d."), params->hash_type);
2004 if (VERITY_BLOCK_SIZE_OK(params->data_block_size) ||
2005 VERITY_BLOCK_SIZE_OK(params->hash_block_size)) {
2006 log_err(cd, _("Unsupported VERITY block size."));
2010 if (MISALIGNED_512(params->hash_area_offset)) {
2011 log_err(cd, _("Unsupported VERITY hash offset."));
2015 if (MISALIGNED_512(params->fec_area_offset)) {
2016 log_err(cd, _("Unsupported VERITY FEC offset."));
2020 if (!(cd->type = strdup(CRYPT_VERITY)))
2023 if (params->data_device) {
2024 r = crypt_set_data_device(cd, params->data_device);
2029 if (!params->data_size) {
2030 r = device_size(cd->device, &data_device_size);
2034 cd->u.verity.hdr.data_size = data_device_size / params->data_block_size;
2036 cd->u.verity.hdr.data_size = params->data_size;
2038 if (device_is_identical(crypt_metadata_device(cd), crypt_data_device(cd)) > 0 &&
2039 (cd->u.verity.hdr.data_size * params->data_block_size) > params->hash_area_offset) {
2040 log_err(cd, _("Data area overlaps with hash area."));
2044 hash_size = crypt_hash_size(params->hash_name);
2045 if (hash_size <= 0) {
2046 log_err(cd, _("Hash algorithm %s not supported."),
2050 cd->u.verity.root_hash_size = hash_size;
2052 if (params->fec_device) {
2053 fec_device_path = strdup(params->fec_device);
2054 if (!fec_device_path)
2056 r = device_alloc(cd, &fec_device, params->fec_device);
2062 hash_blocks_size = VERITY_hash_blocks(cd, params) * params->hash_block_size;
2063 if (device_is_identical(crypt_metadata_device(cd), fec_device) > 0 &&
2064 (params->hash_area_offset + hash_blocks_size) > params->fec_area_offset) {
2065 log_err(cd, _("Hash area overlaps with FEC area."));
2070 if (device_is_identical(crypt_data_device(cd), fec_device) > 0 &&
2071 (cd->u.verity.hdr.data_size * params->data_block_size) > params->fec_area_offset) {
2072 log_err(cd, _("Data area overlaps with FEC area."));
2078 root_hash = malloc(cd->u.verity.root_hash_size);
2079 hash_name = strdup(params->hash_name);
2080 salt = malloc(params->salt_size);
2082 if (!root_hash || !hash_name || !salt) {
2087 cd->u.verity.hdr.flags = params->flags;
2088 cd->u.verity.root_hash = root_hash;
2089 cd->u.verity.hdr.hash_name = hash_name;
2090 cd->u.verity.hdr.data_device = NULL;
2091 cd->u.verity.fec_device = fec_device;
2092 cd->u.verity.hdr.fec_device = fec_device_path;
2093 cd->u.verity.hdr.fec_roots = params->fec_roots;
2094 cd->u.verity.hdr.data_block_size = params->data_block_size;
2095 cd->u.verity.hdr.hash_block_size = params->hash_block_size;
2096 cd->u.verity.hdr.hash_area_offset = params->hash_area_offset;
2097 cd->u.verity.hdr.fec_area_offset = params->fec_area_offset;
2098 cd->u.verity.hdr.hash_type = params->hash_type;
2099 cd->u.verity.hdr.flags = params->flags;
2100 cd->u.verity.hdr.salt_size = params->salt_size;
2101 cd->u.verity.hdr.salt = salt;
2104 memcpy(salt, params->salt, params->salt_size);
2106 r = crypt_random_get(cd, salt, params->salt_size, CRYPT_RND_SALT);
2110 if (params->flags & CRYPT_VERITY_CREATE_HASH) {
2111 r = VERITY_create(cd, &cd->u.verity.hdr,
2112 cd->u.verity.root_hash, cd->u.verity.root_hash_size);
2113 if (!r && params->fec_device)
2114 r = VERITY_FEC_process(cd, &cd->u.verity.hdr, cd->u.verity.fec_device, 0, NULL);
2119 if (!(params->flags & CRYPT_VERITY_NO_HEADER)) {
2121 if (!(cd->u.verity.uuid = strdup(uuid)))
2124 r = VERITY_UUID_generate(cd, &cd->u.verity.uuid);
2127 r = VERITY_write_sb(cd, cd->u.verity.hdr.hash_area_offset,
2134 device_free(cd, fec_device);
2137 free(fec_device_path);
2144 static int _crypt_format_integrity(struct crypt_device *cd,
2146 struct crypt_params_integrity *params)
2149 uint32_t integrity_tag_size;
2150 char *integrity = NULL, *journal_integrity = NULL, *journal_crypt = NULL;
2151 struct volume_key *journal_crypt_key = NULL, *journal_mac_key = NULL;
2157 log_err(cd, _("UUID is not supported for this crypt type."));
2161 r = device_check_access(cd, crypt_metadata_device(cd), DEV_EXCL);
2165 /* Wipe first 8 sectors - fs magic numbers etc. */
2166 r = crypt_wipe_device(cd, crypt_metadata_device(cd), CRYPT_WIPE_ZERO, 0,
2167 8 * SECTOR_SIZE, 8 * SECTOR_SIZE, NULL, NULL);
2169 log_err(cd, _("Cannot wipe header on device %s."),
2170 mdata_device_path(cd));
2174 if (!(cd->type = strdup(CRYPT_INTEGRITY)))
2177 if (params->journal_crypt_key) {
2178 journal_crypt_key = crypt_alloc_volume_key(params->journal_crypt_key_size,
2179 params->journal_crypt_key);
2180 if (!journal_crypt_key)
2184 if (params->journal_integrity_key) {
2185 journal_mac_key = crypt_alloc_volume_key(params->journal_integrity_key_size,
2186 params->journal_integrity_key);
2187 if (!journal_mac_key) {
2193 if (params->integrity && !(integrity = strdup(params->integrity))) {
2197 if (params->journal_integrity && !(journal_integrity = strdup(params->journal_integrity))) {
2201 if (params->journal_crypt && !(journal_crypt = strdup(params->journal_crypt))) {
2206 integrity_tag_size = INTEGRITY_hash_tag_size(integrity);
2207 if (integrity_tag_size > 0 && params->tag_size && integrity_tag_size != params->tag_size)
2208 log_std(cd, _("WARNING: Requested tag size %d bytes differs from %s size output (%d bytes).\n"),
2209 params->tag_size, integrity, integrity_tag_size);
2211 if (params->tag_size)
2212 integrity_tag_size = params->tag_size;
2214 cd->u.integrity.journal_crypt_key = journal_crypt_key;
2215 cd->u.integrity.journal_mac_key = journal_mac_key;
2216 cd->u.integrity.params.journal_size = params->journal_size;
2217 cd->u.integrity.params.journal_watermark = params->journal_watermark;
2218 cd->u.integrity.params.journal_commit_time = params->journal_commit_time;
2219 cd->u.integrity.params.interleave_sectors = params->interleave_sectors;
2220 cd->u.integrity.params.buffer_sectors = params->buffer_sectors;
2221 cd->u.integrity.params.sector_size = params->sector_size;
2222 cd->u.integrity.params.tag_size = integrity_tag_size;
2223 cd->u.integrity.params.integrity = integrity;
2224 cd->u.integrity.params.journal_integrity = journal_integrity;
2225 cd->u.integrity.params.journal_crypt = journal_crypt;
2227 r = INTEGRITY_format(cd, params, cd->u.integrity.journal_crypt_key, cd->u.integrity.journal_mac_key);
2229 log_err(cd, _("Cannot format integrity for device %s."),
2230 mdata_device_path(cd));
2233 crypt_free_volume_key(journal_crypt_key);
2234 crypt_free_volume_key(journal_mac_key);
2236 free(journal_integrity);
2237 free(journal_crypt);
2243 int crypt_format(struct crypt_device *cd,
2246 const char *cipher_mode,
2248 const char *volume_key,
2249 size_t volume_key_size,
2258 log_dbg(cd, "Context already formatted as %s.", cd->type);
2262 log_dbg(cd, "Formatting device %s as type %s.", mdata_device_path(cd) ?: "(none)", type);
2264 crypt_reset_null_type(cd);
2266 r = init_crypto(cd);
2271 r = _crypt_format_plain(cd, cipher, cipher_mode,
2272 uuid, volume_key_size, params);
2273 else if (isLUKS1(type))
2274 r = _crypt_format_luks1(cd, cipher, cipher_mode,
2275 uuid, volume_key, volume_key_size, params);
2276 else if (isLUKS2(type))
2277 r = _crypt_format_luks2(cd, cipher, cipher_mode,
2278 uuid, volume_key, volume_key_size, params);
2279 else if (isLOOPAES(type))
2280 r = _crypt_format_loopaes(cd, cipher, uuid, volume_key_size, params);
2281 else if (isVERITY(type))
2282 r = _crypt_format_verity(cd, uuid, params);
2283 else if (isINTEGRITY(type))
2284 r = _crypt_format_integrity(cd, uuid, params);
2286 log_err(cd, _("Unknown crypt device type %s requested."), type);
2291 crypt_set_null_type(cd);
2292 crypt_free_volume_key(cd->volume_key);
2293 cd->volume_key = NULL;
2299 int crypt_repair(struct crypt_device *cd,
2300 const char *requested_type,
2301 void *params __attribute__((unused)))
2308 log_dbg(cd, "Trying to repair %s crypt type from device %s.",
2309 requested_type ?: "any", mdata_device_path(cd) ?: "(none)");
2311 if (!crypt_metadata_device(cd))
2314 if (requested_type && !isLUKS(requested_type))
2317 /* Load with repair */
2318 r = _crypt_load_luks(cd, requested_type, 1, 1);
2322 /* cd->type and header must be set in context */
2323 r = crypt_check_data_device_size(cd);
2325 crypt_set_null_type(cd);
2330 /* compare volume keys */
2331 static int _compare_volume_keys(struct volume_key *svk, unsigned skeyring_only, struct volume_key *tvk, unsigned tkeyring_only)
2335 else if (!svk || !tvk)
2338 if (svk->keylength != tvk->keylength)
2341 if (!skeyring_only && !tkeyring_only)
2342 return memcmp(svk->key, tvk->key, svk->keylength);
2344 if (svk->key_description && tvk->key_description)
2345 return strcmp(svk->key_description, tvk->key_description);
2350 static int _compare_device_types(struct crypt_device *cd,
2351 const struct crypt_dm_active_device *src,
2352 const struct crypt_dm_active_device *tgt)
2355 log_dbg(cd, "Missing device uuid in target device.");
2359 if (isLUKS2(cd->type) && !strncmp("INTEGRITY-", tgt->uuid, strlen("INTEGRITY-"))) {
2360 if (crypt_uuid_cmp(tgt->uuid, src->uuid)) {
2361 log_dbg(cd, "LUKS UUID mismatch.");
2364 } else if (isLUKS(cd->type)) {
2365 if (!src->uuid || strncmp(cd->type, tgt->uuid, strlen(cd->type)) ||
2366 crypt_uuid_cmp(tgt->uuid, src->uuid)) {
2367 log_dbg(cd, "LUKS UUID mismatch.");
2370 } else if (isPLAIN(cd->type) || isLOOPAES(cd->type)) {
2371 if (strncmp(cd->type, tgt->uuid, strlen(cd->type))) {
2372 log_dbg(cd, "Unexpected uuid prefix %s in target device.", tgt->uuid);
2376 log_dbg(cd, "Unsupported device type %s for reload.", cd->type ?: "<empty>");
2383 static int _compare_crypt_devices(struct crypt_device *cd,
2384 const struct dm_target *src,
2385 const struct dm_target *tgt)
2387 /* for crypt devices keys are mandatory */
2388 if (!src->u.crypt.vk || !tgt->u.crypt.vk)
2392 if (!src->u.crypt.cipher || !tgt->u.crypt.cipher)
2394 if (strcmp(src->u.crypt.cipher, tgt->u.crypt.cipher)) {
2395 log_dbg(cd, "Cipher specs do not match.");
2399 if (tgt->u.crypt.vk->keylength == 0 && crypt_is_cipher_null(tgt->u.crypt.cipher))
2400 log_dbg(cd, "Existing device uses cipher null. Skipping key comparison.");
2401 else if (_compare_volume_keys(src->u.crypt.vk, 0, tgt->u.crypt.vk, tgt->u.crypt.vk->key_description != NULL)) {
2402 log_dbg(cd, "Keys in context and target device do not match.");
2406 if (crypt_strcmp(src->u.crypt.integrity, tgt->u.crypt.integrity)) {
2407 log_dbg(cd, "Integrity parameters do not match.");
2411 if (src->u.crypt.offset != tgt->u.crypt.offset ||
2412 src->u.crypt.sector_size != tgt->u.crypt.sector_size ||
2413 src->u.crypt.iv_offset != tgt->u.crypt.iv_offset ||
2414 src->u.crypt.tag_size != tgt->u.crypt.tag_size) {
2415 log_dbg(cd, "Integer parameters do not match.");
2419 if (device_is_identical(src->data_device, tgt->data_device) <= 0) {
2420 log_dbg(cd, "Data devices do not match.");
2427 static int _compare_integrity_devices(struct crypt_device *cd,
2428 const struct dm_target *src,
2429 const struct dm_target *tgt)
2432 * some parameters may be implicit (and set in dm-integrity ctor)
2436 * journal_commit_time
2438 * interleave_sectors
2441 /* check remaining integer values that makes sense */
2442 if (src->u.integrity.tag_size != tgt->u.integrity.tag_size ||
2443 src->u.integrity.offset != tgt->u.integrity.offset ||
2444 src->u.integrity.sector_size != tgt->u.integrity.sector_size) {
2445 log_dbg(cd, "Integer parameters do not match.");
2449 if (crypt_strcmp(src->u.integrity.integrity, tgt->u.integrity.integrity) ||
2450 crypt_strcmp(src->u.integrity.journal_integrity, tgt->u.integrity.journal_integrity) ||
2451 crypt_strcmp(src->u.integrity.journal_crypt, tgt->u.integrity.journal_crypt)) {
2452 log_dbg(cd, "Journal parameters do not match.");
2456 /* unfortunately dm-integrity doesn't support keyring */
2457 if (_compare_volume_keys(src->u.integrity.vk, 0, tgt->u.integrity.vk, 0) ||
2458 _compare_volume_keys(src->u.integrity.journal_integrity_key, 0, tgt->u.integrity.journal_integrity_key, 0) ||
2459 _compare_volume_keys(src->u.integrity.journal_crypt_key, 0, tgt->u.integrity.journal_crypt_key, 0)) {
2460 log_dbg(cd, "Journal keys do not match.");
2464 /* unsupported underneath dm-crypt with auth. encryption */
2465 if (src->u.integrity.meta_device || tgt->u.integrity.meta_device)
2468 if (src->size != tgt->size) {
2469 log_dbg(cd, "Device size parameters do not match.");
2473 if (device_is_identical(src->data_device, tgt->data_device) <= 0) {
2474 log_dbg(cd, "Data devices do not match.");
2481 int crypt_compare_dm_devices(struct crypt_device *cd,
2482 const struct crypt_dm_active_device *src,
2483 const struct crypt_dm_active_device *tgt)
2486 const struct dm_target *s, *t;
2491 r = _compare_device_types(cd, src, tgt);
2500 log_dbg(cd, "segments count mismatch.");
2503 if (s->type != t->type) {
2504 log_dbg(cd, "segment type mismatch.");
2511 r = _compare_crypt_devices(cd, s, t);
2514 r = _compare_integrity_devices(cd, s, t);
2517 r = (s->u.linear.offset == t->u.linear.offset) ? 0 : -EINVAL;
2533 static int _reload_device(struct crypt_device *cd, const char *name,
2534 struct crypt_dm_active_device *sdmd)
2537 struct crypt_dm_active_device tdmd;
2538 struct dm_target *src, *tgt = &tdmd.segment;
2540 if (!cd || !cd->type || !name || !(sdmd->flags & CRYPT_ACTIVATE_REFRESH))
2543 r = dm_query_device(cd, name, DM_ACTIVE_DEVICE | DM_ACTIVE_CRYPT_CIPHER |
2544 DM_ACTIVE_UUID | DM_ACTIVE_CRYPT_KEYSIZE |
2545 DM_ACTIVE_CRYPT_KEY, &tdmd);
2547 log_err(cd, _("Device %s is not active."), name);
2551 if (!single_segment(&tdmd) || tgt->type != DM_CRYPT || tgt->u.crypt.tag_size) {
2553 log_err(cd, _("Unsupported parameters on device %s."), name);
2557 r = crypt_compare_dm_devices(cd, sdmd, &tdmd);
2559 log_err(cd, _("Mismatching parameters on device %s."), name);
2563 src = &sdmd->segment;
2565 /* Changing read only flag for active device makes no sense */
2566 if (tdmd.flags & CRYPT_ACTIVATE_READONLY)
2567 sdmd->flags |= CRYPT_ACTIVATE_READONLY;
2569 sdmd->flags &= ~CRYPT_ACTIVATE_READONLY;
2571 if (sdmd->flags & CRYPT_ACTIVATE_KEYRING_KEY) {
2572 r = crypt_volume_key_set_description(tgt->u.crypt.vk, src->u.crypt.vk->key_description);
2576 crypt_free_volume_key(tgt->u.crypt.vk);
2577 tgt->u.crypt.vk = crypt_alloc_volume_key(src->u.crypt.vk->keylength, src->u.crypt.vk->key);
2578 if (!tgt->u.crypt.vk) {
2584 r = device_block_adjust(cd, src->data_device, DEV_OK,
2585 src->u.crypt.offset, &sdmd->size, NULL);
2589 tdmd.flags = sdmd->flags;
2590 tgt->size = tdmd.size = sdmd->size;
2592 r = dm_reload_device(cd, name, &tdmd, 0, 1);
2594 dm_targets_free(cd, &tdmd);
2595 free(CONST_CAST(void*)tdmd.uuid);
2600 static int _reload_device_with_integrity(struct crypt_device *cd,
2604 struct crypt_dm_active_device *sdmd,
2605 struct crypt_dm_active_device *sdmdi)
2608 struct crypt_dm_active_device tdmd, tdmdi = {};
2609 struct dm_target *src, *srci, *tgt = &tdmd.segment, *tgti = &tdmdi.segment;
2610 struct device *data_device = NULL;
2612 if (!cd || !cd->type || !name || !iname || !(sdmd->flags & CRYPT_ACTIVATE_REFRESH))
2615 r = dm_query_device(cd, name, DM_ACTIVE_DEVICE | DM_ACTIVE_CRYPT_CIPHER |
2616 DM_ACTIVE_UUID | DM_ACTIVE_CRYPT_KEYSIZE |
2617 DM_ACTIVE_CRYPT_KEY, &tdmd);
2619 log_err(cd, _("Device %s is not active."), name);
2623 if (!single_segment(&tdmd) || tgt->type != DM_CRYPT || !tgt->u.crypt.tag_size) {
2625 log_err(cd, _("Unsupported parameters on device %s."), name);
2629 r = dm_query_device(cd, iname, DM_ACTIVE_DEVICE | DM_ACTIVE_UUID, &tdmdi);
2631 log_err(cd, _("Device %s is not active."), iname);
2636 if (!single_segment(&tdmdi) || tgti->type != DM_INTEGRITY) {
2638 log_err(cd, _("Unsupported parameters on device %s."), iname);
2642 r = crypt_compare_dm_devices(cd, sdmdi, &tdmdi);
2644 log_err(cd, _("Mismatching parameters on device %s."), iname);
2648 src = &sdmd->segment;
2649 srci = &sdmdi->segment;
2651 r = device_alloc(cd, &data_device, ipath);
2655 r = device_block_adjust(cd, srci->data_device, DEV_OK,
2656 srci->u.integrity.offset, &sdmdi->size, NULL);
2660 src->data_device = data_device;
2662 r = crypt_compare_dm_devices(cd, sdmd, &tdmd);
2664 log_err(cd, _("Crypt devices mismatch."));
2668 /* Changing read only flag for active device makes no sense */
2669 if (tdmd.flags & CRYPT_ACTIVATE_READONLY)
2670 sdmd->flags |= CRYPT_ACTIVATE_READONLY;
2672 sdmd->flags &= ~CRYPT_ACTIVATE_READONLY;
2674 if (tdmdi.flags & CRYPT_ACTIVATE_READONLY)
2675 sdmdi->flags |= CRYPT_ACTIVATE_READONLY;
2677 sdmdi->flags &= ~CRYPT_ACTIVATE_READONLY;
2679 if (sdmd->flags & CRYPT_ACTIVATE_KEYRING_KEY) {
2680 r = crypt_volume_key_set_description(tgt->u.crypt.vk, src->u.crypt.vk->key_description);
2684 crypt_free_volume_key(tgt->u.crypt.vk);
2685 tgt->u.crypt.vk = crypt_alloc_volume_key(src->u.crypt.vk->keylength, src->u.crypt.vk->key);
2686 if (!tgt->u.crypt.vk) {
2692 r = device_block_adjust(cd, src->data_device, DEV_OK,
2693 src->u.crypt.offset, &sdmd->size, NULL);
2697 tdmd.flags = sdmd->flags;
2698 tdmd.size = sdmd->size;
2700 if ((r = dm_reload_device(cd, iname, sdmdi, 0, 0))) {
2701 log_err(cd, _("Failed to reload device %s."), iname);
2705 if ((r = dm_reload_device(cd, name, &tdmd, 0, 0))) {
2706 log_err(cd, _("Failed to reload device %s."), name);
2710 if ((r = dm_suspend_device(cd, name, 0))) {
2711 log_err(cd, _("Failed to suspend device %s."), name);
2715 if ((r = dm_suspend_device(cd, iname, 0))) {
2716 log_err(cd, _("Failed to suspend device %s."), iname);
2720 if ((r = dm_resume_device(cd, iname, act2dmflags(sdmdi->flags)))) {
2721 log_err(cd, _("Failed to resume device %s."), iname);
2725 r = dm_resume_device(cd, name, act2dmflags(tdmd.flags));
2730 * This is worst case scenario. We have active underlying dm-integrity device with
2731 * new table but dm-crypt resume failed for some reason. Tear everything down and
2735 log_err(cd, _("Fatal error while reloading device %s (on top of device %s)."), name, iname);
2737 if (dm_error_device(cd, name))
2738 log_err(cd, _("Failed to switch device %s to dm-error."), name);
2739 if (dm_error_device(cd, iname))
2740 log_err(cd, _("Failed to switch device %s to dm-error."), iname);
2744 dm_clear_device(cd, name);
2745 dm_clear_device(cd, iname);
2747 if (dm_status_suspended(cd, name) > 0)
2748 dm_resume_device(cd, name, 0);
2749 if (dm_status_suspended(cd, iname) > 0)
2750 dm_resume_device(cd, iname, 0);
2752 dm_targets_free(cd, &tdmd);
2753 dm_targets_free(cd, &tdmdi);
2754 free(CONST_CAST(void*)tdmdi.uuid);
2755 free(CONST_CAST(void*)tdmd.uuid);
2756 device_free(cd, data_device);
2761 int crypt_resize(struct crypt_device *cd, const char *name, uint64_t new_size)
2763 struct crypt_dm_active_device dmdq, dmd = {};
2764 struct dm_target *tgt = &dmdq.segment;
2768 * FIXME: Also with LUKS2 we must not allow resize when there's
2769 * explicit size stored in metadata (length != "dynamic")
2772 /* Device context type must be initialized */
2773 if (!cd || !cd->type || !name)
2776 if (isTCRYPT(cd->type) || isBITLK(cd->type)) {
2777 log_err(cd, _("This operation is not supported for this device type."));
2781 log_dbg(cd, "Resizing device %s to %" PRIu64 " sectors.", name, new_size);
2783 r = dm_query_device(cd, name, DM_ACTIVE_CRYPT_KEYSIZE | DM_ACTIVE_CRYPT_KEY, &dmdq);
2785 log_err(cd, _("Device %s is not active."), name);
2788 if (!single_segment(&dmdq) || tgt->type != DM_CRYPT) {
2789 log_dbg(cd, "Unsupported device table detected in %s.", name);
2794 if ((dmdq.flags & CRYPT_ACTIVATE_KEYRING_KEY) && !crypt_key_in_keyring(cd)) {
2799 if (crypt_key_in_keyring(cd)) {
2800 if (!isLUKS2(cd->type)) {
2804 r = LUKS2_key_description_by_segment(cd, &cd->u.luks2.hdr,
2805 tgt->u.crypt.vk, CRYPT_DEFAULT_SEGMENT);
2809 dmdq.flags |= CRYPT_ACTIVATE_KEYRING_KEY;
2812 if (crypt_loop_device(crypt_get_device_name(cd))) {
2813 log_dbg(cd, "Trying to resize underlying loop device %s.",
2814 crypt_get_device_name(cd));
2815 /* Here we always use default size not new_size */
2816 if (crypt_loop_resize(crypt_get_device_name(cd)))
2817 log_err(cd, _("Cannot resize loop device."));
2820 r = device_block_adjust(cd, crypt_data_device(cd), DEV_OK,
2821 crypt_get_data_offset(cd), &new_size, &dmdq.flags);
2825 if (MISALIGNED(new_size, tgt->u.crypt.sector_size >> SECTOR_SHIFT)) {
2826 log_err(cd, _("Device size is not aligned to requested sector size."));
2831 if (MISALIGNED(new_size, device_block_size(cd, crypt_data_device(cd)) >> SECTOR_SHIFT)) {
2832 log_err(cd, _("Device size is not aligned to device logical block size."));
2837 dmd.uuid = crypt_get_uuid(cd);
2838 dmd.size = new_size;
2839 dmd.flags = dmdq.flags | CRYPT_ACTIVATE_REFRESH;
2840 r = dm_crypt_target_set(&dmd.segment, 0, new_size, crypt_data_device(cd),
2841 tgt->u.crypt.vk, crypt_get_cipher_spec(cd),
2842 crypt_get_iv_offset(cd), crypt_get_data_offset(cd),
2843 crypt_get_integrity(cd), crypt_get_integrity_tag_size(cd),
2844 crypt_get_sector_size(cd));
2848 if (new_size == dmdq.size) {
2849 log_dbg(cd, "Device has already requested size %" PRIu64
2850 " sectors.", dmdq.size);
2853 if (isTCRYPT(cd->type))
2855 else if (isLUKS2(cd->type))
2856 r = LUKS2_unmet_requirements(cd, &cd->u.luks2.hdr, 0, 0);
2858 r = _reload_device(cd, name, &dmd);
2861 dm_targets_free(cd, &dmd);
2862 dm_targets_free(cd, &dmdq);
2867 int crypt_set_uuid(struct crypt_device *cd, const char *uuid)
2869 const char *active_uuid;
2872 log_dbg(cd, "%s device uuid.", uuid ? "Setting new" : "Refreshing");
2874 if ((r = onlyLUKS(cd)))
2877 active_uuid = crypt_get_uuid(cd);
2879 if (uuid && active_uuid && !strncmp(uuid, active_uuid, UUID_STRING_L)) {
2880 log_dbg(cd, "UUID is the same as requested (%s) for device %s.",
2881 uuid, mdata_device_path(cd));
2886 log_dbg(cd, "Requested new UUID change to %s for %s.", uuid, mdata_device_path(cd));
2888 log_dbg(cd, "Requested new UUID refresh for %s.", mdata_device_path(cd));
2890 if (!crypt_confirm(cd, _("Do you really want to change UUID of device?")))
2893 if (isLUKS1(cd->type))
2894 return LUKS_hdr_uuid_set(&cd->u.luks1.hdr, uuid, cd);
2896 return LUKS2_hdr_uuid(cd, &cd->u.luks2.hdr, uuid);
2899 int crypt_set_label(struct crypt_device *cd, const char *label, const char *subsystem)
2903 log_dbg(cd, "Setting new labels.");
2905 if ((r = onlyLUKS2(cd)))
2908 return LUKS2_hdr_labels(cd, &cd->u.luks2.hdr, label, subsystem, 1);
2911 int crypt_header_backup(struct crypt_device *cd,
2912 const char *requested_type,
2913 const char *backup_file)
2917 if (requested_type && !isLUKS(requested_type))
2923 /* Load with repair */
2924 r = _crypt_load_luks(cd, requested_type, 1, 0);
2928 log_dbg(cd, "Requested header backup of device %s (%s) to "
2929 "file %s.", mdata_device_path(cd), requested_type ?: "any type", backup_file);
2931 if (isLUKS1(cd->type) && (!requested_type || isLUKS1(requested_type)))
2932 r = LUKS_hdr_backup(backup_file, cd);
2933 else if (isLUKS2(cd->type) && (!requested_type || isLUKS2(requested_type)))
2934 r = LUKS2_hdr_backup(cd, &cd->u.luks2.hdr, backup_file);
2941 int crypt_header_restore(struct crypt_device *cd,
2942 const char *requested_type,
2943 const char *backup_file)
2945 struct luks_phdr hdr1;
2946 struct luks2_hdr hdr2;
2949 if (requested_type && !isLUKS(requested_type))
2952 if (!cd || (cd->type && !isLUKS(cd->type)) || !backup_file)
2955 r = init_crypto(cd);
2959 log_dbg(cd, "Requested header restore to device %s (%s) from "
2960 "file %s.", mdata_device_path(cd), requested_type ?: "any type", backup_file);
2962 version = LUKS2_hdr_version_unlocked(cd, backup_file);
2964 (requested_type && version == 1 && !isLUKS1(requested_type)) ||
2965 (requested_type && version == 2 && !isLUKS2(requested_type))) {
2966 log_err(cd, _("Header backup file does not contain compatible LUKS header."));
2970 memset(&hdr2, 0, sizeof(hdr2));
2974 r = LUKS_hdr_restore(backup_file, &hdr1, cd);
2976 r = LUKS2_hdr_restore(cd, &hdr2, backup_file);
2978 crypt_safe_memzero(&hdr1, sizeof(hdr1));
2979 crypt_safe_memzero(&hdr2, sizeof(hdr2));
2980 } else if (isLUKS2(cd->type) && (!requested_type || isLUKS2(requested_type))) {
2981 r = LUKS2_hdr_restore(cd, &cd->u.luks2.hdr, backup_file);
2984 } else if (isLUKS1(cd->type) && (!requested_type || isLUKS1(requested_type)))
2985 r = LUKS_hdr_restore(backup_file, &cd->u.luks1.hdr, cd);
2990 r = _crypt_load_luks(cd, version == 1 ? CRYPT_LUKS1 : CRYPT_LUKS2, 1, 1);
2995 void crypt_free(struct crypt_device *cd)
3000 log_dbg(cd, "Releasing crypt device %s context.", mdata_device_path(cd));
3002 dm_backend_exit(cd);
3003 crypt_free_volume_key(cd->volume_key);
3005 crypt_free_type(cd);
3007 device_free(cd, cd->device);
3008 device_free(cd, cd->metadata_device);
3010 free(CONST_CAST(void*)cd->pbkdf.type);
3011 free(CONST_CAST(void*)cd->pbkdf.hash);
3013 /* Some structures can contain keys (TCRYPT), wipe it */
3014 crypt_safe_memzero(cd, sizeof(*cd));
3018 static char *crypt_get_device_key_description(struct crypt_device *cd, const char *name)
3021 struct crypt_dm_active_device dmd;
3022 struct dm_target *tgt = &dmd.segment;
3024 if (dm_query_device(cd, name, DM_ACTIVE_CRYPT_KEY | DM_ACTIVE_CRYPT_KEYSIZE, &dmd) < 0)
3027 if (single_segment(&dmd) && tgt->type == DM_CRYPT &&
3028 (dmd.flags & CRYPT_ACTIVATE_KEYRING_KEY) && tgt->u.crypt.vk->key_description)
3029 desc = strdup(tgt->u.crypt.vk->key_description);
3031 dm_targets_free(cd, &dmd);
3036 int crypt_suspend(struct crypt_device *cd,
3040 crypt_status_info ci;
3042 uint32_t dmflags = DM_SUSPEND_WIPE_KEY;
3044 /* FIXME: check context uuid matches the dm-crypt device uuid (onlyLUKS branching) */
3049 log_dbg(cd, "Suspending volume %s.", name);
3054 r = crypt_uuid_type_cmp(cd, CRYPT_LUKS1);
3056 r = crypt_uuid_type_cmp(cd, CRYPT_LUKS2);
3058 log_err(cd, _("This operation is supported only for LUKS device."));
3064 ci = crypt_status(NULL, name);
3065 if (ci < CRYPT_ACTIVE) {
3066 log_err(cd, _("Volume %s is not active."), name);
3070 dm_backend_init(cd);
3072 r = dm_status_suspended(cd, name);
3077 log_err(cd, _("Volume %s is already suspended."), name);
3082 key_desc = crypt_get_device_key_description(cd, name);
3084 /* we can't simply wipe wrapped keys */
3085 if (crypt_cipher_wrapped_key(crypt_get_cipher(cd), crypt_get_cipher_mode(cd)))
3086 dmflags &= ~DM_SUSPEND_WIPE_KEY;
3088 r = dm_suspend_device(cd, name, dmflags);
3090 log_err(cd, _("Suspend is not supported for device %s."), name);
3092 log_err(cd, _("Error during suspending device %s."), name);
3094 crypt_drop_keyring_key_by_description(cd, key_desc, LOGON_KEY);
3097 dm_backend_exit(cd);
3101 /* key must be properly verified */
3102 static int resume_by_volume_key(struct crypt_device *cd,
3103 struct volume_key *vk,
3107 struct volume_key *zerokey = NULL;
3109 if (crypt_is_cipher_null(crypt_get_cipher_spec(cd))) {
3110 zerokey = crypt_alloc_volume_key(0, NULL);
3114 } else if (crypt_use_keyring_for_vk(cd)) {
3115 /* LUKS2 path only */
3116 digest = LUKS2_digest_by_segment(&cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT);
3119 r = LUKS2_volume_key_load_in_keyring_by_digest(cd,
3120 &cd->u.luks2.hdr, vk, digest);
3125 r = dm_resume_and_reinstate_key(cd, name, vk);
3128 log_err(cd, _("Resume is not supported for device %s."), name);
3130 log_err(cd, _("Error during resuming device %s."), name);
3133 crypt_drop_keyring_key(cd, vk);
3135 crypt_free_volume_key(zerokey);
3140 int crypt_resume_by_passphrase(struct crypt_device *cd,
3143 const char *passphrase,
3144 size_t passphrase_size)
3146 struct volume_key *vk = NULL;
3149 /* FIXME: check context uuid matches the dm-crypt device uuid */
3151 if (!passphrase || !name)
3154 log_dbg(cd, "Resuming volume %s.", name);
3156 if ((r = onlyLUKS(cd)))
3159 r = dm_status_suspended(cd, name);
3164 log_err(cd, _("Volume %s is not suspended."), name);
3168 if (isLUKS1(cd->type))
3169 r = LUKS_open_key_with_hdr(keyslot, passphrase, passphrase_size,
3170 &cd->u.luks1.hdr, &vk, cd);
3172 r = LUKS2_keyslot_open(cd, keyslot, CRYPT_DEFAULT_SEGMENT, passphrase, passphrase_size, &vk);
3179 r = resume_by_volume_key(cd, vk, name);
3181 crypt_free_volume_key(vk);
3182 return r < 0 ? r : keyslot;
3185 int crypt_resume_by_keyfile_device_offset(struct crypt_device *cd,
3188 const char *keyfile,
3189 size_t keyfile_size,
3190 uint64_t keyfile_offset)
3192 struct volume_key *vk = NULL;
3193 char *passphrase_read = NULL;
3194 size_t passphrase_size_read;
3197 /* FIXME: check context uuid matches the dm-crypt device uuid */
3199 if (!name || !keyfile)
3202 log_dbg(cd, "Resuming volume %s.", name);
3204 if ((r = onlyLUKS(cd)))
3207 r = dm_status_suspended(cd, name);
3212 log_err(cd, _("Volume %s is not suspended."), name);
3216 r = crypt_keyfile_device_read(cd, keyfile,
3217 &passphrase_read, &passphrase_size_read,
3218 keyfile_offset, keyfile_size, 0);
3222 if (isLUKS1(cd->type))
3223 r = LUKS_open_key_with_hdr(keyslot, passphrase_read, passphrase_size_read,
3224 &cd->u.luks1.hdr, &vk, cd);
3226 r = LUKS2_keyslot_open(cd, keyslot, CRYPT_DEFAULT_SEGMENT, passphrase_read, passphrase_size_read, &vk);
3228 crypt_safe_free(passphrase_read);
3234 r = resume_by_volume_key(cd, vk, name);
3236 crypt_free_volume_key(vk);
3237 return r < 0 ? r : keyslot;
3240 int crypt_resume_by_keyfile(struct crypt_device *cd,
3243 const char *keyfile,
3244 size_t keyfile_size)
3246 return crypt_resume_by_keyfile_device_offset(cd, name, keyslot,
3247 keyfile, keyfile_size, 0);
3250 int crypt_resume_by_keyfile_offset(struct crypt_device *cd,
3253 const char *keyfile,
3254 size_t keyfile_size,
3255 size_t keyfile_offset)
3257 return crypt_resume_by_keyfile_device_offset(cd, name, keyslot,
3258 keyfile, keyfile_size, keyfile_offset);
3261 int crypt_resume_by_volume_key(struct crypt_device *cd,
3263 const char *volume_key,
3264 size_t volume_key_size)
3266 struct volume_key *vk = NULL;
3269 if (!name || !volume_key)
3272 log_dbg(cd, "Resuming volume %s by volume key.", name);
3274 if ((r = onlyLUKS(cd)))
3277 r = dm_status_suspended(cd, name);
3282 log_err(cd, _("Volume %s is not suspended."), name);
3286 vk = crypt_alloc_volume_key(volume_key_size, volume_key);
3290 if (isLUKS1(cd->type))
3291 r = LUKS_verify_volume_key(&cd->u.luks1.hdr, vk);
3292 else if (isLUKS2(cd->type))
3293 r = LUKS2_digest_verify_by_segment(cd, &cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT, vk);
3296 if (r == -EPERM || r == -ENOENT)
3297 log_err(cd, _("Volume key does not match the volume."));
3300 r = resume_by_volume_key(cd, vk, name);
3302 crypt_free_volume_key(vk);
3307 * Keyslot manipulation
3309 int crypt_keyslot_add_by_passphrase(struct crypt_device *cd,
3310 int keyslot, // -1 any
3311 const char *passphrase,
3312 size_t passphrase_size,
3313 const char *new_passphrase,
3314 size_t new_passphrase_size)
3316 int digest, r, active_slots;
3317 struct luks2_keyslot_params params;
3318 struct volume_key *vk = NULL;
3320 log_dbg(cd, "Adding new keyslot, existing passphrase %sprovided,"
3321 "new passphrase %sprovided.",
3322 passphrase ? "" : "not ", new_passphrase ? "" : "not ");
3324 if ((r = onlyLUKS(cd)))
3327 if (!passphrase || !new_passphrase)
3330 r = keyslot_verify_or_find_empty(cd, &keyslot);
3334 if (isLUKS1(cd->type))
3335 active_slots = LUKS_keyslot_active_count(&cd->u.luks1.hdr);
3337 active_slots = LUKS2_keyslot_active_count(&cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT);
3338 if (active_slots == 0) {
3339 /* No slots used, try to use pre-generated key in header */
3340 if (cd->volume_key) {
3341 vk = crypt_alloc_volume_key(cd->volume_key->keylength, cd->volume_key->key);
3342 r = vk ? 0 : -ENOMEM;
3344 log_err(cd, _("Cannot add key slot, all slots disabled and no volume key provided."));
3347 } else if (active_slots < 0)
3350 /* Passphrase provided, use it to unlock existing keyslot */
3351 if (isLUKS1(cd->type))
3352 r = LUKS_open_key_with_hdr(CRYPT_ANY_SLOT, passphrase,
3353 passphrase_size, &cd->u.luks1.hdr, &vk, cd);
3355 r = LUKS2_keyslot_open(cd, CRYPT_ANY_SLOT, CRYPT_DEFAULT_SEGMENT, passphrase,
3356 passphrase_size, &vk);
3362 if (isLUKS1(cd->type))
3363 r = LUKS_set_key(keyslot, CONST_CAST(char*)new_passphrase,
3364 new_passphrase_size, &cd->u.luks1.hdr, vk, cd);
3366 r = LUKS2_digest_verify_by_segment(cd, &cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT, vk);
3370 r = LUKS2_keyslot_params_default(cd, &cd->u.luks2.hdr, ¶ms);
3373 r = LUKS2_digest_assign(cd, &cd->u.luks2.hdr, keyslot, digest, 1, 0);
3376 r = LUKS2_keyslot_store(cd, &cd->u.luks2.hdr, keyslot,
3377 CONST_CAST(char*)new_passphrase,
3378 new_passphrase_size, vk, ¶ms);
3386 crypt_free_volume_key(vk);
3394 int crypt_keyslot_change_by_passphrase(struct crypt_device *cd,
3397 const char *passphrase,
3398 size_t passphrase_size,
3399 const char *new_passphrase,
3400 size_t new_passphrase_size)
3403 struct luks2_keyslot_params params;
3404 struct volume_key *vk = NULL;
3406 if (!passphrase || !new_passphrase)
3409 log_dbg(cd, "Changing passphrase from old keyslot %d to new %d.",
3410 keyslot_old, keyslot_new);
3412 if ((r = onlyLUKS(cd)))
3415 if (isLUKS1(cd->type))
3416 r = LUKS_open_key_with_hdr(keyslot_old, passphrase, passphrase_size,
3417 &cd->u.luks1.hdr, &vk, cd);
3418 else if (isLUKS2(cd->type)) {
3419 r = LUKS2_keyslot_open(cd, keyslot_old, CRYPT_ANY_SEGMENT, passphrase, passphrase_size, &vk);
3420 /* will fail for keyslots w/o digest. fix if supported in a future */
3422 digest = LUKS2_digest_by_keyslot(&cd->u.luks2.hdr, r);
3431 if (keyslot_old != CRYPT_ANY_SLOT && keyslot_old != r) {
3432 log_dbg(cd, "Keyslot mismatch.");
3437 if (keyslot_new == CRYPT_ANY_SLOT) {
3438 if (isLUKS1(cd->type))
3439 keyslot_new = LUKS_keyslot_find_empty(&cd->u.luks1.hdr);
3440 else if (isLUKS2(cd->type))
3441 keyslot_new = LUKS2_keyslot_find_empty(&cd->u.luks2.hdr);
3442 if (keyslot_new < 0)
3443 keyslot_new = keyslot_old;
3445 log_dbg(cd, "Key change, old slot %d, new slot %d.", keyslot_old, keyslot_new);
3447 if (isLUKS1(cd->type)) {
3448 if (keyslot_old == keyslot_new) {
3449 log_dbg(cd, "Key slot %d is going to be overwritten.", keyslot_old);
3450 (void)crypt_keyslot_destroy(cd, keyslot_old);
3452 r = LUKS_set_key(keyslot_new, new_passphrase, new_passphrase_size,
3453 &cd->u.luks1.hdr, vk, cd);
3454 } else if (isLUKS2(cd->type)) {
3455 r = LUKS2_keyslot_params_default(cd, &cd->u.luks2.hdr, ¶ms);
3459 if (keyslot_old != keyslot_new) {
3460 r = LUKS2_digest_assign(cd, &cd->u.luks2.hdr, keyslot_new, digest, 1, 0);
3463 r = LUKS2_token_assignment_copy(cd, &cd->u.luks2.hdr, keyslot_old, keyslot_new, 0);
3467 log_dbg(cd, "Key slot %d is going to be overwritten.", keyslot_old);
3468 /* FIXME: improve return code so that we can detect area is damaged */
3469 r = LUKS2_keyslot_wipe(cd, &cd->u.luks2.hdr, keyslot_old, 1);
3471 /* (void)crypt_keyslot_destroy(cd, keyslot_old); */
3477 r = LUKS2_keyslot_store(cd, &cd->u.luks2.hdr,
3478 keyslot_new, new_passphrase,
3479 new_passphrase_size, vk, ¶ms);
3483 if (r >= 0 && keyslot_old != keyslot_new)
3484 r = crypt_keyslot_destroy(cd, keyslot_old);
3487 log_err(cd, _("Failed to swap new key slot."));
3489 crypt_free_volume_key(vk);
3497 int crypt_keyslot_add_by_keyfile_device_offset(struct crypt_device *cd,
3499 const char *keyfile,
3500 size_t keyfile_size,
3501 uint64_t keyfile_offset,
3502 const char *new_keyfile,
3503 size_t new_keyfile_size,
3504 uint64_t new_keyfile_offset)
3506 int digest, r, active_slots;
3507 size_t passwordLen, new_passwordLen;
3508 struct luks2_keyslot_params params;
3509 char *password = NULL, *new_password = NULL;
3510 struct volume_key *vk = NULL;
3512 if (!keyfile || !new_keyfile)
3515 log_dbg(cd, "Adding new keyslot, existing keyfile %s, new keyfile %s.",
3516 keyfile, new_keyfile);
3518 if ((r = onlyLUKS(cd)))
3521 r = keyslot_verify_or_find_empty(cd, &keyslot);
3525 if (isLUKS1(cd->type))
3526 active_slots = LUKS_keyslot_active_count(&cd->u.luks1.hdr);
3528 active_slots = LUKS2_keyslot_active_count(&cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT);
3529 if (active_slots == 0) {
3530 /* No slots used, try to use pre-generated key in header */
3531 if (cd->volume_key) {
3532 vk = crypt_alloc_volume_key(cd->volume_key->keylength, cd->volume_key->key);
3533 r = vk ? 0 : -ENOMEM;
3535 log_err(cd, _("Cannot add key slot, all slots disabled and no volume key provided."));
3539 r = crypt_keyfile_device_read(cd, keyfile,
3540 &password, &passwordLen,
3541 keyfile_offset, keyfile_size, 0);
3545 if (isLUKS1(cd->type))
3546 r = LUKS_open_key_with_hdr(CRYPT_ANY_SLOT, password, passwordLen,
3547 &cd->u.luks1.hdr, &vk, cd);
3549 r = LUKS2_keyslot_open(cd, CRYPT_ANY_SLOT, CRYPT_DEFAULT_SEGMENT, password, passwordLen, &vk);
3555 r = crypt_keyfile_device_read(cd, new_keyfile,
3556 &new_password, &new_passwordLen,
3557 new_keyfile_offset, new_keyfile_size, 0);
3561 if (isLUKS1(cd->type))
3562 r = LUKS_set_key(keyslot, new_password, new_passwordLen,
3563 &cd->u.luks1.hdr, vk, cd);
3565 r = LUKS2_digest_verify_by_segment(cd, &cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT, vk);
3569 r = LUKS2_keyslot_params_default(cd, &cd->u.luks2.hdr, ¶ms);
3572 r = LUKS2_digest_assign(cd, &cd->u.luks2.hdr, keyslot, digest, 1, 0);
3575 r = LUKS2_keyslot_store(cd, &cd->u.luks2.hdr, keyslot,
3576 new_password, new_passwordLen, vk, ¶ms);
3579 crypt_safe_free(password);
3580 crypt_safe_free(new_password);
3581 crypt_free_volume_key(vk);
3589 int crypt_keyslot_add_by_keyfile(struct crypt_device *cd,
3591 const char *keyfile,
3592 size_t keyfile_size,
3593 const char *new_keyfile,
3594 size_t new_keyfile_size)
3596 return crypt_keyslot_add_by_keyfile_device_offset(cd, keyslot,
3597 keyfile, keyfile_size, 0,
3598 new_keyfile, new_keyfile_size, 0);
3601 int crypt_keyslot_add_by_keyfile_offset(struct crypt_device *cd,
3603 const char *keyfile,
3604 size_t keyfile_size,
3605 size_t keyfile_offset,
3606 const char *new_keyfile,
3607 size_t new_keyfile_size,
3608 size_t new_keyfile_offset)
3610 return crypt_keyslot_add_by_keyfile_device_offset(cd, keyslot,
3611 keyfile, keyfile_size, keyfile_offset,
3612 new_keyfile, new_keyfile_size, new_keyfile_offset);
3615 int crypt_keyslot_add_by_volume_key(struct crypt_device *cd,
3617 const char *volume_key,
3618 size_t volume_key_size,
3619 const char *passphrase,
3620 size_t passphrase_size)
3622 struct volume_key *vk = NULL;
3628 log_dbg(cd, "Adding new keyslot %d using volume key.", keyslot);
3630 if ((r = onlyLUKS(cd)))
3633 if (isLUKS2(cd->type))
3634 return crypt_keyslot_add_by_key(cd, keyslot,
3635 volume_key, volume_key_size, passphrase,
3636 passphrase_size, 0);
3638 r = keyslot_verify_or_find_empty(cd, &keyslot);
3643 vk = crypt_alloc_volume_key(volume_key_size, volume_key);
3644 else if (cd->volume_key)
3645 vk = crypt_alloc_volume_key(cd->volume_key->keylength, cd->volume_key->key);
3650 r = LUKS_verify_volume_key(&cd->u.luks1.hdr, vk);
3652 log_err(cd, _("Volume key does not match the volume."));
3654 r = LUKS_set_key(keyslot, passphrase, passphrase_size,
3655 &cd->u.luks1.hdr, vk, cd);
3657 crypt_free_volume_key(vk);
3658 return (r < 0) ? r : keyslot;
3661 int crypt_keyslot_destroy(struct crypt_device *cd, int keyslot)
3663 crypt_keyslot_info ki;
3666 log_dbg(cd, "Destroying keyslot %d.", keyslot);
3668 if ((r = _onlyLUKS(cd, CRYPT_CD_UNRESTRICTED)))
3671 ki = crypt_keyslot_status(cd, keyslot);
3672 if (ki == CRYPT_SLOT_INVALID) {
3673 log_err(cd, _("Key slot %d is invalid."), keyslot);
3677 if (isLUKS1(cd->type)) {
3678 if (ki == CRYPT_SLOT_INACTIVE) {
3679 log_err(cd, _("Keyslot %d is not active."), keyslot);
3682 return LUKS_del_key(keyslot, &cd->u.luks1.hdr, cd);
3685 return LUKS2_keyslot_wipe(cd, &cd->u.luks2.hdr, keyslot, 0);
3688 static int _check_header_data_overlap(struct crypt_device *cd, const char *name)
3690 if (!name || !isLUKS(cd->type))
3693 if (device_is_identical(crypt_data_device(cd), crypt_metadata_device(cd)) <= 0)
3696 /* FIXME: check real header size */
3697 if (crypt_get_data_offset(cd) == 0) {
3698 log_err(cd, _("Device header overlaps with data area."));
3705 static int check_devices(struct crypt_device *cd, const char *name, const char *iname, uint32_t *flags)
3709 if (!flags || !name)
3713 r = dm_status_device(cd, iname);
3714 if (r >= 0 && !(*flags & CRYPT_ACTIVATE_REFRESH))
3716 if (r < 0 && r != -ENODEV)
3719 *flags &= ~CRYPT_ACTIVATE_REFRESH;
3722 r = dm_status_device(cd, name);
3723 if (r >= 0 && !(*flags & CRYPT_ACTIVATE_REFRESH))
3725 if (r < 0 && r != -ENODEV)
3728 *flags &= ~CRYPT_ACTIVATE_REFRESH;
3733 static int _create_device_with_integrity(struct crypt_device *cd,
3734 const char *type, const char *name, const char *iname,
3735 const char *ipath, struct crypt_dm_active_device *dmd,
3736 struct crypt_dm_active_device *dmdi)
3739 enum devcheck device_check;
3740 struct dm_target *tgt;
3741 struct device *device = NULL;
3743 if (!single_segment(dmd))
3746 tgt = &dmd->segment;
3747 if (tgt->type != DM_CRYPT)
3750 device_check = dmd->flags & CRYPT_ACTIVATE_SHARED ? DEV_OK : DEV_EXCL;
3752 r = INTEGRITY_activate_dmd_device(cd, iname, CRYPT_INTEGRITY, dmdi, 0);
3756 r = device_alloc(cd, &device, ipath);
3759 tgt->data_device = device;
3761 r = device_block_adjust(cd, tgt->data_device, device_check,
3762 tgt->u.crypt.offset, &dmd->size, &dmd->flags);
3765 r = dm_create_device(cd, name, type, dmd);
3768 dm_remove_device(cd, iname, 0);
3770 device_free(cd, device);
3774 static int kernel_keyring_support(void)
3776 static unsigned _checked = 0;
3779 _kernel_keyring_supported = keyring_check();
3783 return _kernel_keyring_supported;
3786 static int dmcrypt_keyring_bug(void)
3790 if (kernel_version(&kversion))
3792 return kversion < version(4,15,0,0);
3795 int create_or_reload_device(struct crypt_device *cd, const char *name,
3796 const char *type, struct crypt_dm_active_device *dmd)
3799 enum devcheck device_check;
3800 struct dm_target *tgt;
3802 if (!type || !name || !single_segment(dmd))
3805 tgt = &dmd->segment;
3806 if (tgt->type != DM_CRYPT)
3809 /* drop CRYPT_ACTIVATE_REFRESH flag if any device is inactive */
3810 r = check_devices(cd, name, NULL, &dmd->flags);
3814 if (dmd->flags & CRYPT_ACTIVATE_REFRESH)
3815 r = _reload_device(cd, name, dmd);
3817 device_check = dmd->flags & CRYPT_ACTIVATE_SHARED ? DEV_OK : DEV_EXCL;
3819 r = device_block_adjust(cd, tgt->data_device, device_check,
3820 tgt->u.crypt.offset, &dmd->size, &dmd->flags);
3822 tgt->size = dmd->size;
3823 r = dm_create_device(cd, name, type, dmd);
3830 int create_or_reload_device_with_integrity(struct crypt_device *cd, const char *name,
3831 const char *type, struct crypt_dm_active_device *dmd,
3832 struct crypt_dm_active_device *dmdi)
3835 const char *iname = NULL;
3838 if (!type || !name || !dmd || !dmdi)
3841 if (asprintf(&ipath, "%s/%s_dif", dm_get_dir(), name) < 0)
3843 iname = ipath + strlen(dm_get_dir()) + 1;
3845 /* drop CRYPT_ACTIVATE_REFRESH flag if any device is inactive */
3846 r = check_devices(cd, name, iname, &dmd->flags);
3850 if (dmd->flags & CRYPT_ACTIVATE_REFRESH)
3851 r = _reload_device_with_integrity(cd, name, iname, ipath, dmd, dmdi);
3853 r = _create_device_with_integrity(cd, type, name, iname, ipath, dmd, dmdi);
3860 /* See fixmes in _open_and_activate_luks2 */
3861 int update_reencryption_flag(struct crypt_device *cd, int enable, bool commit);
3863 /* TODO: This function should 1:1 with pre-reencryption code */
3864 static int _open_and_activate(struct crypt_device *cd,
3867 const char *passphrase,
3868 size_t passphrase_size,
3873 struct volume_key *vk = NULL;
3875 r = LUKS2_keyslot_open(cd, keyslot,
3876 (flags & CRYPT_ACTIVATE_ALLOW_UNBOUND_KEY) ?
3877 CRYPT_ANY_SEGMENT : CRYPT_DEFAULT_SEGMENT,
3878 passphrase, passphrase_size, &vk);
3883 if (!crypt_use_keyring_for_vk(cd))
3884 use_keyring = false;
3886 use_keyring = ((name && !crypt_is_cipher_null(crypt_get_cipher(cd))) ||
3887 (flags & CRYPT_ACTIVATE_KEYRING_KEY));
3890 r = LUKS2_volume_key_load_in_keyring_by_keyslot(cd,
3891 &cd->u.luks2.hdr, vk, keyslot);
3894 flags |= CRYPT_ACTIVATE_KEYRING_KEY;
3898 r = LUKS2_activate(cd, name, vk, flags);
3901 crypt_drop_keyring_key(cd, vk);
3902 crypt_free_volume_key(vk);
3904 return r < 0 ? r : keyslot;
3907 #if USE_LUKS2_REENCRYPTION
3908 static int load_all_keys(struct crypt_device *cd, struct luks2_hdr *hdr, struct volume_key *vks)
3911 struct volume_key *vk = vks;
3914 r = LUKS2_volume_key_load_in_keyring_by_digest(cd, hdr, vk, crypt_volume_key_get_id(vk));
3917 vk = crypt_volume_key_next(vk);
3923 static int _open_all_keys(struct crypt_device *cd,
3924 struct luks2_hdr *hdr,
3926 const char *passphrase,
3927 size_t passphrase_size,
3929 struct volume_key **vks)
3932 struct volume_key *_vks = NULL;
3933 crypt_reencrypt_info ri = LUKS2_reencrypt_status(hdr);
3935 segment = (flags & CRYPT_ACTIVATE_ALLOW_UNBOUND_KEY) ? CRYPT_ANY_SEGMENT : CRYPT_DEFAULT_SEGMENT;
3938 case CRYPT_REENCRYPT_NONE:
3939 r = LUKS2_keyslot_open(cd, keyslot, segment, passphrase, passphrase_size, &_vks);
3941 case CRYPT_REENCRYPT_CLEAN:
3942 case CRYPT_REENCRYPT_CRASH:
3943 if (segment == CRYPT_ANY_SEGMENT)
3944 r = LUKS2_keyslot_open(cd, keyslot, segment, passphrase,
3945 passphrase_size, &_vks);
3947 r = LUKS2_keyslot_open_all_segments(cd, keyslot,
3948 keyslot, passphrase, passphrase_size,
3955 if (keyslot == CRYPT_ANY_SLOT)
3958 if (r >= 0 && (flags & CRYPT_ACTIVATE_KEYRING_KEY))
3959 r = load_all_keys(cd, hdr, _vks);
3962 MOVE_REF(*vks, _vks);
3965 crypt_drop_keyring_key(cd, _vks);
3966 crypt_free_volume_key(_vks);
3968 return r < 0 ? r : keyslot;
3971 static int _open_and_activate_reencrypt_device(struct crypt_device *cd,
3972 struct luks2_hdr *hdr,
3975 const char *passphrase,
3976 size_t passphrase_size,
3980 crypt_reencrypt_info ri;
3981 uint64_t minimal_size, device_size;
3982 struct volume_key *vks = NULL;
3984 struct crypt_lock_handle *reencrypt_lock = NULL;
3986 if (crypt_use_keyring_for_vk(cd))
3987 flags |= CRYPT_ACTIVATE_KEYRING_KEY;
3989 r = LUKS2_reencrypt_lock(cd, &reencrypt_lock);
3992 log_err(cd, _("Reencryption in-progress. Cannot activate device."));
3994 log_err(cd, _("Failed to get reencryption lock."));
3998 if ((r = crypt_load(cd, CRYPT_LUKS2, NULL)))
4001 ri = LUKS2_reencrypt_status(hdr);
4003 if (ri == CRYPT_REENCRYPT_CRASH) {
4004 r = LUKS2_reencrypt_locked_recovery_by_passphrase(cd, keyslot,
4005 keyslot, passphrase, passphrase_size, flags, &vks);
4007 log_err(cd, _("LUKS2 reencryption recovery failed."));
4012 ri = LUKS2_reencrypt_status(hdr);
4015 /* recovery finished reencryption or it's already finished */
4016 if (ri == CRYPT_REENCRYPT_NONE) {
4017 crypt_drop_keyring_key(cd, vks);
4018 crypt_free_volume_key(vks);
4019 LUKS2_reencrypt_unlock(cd, reencrypt_lock);
4020 return _open_and_activate(cd, keyslot, name, passphrase, passphrase_size, flags);
4023 if (ri > CRYPT_REENCRYPT_CLEAN) {
4028 if (LUKS2_get_data_size(hdr, &minimal_size, &dynamic_size))
4032 r = _open_all_keys(cd, hdr, keyslot, passphrase, passphrase_size, flags, &vks);
4038 r = LUKS2_reencrypt_digest_verify(cd, hdr, vks);
4043 log_dbg(cd, "Entering clean reencryption state mode.");
4046 r = LUKS2_reencrypt_check_device_size(cd, hdr, minimal_size, &device_size, true, dynamic_size);
4049 r = LUKS2_activate_multi(cd, name, vks, device_size >> SECTOR_SHIFT, flags);
4051 LUKS2_reencrypt_unlock(cd, reencrypt_lock);
4053 crypt_drop_keyring_key(cd, vks);
4054 crypt_free_volume_key(vks);
4056 return r < 0 ? r : keyslot;
4060 * Activation/deactivation of a device
4062 static int _open_and_activate_luks2(struct crypt_device *cd,
4065 const char *passphrase,
4066 size_t passphrase_size,
4069 crypt_reencrypt_info ri;
4071 struct luks2_hdr *hdr = &cd->u.luks2.hdr;
4072 struct volume_key *vks = NULL;
4074 ri = LUKS2_reencrypt_status(hdr);
4075 if (ri == CRYPT_REENCRYPT_INVALID)
4078 if (ri > CRYPT_REENCRYPT_NONE) {
4080 r = _open_and_activate_reencrypt_device(cd, hdr, keyslot, name, passphrase,
4081 passphrase_size, flags);
4083 r = _open_all_keys(cd, hdr, keyslot, passphrase,
4084 passphrase_size, flags, &vks);
4088 rv = LUKS2_reencrypt_digest_verify(cd, hdr, vks);
4089 crypt_free_volume_key(vks);
4094 r = _open_and_activate(cd, keyslot, name, passphrase,
4095 passphrase_size, flags);
4100 static int _open_and_activate_luks2(struct crypt_device *cd,
4103 const char *passphrase,
4104 size_t passphrase_size,
4107 crypt_reencrypt_info ri;
4109 ri = LUKS2_reencrypt_status(&cd->u.luks2.hdr);
4110 if (ri == CRYPT_REENCRYPT_INVALID)
4113 if (ri > CRYPT_REENCRYPT_NONE) {
4114 log_err(cd, _("This operation is not supported for this device type."));
4118 return _open_and_activate(cd, keyslot, name, passphrase, passphrase_size, flags);
4122 static int _activate_by_passphrase(struct crypt_device *cd,
4125 const char *passphrase,
4126 size_t passphrase_size,
4130 struct volume_key *vk = NULL;
4132 if ((flags & CRYPT_ACTIVATE_KEYRING_KEY) && !crypt_use_keyring_for_vk(cd))
4135 if ((flags & CRYPT_ACTIVATE_ALLOW_UNBOUND_KEY) && name)
4138 r = _check_header_data_overlap(cd, name);
4142 if (flags & CRYPT_ACTIVATE_SERIALIZE_MEMORY_HARD_PBKDF)
4143 cd->memory_hard_pbkdf_lock_enabled = true;
4145 /* plain, use hashed passphrase */
4146 if (isPLAIN(cd->type)) {
4151 r = process_key(cd, cd->u.plain.hdr.hash,
4152 cd->u.plain.key_size,
4153 passphrase, passphrase_size, &vk);
4157 r = PLAIN_activate(cd, name, vk, cd->u.plain.hdr.size, flags);
4159 } else if (isLUKS1(cd->type)) {
4160 r = LUKS_open_key_with_hdr(keyslot, passphrase,
4161 passphrase_size, &cd->u.luks1.hdr, &vk, cd);
4165 r = LUKS1_activate(cd, name, vk, flags);
4167 } else if (isLUKS2(cd->type)) {
4168 r = _open_and_activate_luks2(cd, keyslot, name, passphrase, passphrase_size, flags);
4170 } else if (isBITLK(cd->type)) {
4171 r = BITLK_activate(cd, name, passphrase, passphrase_size,
4172 &cd->u.bitlk.params, flags);
4175 log_err(cd, _("Device type is not properly initialized."));
4180 crypt_drop_keyring_key(cd, vk);
4181 crypt_free_volume_key(vk);
4183 cd->memory_hard_pbkdf_lock_enabled = false;
4185 return r < 0 ? r : keyslot;
4188 static int _activate_loopaes(struct crypt_device *cd,
4195 unsigned int key_count = 0;
4196 struct volume_key *vk = NULL;
4198 r = LOOPAES_parse_keyfile(cd, &vk, cd->u.loopaes.hdr.hash, &key_count,
4199 buffer, buffer_size);
4202 r = LOOPAES_activate(cd, name, cd->u.loopaes.cipher, key_count,
4205 crypt_free_volume_key(vk);
4210 static int _activate_check_status(struct crypt_device *cd, const char *name, unsigned reload)
4217 r = dm_status_device(cd, name);
4219 if (r >= 0 && reload)
4222 if (r >= 0 || r == -EEXIST) {
4223 log_err(cd, _("Device %s already exists."), name);
4230 log_err(cd, _("Cannot use device %s, name is invalid or still in use."), name);
4234 // activation/deactivation of device mapping
4235 int crypt_activate_by_passphrase(struct crypt_device *cd,
4238 const char *passphrase,
4239 size_t passphrase_size,
4244 if (!cd || !passphrase || (!name && (flags & CRYPT_ACTIVATE_REFRESH)))
4247 log_dbg(cd, "%s volume %s [keyslot %d] using passphrase.",
4248 name ? "Activating" : "Checking", name ?: "passphrase",
4251 r = _activate_check_status(cd, name, flags & CRYPT_ACTIVATE_REFRESH);
4255 return _activate_by_passphrase(cd, name, keyslot, passphrase, passphrase_size, flags);
4258 int crypt_activate_by_keyfile_device_offset(struct crypt_device *cd,
4261 const char *keyfile,
4262 size_t keyfile_size,
4263 uint64_t keyfile_offset,
4266 char *passphrase_read = NULL;
4267 size_t passphrase_size_read;
4270 if (!cd || !keyfile ||
4271 ((flags & CRYPT_ACTIVATE_KEYRING_KEY) && !crypt_use_keyring_for_vk(cd)))
4274 log_dbg(cd, "%s volume %s [keyslot %d] using keyfile %s.",
4275 name ? "Activating" : "Checking", name ?: "passphrase", keyslot, keyfile);
4277 r = _activate_check_status(cd, name, flags & CRYPT_ACTIVATE_REFRESH);
4281 r = crypt_keyfile_device_read(cd, keyfile,
4282 &passphrase_read, &passphrase_size_read,
4283 keyfile_offset, keyfile_size, 0);
4287 if (isLOOPAES(cd->type))
4288 r = _activate_loopaes(cd, name, passphrase_read, passphrase_size_read, flags);
4290 r = _activate_by_passphrase(cd, name, keyslot, passphrase_read, passphrase_size_read, flags);
4293 crypt_safe_free(passphrase_read);
4297 int crypt_activate_by_keyfile(struct crypt_device *cd,
4300 const char *keyfile,
4301 size_t keyfile_size,
4304 return crypt_activate_by_keyfile_device_offset(cd, name, keyslot, keyfile,
4305 keyfile_size, 0, flags);
4308 int crypt_activate_by_keyfile_offset(struct crypt_device *cd,
4311 const char *keyfile,
4312 size_t keyfile_size,
4313 size_t keyfile_offset,
4316 return crypt_activate_by_keyfile_device_offset(cd, name, keyslot, keyfile,
4317 keyfile_size, keyfile_offset, flags);
4319 int crypt_activate_by_volume_key(struct crypt_device *cd,
4321 const char *volume_key,
4322 size_t volume_key_size,
4326 struct volume_key *vk = NULL;
4330 ((flags & CRYPT_ACTIVATE_KEYRING_KEY) && !crypt_use_keyring_for_vk(cd)))
4333 log_dbg(cd, "%s volume %s by volume key.", name ? "Activating" : "Checking",
4336 r = _activate_check_status(cd, name, flags & CRYPT_ACTIVATE_REFRESH);
4340 r = _check_header_data_overlap(cd, name);
4344 /* use key directly, no hash */
4345 if (isPLAIN(cd->type)) {
4349 if (!volume_key || !volume_key_size || volume_key_size != cd->u.plain.key_size) {
4350 log_err(cd, _("Incorrect volume key specified for plain device."));
4354 vk = crypt_alloc_volume_key(volume_key_size, volume_key);
4358 r = PLAIN_activate(cd, name, vk, cd->u.plain.hdr.size, flags);
4359 } else if (isLUKS1(cd->type)) {
4360 /* If key is not provided, try to use internal key */
4362 if (!cd->volume_key) {
4363 log_err(cd, _("Volume key does not match the volume."));
4366 volume_key_size = cd->volume_key->keylength;
4367 volume_key = cd->volume_key->key;
4370 vk = crypt_alloc_volume_key(volume_key_size, volume_key);
4373 r = LUKS_verify_volume_key(&cd->u.luks1.hdr, vk);
4376 log_err(cd, _("Volume key does not match the volume."));
4379 r = LUKS1_activate(cd, name, vk, flags);
4380 } else if (isLUKS2(cd->type)) {
4381 /* If key is not provided, try to use internal key */
4383 if (!cd->volume_key) {
4384 log_err(cd, _("Volume key does not match the volume."));
4387 volume_key_size = cd->volume_key->keylength;
4388 volume_key = cd->volume_key->key;
4391 vk = crypt_alloc_volume_key(volume_key_size, volume_key);
4395 r = LUKS2_digest_verify_by_segment(cd, &cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT, vk);
4396 if (r == -EPERM || r == -ENOENT)
4397 log_err(cd, _("Volume key does not match the volume."));
4401 if (!crypt_use_keyring_for_vk(cd))
4402 use_keyring = false;
4404 use_keyring = (name && !crypt_is_cipher_null(crypt_get_cipher(cd))) || (flags & CRYPT_ACTIVATE_KEYRING_KEY);
4406 if (!r && use_keyring) {
4407 r = LUKS2_key_description_by_segment(cd,
4408 &cd->u.luks2.hdr, vk, CRYPT_DEFAULT_SEGMENT);
4410 r = crypt_volume_key_load_in_keyring(cd, vk);
4412 flags |= CRYPT_ACTIVATE_KEYRING_KEY;
4416 r = LUKS2_activate(cd, name, vk, flags);
4417 } else if (isVERITY(cd->type)) {
4418 r = crypt_activate_by_signed_key(cd, name, volume_key, volume_key_size, NULL, 0, flags);
4419 } else if (isTCRYPT(cd->type)) {
4422 r = TCRYPT_activate(cd, name, &cd->u.tcrypt.hdr,
4423 &cd->u.tcrypt.params, flags);
4424 } else if (isINTEGRITY(cd->type)) {
4428 vk = crypt_alloc_volume_key(volume_key_size, volume_key);
4432 r = INTEGRITY_activate(cd, name, &cd->u.integrity.params, vk,
4433 cd->u.integrity.journal_crypt_key,
4434 cd->u.integrity.journal_mac_key, flags,
4435 cd->u.integrity.sb_flags);
4437 log_err(cd, _("Device type is not properly initialized."));
4442 crypt_drop_keyring_key(cd, vk);
4443 crypt_free_volume_key(vk);
4448 int crypt_activate_by_signed_key(struct crypt_device *cd,
4450 const char *volume_key,
4451 size_t volume_key_size,
4452 const char *signature,
4453 size_t signature_size,
4456 char description[512];
4459 if (!cd || !isVERITY(cd->type))
4462 if (!volume_key || !volume_key_size || (!name && signature)) {
4463 log_err(cd, _("Incorrect root hash specified for verity device."));
4467 log_dbg(cd, "%s volume %s by %skey.", name ? "Activating" : "Checking", name ?: "", signature ? "signed " : "");
4469 if (cd->u.verity.hdr.flags & CRYPT_VERITY_ROOT_HASH_SIGNATURE && !signature) {
4470 log_err(cd, _("Root hash signature required."));
4474 r = _activate_check_status(cd, name, flags & CRYPT_ACTIVATE_REFRESH);
4478 if (signature && !kernel_keyring_support()) {
4479 log_err(cd, _("Kernel keyring missing: required for passing signature to kernel."));
4483 /* volume_key == root hash */
4484 free(CONST_CAST(void*)cd->u.verity.root_hash);
4485 cd->u.verity.root_hash = NULL;
4488 r = snprintf(description, sizeof(description)-1, "cryptsetup:%s%s%s",
4489 crypt_get_uuid(cd) ?: "", crypt_get_uuid(cd) ? "-" : "", name);
4493 log_dbg(cd, "Adding signature into keyring %s", description);
4494 r = keyring_add_key_in_thread_keyring(USER_KEY, description, signature, signature_size);
4496 log_err(cd, _("Failed to load key in kernel keyring."));
4501 r = VERITY_activate(cd, name, volume_key, volume_key_size,
4502 signature ? description : NULL,
4503 cd->u.verity.fec_device,
4504 &cd->u.verity.hdr, flags | CRYPT_ACTIVATE_READONLY);
4507 cd->u.verity.root_hash_size = volume_key_size;
4508 cd->u.verity.root_hash = malloc(volume_key_size);
4509 if (cd->u.verity.root_hash)
4510 memcpy(CONST_CAST(void*)cd->u.verity.root_hash, volume_key, volume_key_size);
4514 crypt_drop_keyring_key_by_description(cd, description, USER_KEY);
4519 int crypt_deactivate_by_name(struct crypt_device *cd, const char *name, uint32_t flags)
4521 struct crypt_device *fake_cd = NULL;
4522 struct luks2_hdr *hdr2 = NULL;
4523 struct crypt_dm_active_device dmd = {};
4525 uint32_t get_flags = DM_ACTIVE_DEVICE | DM_ACTIVE_UUID | DM_ACTIVE_HOLDERS;
4530 log_dbg(cd, "Deactivating volume %s.", name);
4533 r = crypt_init_by_name(&fake_cd, name);
4539 /* skip holders detection and early abort when some flags raised */
4540 if (flags & (CRYPT_DEACTIVATE_FORCE | CRYPT_DEACTIVATE_DEFERRED))
4541 get_flags &= ~DM_ACTIVE_HOLDERS;
4543 switch (crypt_status(cd, name)) {
4546 r = dm_query_device(cd, name, get_flags, &dmd);
4549 log_err(cd, _("Device %s is still in use."), name);
4555 if (isLUKS2(cd->type))
4556 hdr2 = crypt_get_hdr(cd, CRYPT_LUKS2);
4558 if ((dmd.uuid && !strncmp(CRYPT_LUKS2, dmd.uuid, sizeof(CRYPT_LUKS2)-1)) || hdr2)
4559 r = LUKS2_deactivate(cd, name, hdr2, &dmd, flags);
4560 else if (isTCRYPT(cd->type))
4561 r = TCRYPT_deactivate(cd, name, flags);
4563 r = dm_remove_device(cd, name, flags);
4564 if (r < 0 && crypt_status(cd, name) == CRYPT_BUSY) {
4565 log_err(cd, _("Device %s is still in use."), name);
4569 case CRYPT_INACTIVE:
4570 log_err(cd, _("Device %s is not active."), name);
4574 log_err(cd, _("Invalid device %s."), name);
4578 dm_targets_free(cd, &dmd);
4579 free(CONST_CAST(void*)dmd.uuid);
4580 crypt_free(fake_cd);
4585 int crypt_deactivate(struct crypt_device *cd, const char *name)
4587 return crypt_deactivate_by_name(cd, name, 0);
4590 int crypt_get_active_device(struct crypt_device *cd, const char *name,
4591 struct crypt_active_device *cad)
4594 struct crypt_dm_active_device dmd, dmdi = {};
4595 const char *namei = NULL;
4596 struct dm_target *tgt = &dmd.segment;
4597 uint64_t min_offset = UINT64_MAX;
4599 if (!cd || !name || !cad)
4602 r = dm_query_device(cd, name, DM_ACTIVE_DEVICE, &dmd);
4606 /* For LUKS2 with integrity we need flags from underlying dm-integrity */
4607 if (isLUKS2(cd->type) && crypt_get_integrity_tag_size(cd) && single_segment(&dmd)) {
4608 namei = device_dm_name(tgt->data_device);
4609 if (namei && dm_query_device(cd, namei, 0, &dmdi) >= 0)
4610 dmd.flags |= dmdi.flags;
4613 if (cd && isTCRYPT(cd->type)) {
4614 cad->offset = TCRYPT_get_data_offset(cd, &cd->u.tcrypt.hdr, &cd->u.tcrypt.params);
4615 cad->iv_offset = TCRYPT_get_iv_offset(cd, &cd->u.tcrypt.hdr, &cd->u.tcrypt.params);
4618 if (tgt->type == DM_CRYPT && (min_offset > tgt->u.crypt.offset)) {
4619 min_offset = tgt->u.crypt.offset;
4620 cad->iv_offset = tgt->u.crypt.iv_offset;
4621 } else if (tgt->type == DM_INTEGRITY && (min_offset > tgt->u.integrity.offset)) {
4622 min_offset = tgt->u.integrity.offset;
4624 } else if (tgt->type == DM_LINEAR && (min_offset > tgt->u.linear.offset)) {
4625 min_offset = tgt->u.linear.offset;
4632 if (min_offset != UINT64_MAX)
4633 cad->offset = min_offset;
4635 cad->size = dmd.size;
4636 cad->flags = dmd.flags;
4639 dm_targets_free(cd, &dmd);
4640 dm_targets_free(cd, &dmdi);
4645 uint64_t crypt_get_active_integrity_failures(struct crypt_device *cd, const char *name)
4647 struct crypt_dm_active_device dmd;
4648 uint64_t failures = 0;
4653 /* FIXME: LUKS2 / dm-crypt does not provide this count. */
4654 if (dm_query_device(cd, name, 0, &dmd) < 0)
4657 if (single_segment(&dmd) && dmd.segment.type == DM_INTEGRITY)
4658 (void)dm_status_integrity_failures(cd, name, &failures);
4660 dm_targets_free(cd, &dmd);
4666 * Volume key handling
4668 int crypt_volume_key_get(struct crypt_device *cd,
4671 size_t *volume_key_size,
4672 const char *passphrase,
4673 size_t passphrase_size)
4675 struct volume_key *vk = NULL;
4676 int key_len, r = -EINVAL;
4678 if (!cd || !volume_key || !volume_key_size || (!isTCRYPT(cd->type) && !isVERITY(cd->type) && !passphrase))
4681 if (isLUKS2(cd->type) && keyslot != CRYPT_ANY_SLOT)
4682 key_len = LUKS2_get_keyslot_stored_key_size(&cd->u.luks2.hdr, keyslot);
4684 key_len = crypt_get_volume_key_size(cd);
4689 if (key_len > (int)*volume_key_size) {
4690 log_err(cd, _("Volume key buffer too small."));
4694 if (isPLAIN(cd->type) && cd->u.plain.hdr.hash) {
4695 r = process_key(cd, cd->u.plain.hdr.hash, key_len,
4696 passphrase, passphrase_size, &vk);
4698 log_err(cd, _("Cannot retrieve volume key for plain device."));
4699 } else if (isLUKS1(cd->type)) {
4700 r = LUKS_open_key_with_hdr(keyslot, passphrase,
4701 passphrase_size, &cd->u.luks1.hdr, &vk, cd);
4702 } else if (isLUKS2(cd->type)) {
4703 r = LUKS2_keyslot_open(cd, keyslot,
4704 keyslot == CRYPT_ANY_SLOT ? CRYPT_DEFAULT_SEGMENT : CRYPT_ANY_SEGMENT,
4705 passphrase, passphrase_size, &vk);
4706 } else if (isTCRYPT(cd->type)) {
4707 r = TCRYPT_get_volume_key(cd, &cd->u.tcrypt.hdr, &cd->u.tcrypt.params, &vk);
4708 } else if (isVERITY(cd->type)) {
4709 /* volume_key == root hash */
4710 if (cd->u.verity.root_hash) {
4711 memcpy(volume_key, cd->u.verity.root_hash, cd->u.verity.root_hash_size);
4712 *volume_key_size = cd->u.verity.root_hash_size;
4715 log_err(cd, _("Cannot retrieve root hash for verity device."));
4717 log_err(cd, _("This operation is not supported for %s crypt device."), cd->type ?: "(none)");
4720 memcpy(volume_key, vk->key, vk->keylength);
4721 *volume_key_size = vk->keylength;
4724 crypt_free_volume_key(vk);
4728 int crypt_volume_key_verify(struct crypt_device *cd,
4729 const char *volume_key,
4730 size_t volume_key_size)
4732 struct volume_key *vk;
4735 if ((r = _onlyLUKS(cd, CRYPT_CD_UNRESTRICTED)))
4738 vk = crypt_alloc_volume_key(volume_key_size, volume_key);
4742 if (isLUKS1(cd->type))
4743 r = LUKS_verify_volume_key(&cd->u.luks1.hdr, vk);
4744 else if (isLUKS2(cd->type))
4745 r = LUKS2_digest_verify_by_segment(cd, &cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT, vk);
4751 log_err(cd, _("Volume key does not match the volume."));
4753 crypt_free_volume_key(vk);
4755 return r >= 0 ? 0 : r;
4759 * RNG and memory locking
4761 void crypt_set_rng_type(struct crypt_device *cd, int rng_type)
4767 case CRYPT_RNG_URANDOM:
4768 case CRYPT_RNG_RANDOM:
4769 log_dbg(cd, "RNG set to %d (%s).", rng_type, rng_type ? "random" : "urandom");
4770 cd->rng_type = rng_type;
4774 int crypt_get_rng_type(struct crypt_device *cd)
4779 return cd->rng_type;
4782 int crypt_memory_lock(struct crypt_device *cd, int lock)
4784 return lock ? crypt_memlock_inc(cd) : crypt_memlock_dec(cd);
4787 void crypt_set_compatibility(struct crypt_device *cd, uint32_t flags)
4790 cd->compatibility = flags;
4793 uint32_t crypt_get_compatibility(struct crypt_device *cd)
4796 return cd->compatibility;
4804 crypt_status_info crypt_status(struct crypt_device *cd, const char *name)
4809 return CRYPT_INVALID;
4812 dm_backend_init(cd);
4814 r = dm_status_device(cd, name);
4817 dm_backend_exit(cd);
4819 if (r < 0 && r != -ENODEV)
4820 return CRYPT_INVALID;
4823 return CRYPT_ACTIVE;
4828 return CRYPT_INACTIVE;
4831 static void hexprint(struct crypt_device *cd, const char *d, int n, const char *sep)
4834 for(i = 0; i < n; i++)
4835 log_std(cd, "%02hhx%s", (const char)d[i], sep);
4838 static int _luks_dump(struct crypt_device *cd)
4842 log_std(cd, "LUKS header information for %s\n\n", mdata_device_path(cd));
4843 log_std(cd, "Version: \t%" PRIu16 "\n", cd->u.luks1.hdr.version);
4844 log_std(cd, "Cipher name: \t%s\n", cd->u.luks1.hdr.cipherName);
4845 log_std(cd, "Cipher mode: \t%s\n", cd->u.luks1.hdr.cipherMode);
4846 log_std(cd, "Hash spec: \t%s\n", cd->u.luks1.hdr.hashSpec);
4847 log_std(cd, "Payload offset:\t%" PRIu32 "\n", cd->u.luks1.hdr.payloadOffset);
4848 log_std(cd, "MK bits: \t%" PRIu32 "\n", cd->u.luks1.hdr.keyBytes * 8);
4849 log_std(cd, "MK digest: \t");
4850 hexprint(cd, cd->u.luks1.hdr.mkDigest, LUKS_DIGESTSIZE, " ");
4852 log_std(cd, "MK salt: \t");
4853 hexprint(cd, cd->u.luks1.hdr.mkDigestSalt, LUKS_SALTSIZE/2, " ");
4854 log_std(cd, "\n \t");
4855 hexprint(cd, cd->u.luks1.hdr.mkDigestSalt+LUKS_SALTSIZE/2, LUKS_SALTSIZE/2, " ");
4857 log_std(cd, "MK iterations: \t%" PRIu32 "\n", cd->u.luks1.hdr.mkDigestIterations);
4858 log_std(cd, "UUID: \t%s\n\n", cd->u.luks1.hdr.uuid);
4859 for(i = 0; i < LUKS_NUMKEYS; i++) {
4860 if(cd->u.luks1.hdr.keyblock[i].active == LUKS_KEY_ENABLED) {
4861 log_std(cd, "Key Slot %d: ENABLED\n",i);
4862 log_std(cd, "\tIterations: \t%" PRIu32 "\n",
4863 cd->u.luks1.hdr.keyblock[i].passwordIterations);
4864 log_std(cd, "\tSalt: \t");
4865 hexprint(cd, cd->u.luks1.hdr.keyblock[i].passwordSalt,
4866 LUKS_SALTSIZE/2, " ");
4867 log_std(cd, "\n\t \t");
4868 hexprint(cd, cd->u.luks1.hdr.keyblock[i].passwordSalt +
4869 LUKS_SALTSIZE/2, LUKS_SALTSIZE/2, " ");
4872 log_std(cd, "\tKey material offset:\t%" PRIu32 "\n",
4873 cd->u.luks1.hdr.keyblock[i].keyMaterialOffset);
4874 log_std(cd, "\tAF stripes: \t%" PRIu32 "\n",
4875 cd->u.luks1.hdr.keyblock[i].stripes);
4878 log_std(cd, "Key Slot %d: DISABLED\n", i);
4883 static int _verity_dump(struct crypt_device *cd)
4885 log_std(cd, "VERITY header information for %s\n", mdata_device_path(cd));
4886 log_std(cd, "UUID: \t%s\n", cd->u.verity.uuid ?: "");
4887 log_std(cd, "Hash type: \t%u\n", cd->u.verity.hdr.hash_type);
4888 log_std(cd, "Data blocks: \t%" PRIu64 "\n", cd->u.verity.hdr.data_size);
4889 log_std(cd, "Data block size: \t%u\n", cd->u.verity.hdr.data_block_size);
4890 log_std(cd, "Hash block size: \t%u\n", cd->u.verity.hdr.hash_block_size);
4891 log_std(cd, "Hash algorithm: \t%s\n", cd->u.verity.hdr.hash_name);
4892 log_std(cd, "Salt: \t");
4893 if (cd->u.verity.hdr.salt_size)
4894 hexprint(cd, cd->u.verity.hdr.salt, cd->u.verity.hdr.salt_size, "");
4898 if (cd->u.verity.root_hash) {
4899 log_std(cd, "Root hash: \t");
4900 hexprint(cd, cd->u.verity.root_hash, cd->u.verity.root_hash_size, "");
4906 int crypt_dump(struct crypt_device *cd)
4910 if (isLUKS1(cd->type))
4911 return _luks_dump(cd);
4912 else if (isLUKS2(cd->type))
4913 return LUKS2_hdr_dump(cd, &cd->u.luks2.hdr);
4914 else if (isVERITY(cd->type))
4915 return _verity_dump(cd);
4916 else if (isTCRYPT(cd->type))
4917 return TCRYPT_dump(cd, &cd->u.tcrypt.hdr, &cd->u.tcrypt.params);
4918 else if (isINTEGRITY(cd->type))
4919 return INTEGRITY_dump(cd, crypt_data_device(cd), 0);
4920 else if (isBITLK(cd->type))
4921 return BITLK_dump(cd, crypt_data_device(cd), &cd->u.bitlk.params);
4923 log_err(cd, _("Dump operation is not supported for this device type."));
4928 const char *crypt_get_cipher_spec(struct crypt_device *cd)
4932 else if (isLUKS2(cd->type))
4933 return LUKS2_get_cipher(&cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT);
4934 else if (isLUKS1(cd->type))
4935 return cd->u.luks1.cipher_spec;
4936 else if (isPLAIN(cd->type))
4937 return cd->u.plain.cipher_spec;
4938 else if (isLOOPAES(cd->type))
4939 return cd->u.loopaes.cipher_spec;
4940 else if (isBITLK(cd->type))
4941 return cd->u.bitlk.cipher_spec;
4942 else if (!cd->type && !_init_by_name_crypt_none(cd))
4943 return cd->u.none.cipher_spec;
4948 const char *crypt_get_cipher(struct crypt_device *cd)
4953 if (isPLAIN(cd->type))
4954 return cd->u.plain.cipher;
4956 if (isLUKS1(cd->type))
4957 return cd->u.luks1.hdr.cipherName;
4959 if (isLUKS2(cd->type)) {
4960 if (crypt_parse_name_and_mode(LUKS2_get_cipher(&cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT),
4961 cd->u.luks2.cipher, NULL, cd->u.luks2.cipher_mode))
4963 return cd->u.luks2.cipher;
4966 if (isLOOPAES(cd->type))
4967 return cd->u.loopaes.cipher;
4969 if (isTCRYPT(cd->type))
4970 return cd->u.tcrypt.params.cipher;
4972 if (isBITLK(cd->type))
4973 return cd->u.bitlk.params.cipher;
4975 if (!cd->type && !_init_by_name_crypt_none(cd))
4976 return cd->u.none.cipher;
4981 const char *crypt_get_cipher_mode(struct crypt_device *cd)
4986 if (isPLAIN(cd->type))
4987 return cd->u.plain.cipher_mode;
4989 if (isLUKS1(cd->type))
4990 return cd->u.luks1.hdr.cipherMode;
4992 if (isLUKS2(cd->type)) {
4993 if (crypt_parse_name_and_mode(LUKS2_get_cipher(&cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT),
4994 cd->u.luks2.cipher, NULL, cd->u.luks2.cipher_mode))
4996 return cd->u.luks2.cipher_mode;
4999 if (isLOOPAES(cd->type))
5000 return cd->u.loopaes.cipher_mode;
5002 if (isTCRYPT(cd->type))
5003 return cd->u.tcrypt.params.mode;
5005 if (isBITLK(cd->type))
5006 return cd->u.bitlk.params.cipher_mode;
5008 if (!cd->type && !_init_by_name_crypt_none(cd))
5009 return cd->u.none.cipher_mode;
5015 const char *crypt_get_integrity(struct crypt_device *cd)
5020 if (isINTEGRITY(cd->type))
5021 return cd->u.integrity.params.integrity;
5023 if (isLUKS2(cd->type))
5024 return LUKS2_get_integrity(&cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT);
5030 int crypt_get_integrity_key_size(struct crypt_device *cd)
5032 if (isINTEGRITY(cd->type))
5033 return INTEGRITY_key_size(cd, crypt_get_integrity(cd));
5035 if (isLUKS2(cd->type))
5036 return INTEGRITY_key_size(cd, crypt_get_integrity(cd));
5042 int crypt_get_integrity_tag_size(struct crypt_device *cd)
5044 if (isINTEGRITY(cd->type))
5045 return cd->u.integrity.params.tag_size;
5047 if (isLUKS2(cd->type))
5048 return INTEGRITY_tag_size(cd, crypt_get_integrity(cd),
5049 crypt_get_cipher(cd),
5050 crypt_get_cipher_mode(cd));
5054 int crypt_get_sector_size(struct crypt_device *cd)
5059 if (isPLAIN(cd->type))
5060 return cd->u.plain.hdr.sector_size;
5062 if (isINTEGRITY(cd->type))
5063 return cd->u.integrity.params.sector_size;
5065 if (isLUKS2(cd->type))
5066 return LUKS2_get_sector_size(&cd->u.luks2.hdr);
5071 const char *crypt_get_uuid(struct crypt_device *cd)
5076 if (isLUKS1(cd->type))
5077 return cd->u.luks1.hdr.uuid;
5079 if (isLUKS2(cd->type))
5080 return cd->u.luks2.hdr.uuid;
5082 if (isVERITY(cd->type))
5083 return cd->u.verity.uuid;
5085 if (isBITLK(cd->type))
5086 return cd->u.bitlk.params.guid;
5091 const char *crypt_get_device_name(struct crypt_device *cd)
5098 path = device_block_path(cd->device);
5100 path = device_path(cd->device);
5105 const char *crypt_get_metadata_device_name(struct crypt_device *cd)
5109 if (!cd || !cd->metadata_device)
5112 path = device_block_path(cd->metadata_device);
5114 path = device_path(cd->metadata_device);
5119 int crypt_get_volume_key_size(struct crypt_device *cd)
5126 if (isPLAIN(cd->type))
5127 return cd->u.plain.key_size;
5129 if (isLUKS1(cd->type))
5130 return cd->u.luks1.hdr.keyBytes;
5132 if (isLUKS2(cd->type)) {
5133 r = LUKS2_get_volume_key_size(&cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT);
5134 if (r < 0 && cd->volume_key)
5135 r = cd->volume_key->keylength;
5136 return r < 0 ? 0 : r;
5139 if (isLOOPAES(cd->type))
5140 return cd->u.loopaes.key_size;
5142 if (isVERITY(cd->type))
5143 return cd->u.verity.root_hash_size;
5145 if (isTCRYPT(cd->type))
5146 return cd->u.tcrypt.params.key_size;
5148 if (isBITLK(cd->type))
5149 return cd->u.bitlk.params.key_size / 8;
5151 if (!cd->type && !_init_by_name_crypt_none(cd))
5152 return cd->u.none.key_size;
5157 int crypt_keyslot_get_key_size(struct crypt_device *cd, int keyslot)
5159 if (!cd || !isLUKS(cd->type))
5162 if (keyslot < 0 || keyslot >= crypt_keyslot_max(cd->type))
5165 if (isLUKS1(cd->type))
5166 return cd->u.luks1.hdr.keyBytes;
5168 if (isLUKS2(cd->type))
5169 return LUKS2_get_keyslot_stored_key_size(&cd->u.luks2.hdr, keyslot);
5174 int crypt_keyslot_set_encryption(struct crypt_device *cd,
5180 if (!cd || !cipher || ! key_size || !isLUKS2(cd->type))
5183 if (LUKS2_keyslot_cipher_incompatible(cd, cipher))
5186 tmp = strdup(cipher);
5187 free(cd->u.luks2.keyslot_cipher);
5188 cd->u.luks2.keyslot_cipher = tmp;
5189 if (!cd->u.luks2.keyslot_cipher)
5191 cd->u.luks2.keyslot_key_size = key_size;
5196 const char *crypt_keyslot_get_encryption(struct crypt_device *cd, int keyslot, size_t *key_size)
5200 if (!cd || !isLUKS(cd->type) || !key_size)
5203 if (isLUKS1(cd->type)) {
5204 if (keyslot != CRYPT_ANY_SLOT &&
5205 LUKS_keyslot_info(&cd->u.luks1.hdr, keyslot) < CRYPT_SLOT_ACTIVE)
5207 *key_size = crypt_get_volume_key_size(cd);
5208 return cd->u.luks1.cipher_spec;
5211 if (keyslot != CRYPT_ANY_SLOT)
5212 return LUKS2_get_keyslot_cipher(&cd->u.luks2.hdr, keyslot, key_size);
5214 /* Keyslot encryption was set through crypt_keyslot_set_encryption() */
5215 if (cd->u.luks2.keyslot_cipher) {
5216 *key_size = cd->u.luks2.keyslot_key_size;
5217 return cd->u.luks2.keyslot_cipher;
5220 /* Try to reuse volume encryption parameters */
5221 cipher = LUKS2_get_cipher(&cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT);
5222 if (!LUKS2_keyslot_cipher_incompatible(cd, cipher)) {
5223 *key_size = crypt_get_volume_key_size(cd);
5228 /* Fallback to default LUKS2 keyslot encryption */
5229 *key_size = DEFAULT_LUKS2_KEYSLOT_KEYBITS / 8;
5230 return DEFAULT_LUKS2_KEYSLOT_CIPHER;
5233 int crypt_keyslot_get_pbkdf(struct crypt_device *cd, int keyslot, struct crypt_pbkdf_type *pbkdf)
5235 if (!cd || !pbkdf || keyslot == CRYPT_ANY_SLOT)
5238 if (isLUKS1(cd->type))
5239 return LUKS_keyslot_pbkdf(&cd->u.luks1.hdr, keyslot, pbkdf);
5240 else if (isLUKS2(cd->type))
5241 return LUKS2_keyslot_pbkdf(&cd->u.luks2.hdr, keyslot, pbkdf);
5246 int crypt_set_data_offset(struct crypt_device *cd, uint64_t data_offset)
5250 if (data_offset % (MAX_SECTOR_SIZE >> SECTOR_SHIFT)) {
5251 log_err(cd, _("Data offset is not multiple of %u bytes."), MAX_SECTOR_SIZE);
5255 cd->data_offset = data_offset;
5256 log_dbg(cd, "Data offset set to %" PRIu64 " (512-byte) sectors.", data_offset);
5261 int crypt_set_metadata_size(struct crypt_device *cd,
5262 uint64_t metadata_size,
5263 uint64_t keyslots_size)
5268 if (cd->type && !isLUKS2(cd->type))
5271 if (metadata_size && LUKS2_check_metadata_area_size(metadata_size))
5274 if (keyslots_size && LUKS2_check_keyslots_area_size(keyslots_size))
5277 cd->metadata_size = metadata_size;
5278 cd->keyslots_size = keyslots_size;
5283 int crypt_get_metadata_size(struct crypt_device *cd,
5284 uint64_t *metadata_size,
5285 uint64_t *keyslots_size)
5287 uint64_t msize, ksize;
5293 msize = cd->metadata_size;
5294 ksize = cd->keyslots_size;
5295 } else if (isLUKS1(cd->type)) {
5296 msize = LUKS_ALIGN_KEYSLOTS;
5297 ksize = LUKS_device_sectors(&cd->u.luks1.hdr) * SECTOR_SIZE - msize;
5298 } else if (isLUKS2(cd->type)) {
5299 msize = LUKS2_metadata_size(&cd->u.luks2.hdr);
5300 ksize = LUKS2_keyslots_size(&cd->u.luks2.hdr);
5305 *metadata_size = msize;
5307 *keyslots_size = ksize;
5312 uint64_t crypt_get_data_offset(struct crypt_device *cd)
5317 if (isPLAIN(cd->type))
5318 return cd->u.plain.hdr.offset;
5320 if (isLUKS1(cd->type))
5321 return cd->u.luks1.hdr.payloadOffset;
5323 if (isLUKS2(cd->type))
5324 return LUKS2_get_data_offset(&cd->u.luks2.hdr);
5326 if (isLOOPAES(cd->type))
5327 return cd->u.loopaes.hdr.offset;
5329 if (isTCRYPT(cd->type))
5330 return TCRYPT_get_data_offset(cd, &cd->u.tcrypt.hdr, &cd->u.tcrypt.params);
5332 if (isBITLK(cd->type))
5333 return cd->u.bitlk.params.volume_header_size / SECTOR_SIZE;
5335 return cd->data_offset;
5338 uint64_t crypt_get_iv_offset(struct crypt_device *cd)
5343 if (isPLAIN(cd->type))
5344 return cd->u.plain.hdr.skip;
5346 if (isLOOPAES(cd->type))
5347 return cd->u.loopaes.hdr.skip;
5349 if (isTCRYPT(cd->type))
5350 return TCRYPT_get_iv_offset(cd, &cd->u.tcrypt.hdr, &cd->u.tcrypt.params);
5355 crypt_keyslot_info crypt_keyslot_status(struct crypt_device *cd, int keyslot)
5357 if (_onlyLUKS(cd, CRYPT_CD_QUIET | CRYPT_CD_UNRESTRICTED) < 0)
5358 return CRYPT_SLOT_INVALID;
5360 if (isLUKS1(cd->type))
5361 return LUKS_keyslot_info(&cd->u.luks1.hdr, keyslot);
5362 else if(isLUKS2(cd->type))
5363 return LUKS2_keyslot_info(&cd->u.luks2.hdr, keyslot);
5365 return CRYPT_SLOT_INVALID;
5368 int crypt_keyslot_max(const char *type)
5370 if (type && isLUKS1(type))
5371 return LUKS_NUMKEYS;
5373 if (type && isLUKS2(type))
5374 return LUKS2_KEYSLOTS_MAX;
5379 int crypt_keyslot_area(struct crypt_device *cd,
5384 if (_onlyLUKS(cd, CRYPT_CD_QUIET | CRYPT_CD_UNRESTRICTED) || !offset || !length)
5387 if (isLUKS2(cd->type))
5388 return LUKS2_keyslot_area(&cd->u.luks2.hdr, keyslot, offset, length);
5390 return LUKS_keyslot_area(&cd->u.luks1.hdr, keyslot, offset, length);
5393 crypt_keyslot_priority crypt_keyslot_get_priority(struct crypt_device *cd, int keyslot)
5395 if (_onlyLUKS(cd, CRYPT_CD_QUIET | CRYPT_CD_UNRESTRICTED))
5396 return CRYPT_SLOT_PRIORITY_INVALID;
5398 if (keyslot < 0 || keyslot >= crypt_keyslot_max(cd->type))
5399 return CRYPT_SLOT_PRIORITY_INVALID;
5401 if (isLUKS2(cd->type))
5402 return LUKS2_keyslot_priority_get(cd, &cd->u.luks2.hdr, keyslot);
5404 return CRYPT_SLOT_PRIORITY_NORMAL;
5407 int crypt_keyslot_set_priority(struct crypt_device *cd, int keyslot, crypt_keyslot_priority priority)
5411 log_dbg(cd, "Setting keyslot %d to priority %d.", keyslot, priority);
5413 if (priority == CRYPT_SLOT_PRIORITY_INVALID)
5416 if (keyslot < 0 || keyslot >= crypt_keyslot_max(cd->type))
5419 if ((r = onlyLUKS2(cd)))
5422 return LUKS2_keyslot_priority_set(cd, &cd->u.luks2.hdr, keyslot, priority, 1);
5425 const char *crypt_get_type(struct crypt_device *cd)
5427 return cd ? cd->type : NULL;
5430 const char *crypt_get_default_type(void)
5432 return DEFAULT_LUKS_FORMAT;
5435 int crypt_get_verity_info(struct crypt_device *cd,
5436 struct crypt_params_verity *vp)
5438 if (!cd || !isVERITY(cd->type) || !vp)
5441 vp->data_device = device_path(cd->device);
5442 vp->hash_device = mdata_device_path(cd);
5443 vp->fec_device = device_path(cd->u.verity.fec_device);
5444 vp->fec_area_offset = cd->u.verity.hdr.fec_area_offset;
5445 vp->fec_roots = cd->u.verity.hdr.fec_roots;
5446 vp->hash_name = cd->u.verity.hdr.hash_name;
5447 vp->salt = cd->u.verity.hdr.salt;
5448 vp->salt_size = cd->u.verity.hdr.salt_size;
5449 vp->data_block_size = cd->u.verity.hdr.data_block_size;
5450 vp->hash_block_size = cd->u.verity.hdr.hash_block_size;
5451 vp->data_size = cd->u.verity.hdr.data_size;
5452 vp->hash_area_offset = cd->u.verity.hdr.hash_area_offset;
5453 vp->hash_type = cd->u.verity.hdr.hash_type;
5454 vp->flags = cd->u.verity.hdr.flags & (CRYPT_VERITY_NO_HEADER | CRYPT_VERITY_ROOT_HASH_SIGNATURE);
5458 int crypt_get_integrity_info(struct crypt_device *cd,
5459 struct crypt_params_integrity *ip)
5464 if (isINTEGRITY(cd->type)) {
5465 ip->journal_size = cd->u.integrity.params.journal_size;
5466 ip->journal_watermark = cd->u.integrity.params.journal_watermark;
5467 ip->journal_commit_time = cd->u.integrity.params.journal_commit_time;
5468 ip->interleave_sectors = cd->u.integrity.params.interleave_sectors;
5469 ip->tag_size = cd->u.integrity.params.tag_size;
5470 ip->sector_size = cd->u.integrity.params.sector_size;
5471 ip->buffer_sectors = cd->u.integrity.params.buffer_sectors;
5473 ip->integrity = cd->u.integrity.params.integrity;
5474 ip->integrity_key_size = crypt_get_integrity_key_size(cd);
5476 ip->journal_integrity = cd->u.integrity.params.journal_integrity;
5477 ip->journal_integrity_key_size = cd->u.integrity.params.journal_integrity_key_size;
5478 ip->journal_integrity_key = NULL;
5480 ip->journal_crypt = cd->u.integrity.params.journal_crypt;
5481 ip->journal_crypt_key_size = cd->u.integrity.params.journal_crypt_key_size;
5482 ip->journal_crypt_key = NULL;
5484 } else if (isLUKS2(cd->type)) {
5485 ip->journal_size = 0; // FIXME
5486 ip->journal_watermark = 0; // FIXME
5487 ip->journal_commit_time = 0; // FIXME
5488 ip->interleave_sectors = 0; // FIXME
5489 ip->sector_size = crypt_get_sector_size(cd);
5490 ip->buffer_sectors = 0; // FIXME
5492 ip->integrity = LUKS2_get_integrity(&cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT);
5493 ip->integrity_key_size = crypt_get_integrity_key_size(cd);
5494 ip->tag_size = INTEGRITY_tag_size(cd, ip->integrity, crypt_get_cipher(cd), crypt_get_cipher_mode(cd));
5496 ip->journal_integrity = NULL;
5497 ip->journal_integrity_key_size = 0;
5498 ip->journal_integrity_key = NULL;
5500 ip->journal_crypt = NULL;
5501 ip->journal_crypt_key_size = 0;
5502 ip->journal_crypt_key = NULL;
5509 int crypt_convert(struct crypt_device *cd,
5513 struct luks_phdr hdr1;
5514 struct luks2_hdr hdr2;
5520 log_dbg(cd, "Converting LUKS device to type %s", type);
5522 if ((r = onlyLUKS(cd)))
5525 if (isLUKS1(cd->type) && isLUKS2(type))
5526 r = LUKS2_luks1_to_luks2(cd, &cd->u.luks1.hdr, &hdr2);
5527 else if (isLUKS2(cd->type) && isLUKS1(type))
5528 r = LUKS2_luks2_to_luks1(cd, &cd->u.luks2.hdr, &hdr1);
5533 /* in-memory header may be invalid after failed conversion */
5536 log_err(cd, _("Cannot convert device %s which is still in use."), mdata_device_path(cd));
5540 crypt_free_type(cd);
5542 return crypt_load(cd, type, params);
5545 /* Internal access function to header pointer */
5546 void *crypt_get_hdr(struct crypt_device *cd, const char *type)
5548 /* If requested type differs, ignore it */
5549 if (strcmp(cd->type, type))
5552 if (isPLAIN(cd->type))
5553 return &cd->u.plain;
5555 if (isLUKS1(cd->type))
5556 return &cd->u.luks1.hdr;
5558 if (isLUKS2(cd->type))
5559 return &cd->u.luks2.hdr;
5561 if (isLOOPAES(cd->type))
5562 return &cd->u.loopaes;
5564 if (isVERITY(cd->type))
5565 return &cd->u.verity;
5567 if (isTCRYPT(cd->type))
5568 return &cd->u.tcrypt;
5574 struct luks2_reencrypt *crypt_get_luks2_reencrypt(struct crypt_device *cd)
5576 return cd->u.luks2.rh;
5580 void crypt_set_luks2_reencrypt(struct crypt_device *cd, struct luks2_reencrypt *rh)
5582 cd->u.luks2.rh = rh;
5588 int crypt_activate_by_token(struct crypt_device *cd,
5589 const char *name, int token, void *usrptr, uint32_t flags)
5593 log_dbg(cd, "%s volume %s using token %d.",
5594 name ? "Activating" : "Checking", name ?: "passphrase", token);
5596 if ((r = _onlyLUKS2(cd, CRYPT_CD_QUIET | CRYPT_CD_UNRESTRICTED, 0)))
5599 if ((flags & CRYPT_ACTIVATE_KEYRING_KEY) && !crypt_use_keyring_for_vk(cd))
5602 if ((flags & CRYPT_ACTIVATE_ALLOW_UNBOUND_KEY) && name)
5605 if (token == CRYPT_ANY_TOKEN)
5606 return LUKS2_token_open_and_activate_any(cd, &cd->u.luks2.hdr, name, flags);
5608 return LUKS2_token_open_and_activate(cd, &cd->u.luks2.hdr, token, name, flags, usrptr);
5611 int crypt_token_json_get(struct crypt_device *cd, int token, const char **json)
5618 log_dbg(cd, "Requesting JSON for token %d.", token);
5620 if ((r = _onlyLUKS2(cd, CRYPT_CD_UNRESTRICTED, 0)))
5623 return LUKS2_token_json_get(cd, &cd->u.luks2.hdr, token, json) ?: token;
5626 int crypt_token_json_set(struct crypt_device *cd, int token, const char *json)
5630 log_dbg(cd, "Updating JSON for token %d.", token);
5632 if ((r = onlyLUKS2(cd)))
5635 return LUKS2_token_create(cd, &cd->u.luks2.hdr, token, json, 1);
5638 crypt_token_info crypt_token_status(struct crypt_device *cd, int token, const char **type)
5640 if (_onlyLUKS2(cd, CRYPT_CD_QUIET | CRYPT_CD_UNRESTRICTED, 0))
5641 return CRYPT_TOKEN_INVALID;
5643 return LUKS2_token_status(cd, &cd->u.luks2.hdr, token, type);
5646 int crypt_token_luks2_keyring_get(struct crypt_device *cd,
5648 struct crypt_token_params_luks2_keyring *params)
5650 crypt_token_info token_info;
5657 log_dbg(cd, "Requesting LUKS2 keyring token %d.", token);
5659 if ((r = _onlyLUKS2(cd, CRYPT_CD_UNRESTRICTED, 0)))
5662 token_info = LUKS2_token_status(cd, &cd->u.luks2.hdr, token, &type);
5663 switch (token_info) {
5664 case CRYPT_TOKEN_INVALID:
5665 log_dbg(cd, "Token %d is invalid.", token);
5667 case CRYPT_TOKEN_INACTIVE:
5668 log_dbg(cd, "Token %d is inactive.", token);
5670 case CRYPT_TOKEN_INTERNAL:
5671 if (!strcmp(type, LUKS2_TOKEN_KEYRING))
5674 case CRYPT_TOKEN_INTERNAL_UNKNOWN:
5675 case CRYPT_TOKEN_EXTERNAL:
5676 case CRYPT_TOKEN_EXTERNAL_UNKNOWN:
5677 log_dbg(cd, "Token %d has unexpected type %s.", token, type);
5681 return LUKS2_builtin_token_get(cd, &cd->u.luks2.hdr, token, LUKS2_TOKEN_KEYRING, params);
5684 int crypt_token_luks2_keyring_set(struct crypt_device *cd,
5686 const struct crypt_token_params_luks2_keyring *params)
5693 log_dbg(cd, "Creating new LUKS2 keyring token (%d).", token);
5695 if ((r = onlyLUKS2(cd)))
5698 return LUKS2_builtin_token_create(cd, &cd->u.luks2.hdr, token, LUKS2_TOKEN_KEYRING, params, 1);
5701 int crypt_token_assign_keyslot(struct crypt_device *cd, int token, int keyslot)
5705 if ((r = onlyLUKS2(cd)))
5708 return LUKS2_token_assign(cd, &cd->u.luks2.hdr, keyslot, token, 1, 1);
5711 int crypt_token_unassign_keyslot(struct crypt_device *cd, int token, int keyslot)
5715 if ((r = onlyLUKS2(cd)))
5718 return LUKS2_token_assign(cd, &cd->u.luks2.hdr, keyslot, token, 0, 1);
5721 int crypt_token_is_assigned(struct crypt_device *cd, int token, int keyslot)
5725 if ((r = _onlyLUKS2(cd, CRYPT_CD_QUIET | CRYPT_CD_UNRESTRICTED, 0)))
5728 return LUKS2_token_is_assigned(cd, &cd->u.luks2.hdr, keyslot, token);
5732 int crypt_metadata_locking_enabled(void)
5734 return _metadata_locking;
5737 int crypt_metadata_locking(struct crypt_device *cd, int enable)
5739 if (enable && !_metadata_locking)
5742 _metadata_locking = enable ? 1 : 0;
5746 int crypt_persistent_flags_set(struct crypt_device *cd, crypt_flags_type type, uint32_t flags)
5750 if ((r = onlyLUKS2(cd)))
5753 if (type == CRYPT_FLAGS_ACTIVATION)
5754 return LUKS2_config_set_flags(cd, &cd->u.luks2.hdr, flags);
5756 if (type == CRYPT_FLAGS_REQUIREMENTS)
5757 return LUKS2_config_set_requirements(cd, &cd->u.luks2.hdr, flags, true);
5762 int crypt_persistent_flags_get(struct crypt_device *cd, crypt_flags_type type, uint32_t *flags)
5769 if ((r = _onlyLUKS2(cd, CRYPT_CD_UNRESTRICTED, 0)))
5772 if (type == CRYPT_FLAGS_ACTIVATION)
5773 return LUKS2_config_get_flags(cd, &cd->u.luks2.hdr, flags);
5775 if (type == CRYPT_FLAGS_REQUIREMENTS)
5776 return LUKS2_config_get_requirements(cd, &cd->u.luks2.hdr, flags);
5781 static int update_volume_key_segment_digest(struct crypt_device *cd, struct luks2_hdr *hdr, int digest, int commit)
5785 /* Remove any assignments in memory */
5786 r = LUKS2_digest_segment_assign(cd, hdr, CRYPT_DEFAULT_SEGMENT, CRYPT_ANY_DIGEST, 0, 0);
5790 /* Assign it to the specific digest */
5791 return LUKS2_digest_segment_assign(cd, hdr, CRYPT_DEFAULT_SEGMENT, digest, 1, commit);
5794 static int verify_and_update_segment_digest(struct crypt_device *cd,
5795 struct luks2_hdr *hdr, int keyslot,
5796 const char *volume_key, size_t volume_key_size,
5797 const char *password, size_t password_size)
5800 struct volume_key *vk = NULL;
5802 if (keyslot < 0 || (volume_key && !volume_key_size))
5806 vk = crypt_alloc_volume_key(volume_key_size, volume_key);
5808 r = LUKS2_keyslot_open(cd, keyslot, CRYPT_ANY_SEGMENT, password, password_size, &vk);
5818 /* check volume_key (param) digest matches keyslot digest */
5819 r = LUKS2_digest_verify(cd, hdr, vk, keyslot);
5824 /* nothing to do, volume key in keyslot is already assigned to default segment */
5825 r = LUKS2_digest_verify_by_segment(cd, hdr, CRYPT_DEFAULT_SEGMENT, vk);
5829 /* FIXME: check new volume key is usable with current default segment */
5831 r = update_volume_key_segment_digest(cd, &cd->u.luks2.hdr, digest, 1);
5833 log_err(cd, _("Failed to assign keyslot %u as the new volume key."), keyslot);
5835 crypt_free_volume_key(vk);
5836 return r < 0 ? r : keyslot;
5840 int crypt_keyslot_add_by_key(struct crypt_device *cd,
5842 const char *volume_key,
5843 size_t volume_key_size,
5844 const char *passphrase,
5845 size_t passphrase_size,
5849 struct luks2_keyslot_params params;
5850 struct volume_key *vk = NULL;
5852 if (!passphrase || ((flags & CRYPT_VOLUME_KEY_NO_SEGMENT) &&
5853 (flags & CRYPT_VOLUME_KEY_SET)))
5856 log_dbg(cd, "Adding new keyslot %d with volume key %sassigned to a crypt segment.",
5857 keyslot, flags & CRYPT_VOLUME_KEY_NO_SEGMENT ? "un" : "");
5859 if ((r = onlyLUKS2(cd)))
5862 /* new volume key assignment */
5863 if ((flags & CRYPT_VOLUME_KEY_SET) && crypt_keyslot_status(cd, keyslot) > CRYPT_SLOT_INACTIVE)
5864 return verify_and_update_segment_digest(cd, &cd->u.luks2.hdr,
5865 keyslot, volume_key, volume_key_size, passphrase, passphrase_size);
5867 r = keyslot_verify_or_find_empty(cd, &keyslot);
5872 vk = crypt_alloc_volume_key(volume_key_size, volume_key);
5873 else if (flags & CRYPT_VOLUME_KEY_NO_SEGMENT)
5874 vk = crypt_generate_volume_key(cd, volume_key_size);
5875 else if (cd->volume_key)
5876 vk = crypt_alloc_volume_key(cd->volume_key->keylength, cd->volume_key->key);
5883 /* if key matches volume key digest tear down new vk flag */
5884 digest = LUKS2_digest_verify_by_segment(cd, &cd->u.luks2.hdr, CRYPT_DEFAULT_SEGMENT, vk);
5886 flags &= ~CRYPT_VOLUME_KEY_SET;
5888 /* if key matches any existing digest, do not create new digest */
5889 if (digest < 0 && (flags & CRYPT_VOLUME_KEY_DIGEST_REUSE))
5890 digest = LUKS2_digest_any_matching(cd, &cd->u.luks2.hdr, vk);
5892 /* no segment flag or new vk flag requires new key digest */
5893 if (flags & (CRYPT_VOLUME_KEY_NO_SEGMENT | CRYPT_VOLUME_KEY_SET)) {
5894 if (digest < 0 || !(flags & CRYPT_VOLUME_KEY_DIGEST_REUSE))
5895 digest = LUKS2_digest_create(cd, "pbkdf2", &cd->u.luks2.hdr, vk);
5900 log_err(cd, _("Volume key does not match the volume."));
5904 r = LUKS2_keyslot_params_default(cd, &cd->u.luks2.hdr, ¶ms);
5906 log_err(cd, _("Failed to initialize default LUKS2 keyslot parameters."));
5910 r = LUKS2_digest_assign(cd, &cd->u.luks2.hdr, keyslot, digest, 1, 0);
5912 log_err(cd, _("Failed to assign keyslot %d to digest."), keyslot);
5916 r = LUKS2_keyslot_store(cd, &cd->u.luks2.hdr, keyslot,
5917 passphrase, passphrase_size, vk, ¶ms);
5919 if (r >= 0 && (flags & CRYPT_VOLUME_KEY_SET))
5920 r = update_volume_key_segment_digest(cd, &cd->u.luks2.hdr, digest, 1);
5922 crypt_free_volume_key(vk);
5934 int crypt_use_keyring_for_vk(struct crypt_device *cd)
5938 /* dm backend must be initialized */
5939 if (!cd || !isLUKS2(cd->type))
5942 if (!_vk_via_keyring || !kernel_keyring_support())
5945 if (dm_flags(cd, DM_CRYPT, &dmc_flags))
5946 return dmcrypt_keyring_bug() ? 0 : 1;
5948 return (dmc_flags & DM_KERNEL_KEYRING_SUPPORTED);
5951 int crypt_volume_key_keyring(struct crypt_device *cd, int enable)
5953 _vk_via_keyring = enable ? 1 : 0;
5958 int crypt_volume_key_load_in_keyring(struct crypt_device *cd, struct volume_key *vk)
5961 const char *type_name = key_type_name(LOGON_KEY);
5963 if (!vk || !cd || !type_name)
5966 if (!vk->key_description) {
5967 log_dbg(cd, "Invalid key description");
5971 log_dbg(cd, "Loading key (%zu bytes, type %s) in thread keyring.", vk->keylength, type_name);
5973 r = keyring_add_key_in_thread_keyring(LOGON_KEY, vk->key_description, vk->key, vk->keylength);
5975 log_dbg(cd, "keyring_add_key_in_thread_keyring failed (error %d)", r);
5976 log_err(cd, _("Failed to load key in kernel keyring."));
5978 crypt_set_key_in_keyring(cd, 1);
5984 int crypt_key_in_keyring(struct crypt_device *cd)
5986 return cd ? cd->key_in_keyring : 0;
5990 void crypt_set_key_in_keyring(struct crypt_device *cd, unsigned key_in_keyring)
5995 cd->key_in_keyring = key_in_keyring;
5999 void crypt_drop_keyring_key_by_description(struct crypt_device *cd, const char *key_description, key_type_t ktype)
6002 const char *type_name = key_type_name(ktype);
6004 if (!key_description || !type_name)
6007 log_dbg(cd, "Requesting keyring %s key for revoke and unlink.", type_name);
6009 r = keyring_revoke_and_unlink_key(ktype, key_description);
6011 log_dbg(cd, "keyring_revoke_and_unlink_key failed (error %d)", r);
6012 crypt_set_key_in_keyring(cd, 0);
6016 void crypt_drop_keyring_key(struct crypt_device *cd, struct volume_key *vks)
6018 struct volume_key *vk = vks;
6021 crypt_drop_keyring_key_by_description(cd, vk->key_description, LOGON_KEY);
6022 vk = crypt_volume_key_next(vk);
6026 int crypt_activate_by_keyring(struct crypt_device *cd,
6028 const char *key_description,
6033 size_t passphrase_size;
6036 if (!cd || !key_description)
6039 log_dbg(cd, "%s volume %s [keyslot %d] using passphrase in keyring.",
6040 name ? "Activating" : "Checking", name ?: "passphrase", keyslot);
6042 if (!kernel_keyring_support()) {
6043 log_err(cd, _("Kernel keyring is not supported by the kernel."));
6047 r = _activate_check_status(cd, name, flags & CRYPT_ACTIVATE_REFRESH);
6051 r = keyring_get_passphrase(key_description, &passphrase, &passphrase_size);
6053 log_err(cd, _("Failed to read passphrase from keyring (error %d)."), r);
6057 r = _activate_by_passphrase(cd, name, keyslot, passphrase, passphrase_size, flags);
6059 crypt_safe_memzero(passphrase, passphrase_size);
6066 * Workaround for serialization of parallel activation and memory-hard PBKDF
6067 * In specific situation (systemd activation) this causes OOM killer activation.
6068 * For now, let's provide this ugly way to serialize unlocking of devices.
6070 int crypt_serialize_lock(struct crypt_device *cd)
6072 if (!cd->memory_hard_pbkdf_lock_enabled)
6075 log_dbg(cd, "Taking global memory-hard access serialization lock.");
6076 if (crypt_write_lock(cd, "memory-hard-access", true, &cd->pbkdf_memory_hard_lock)) {
6077 log_err(cd, _("Failed to acquire global memory-hard access serialization lock."));
6078 cd->pbkdf_memory_hard_lock = NULL;
6085 void crypt_serialize_unlock(struct crypt_device *cd)
6087 if (!cd->memory_hard_pbkdf_lock_enabled)
6090 crypt_unlock_internal(cd, cd->pbkdf_memory_hard_lock);
6091 cd->pbkdf_memory_hard_lock = NULL;
6094 crypt_reencrypt_info crypt_reencrypt_status(struct crypt_device *cd,
6095 struct crypt_params_reencrypt *params)
6097 if (!cd || !isLUKS2(cd->type))
6098 return CRYPT_REENCRYPT_NONE;
6100 if (_onlyLUKS2(cd, CRYPT_CD_QUIET, CRYPT_REQUIREMENT_ONLINE_REENCRYPT))
6101 return CRYPT_REENCRYPT_INVALID;
6103 return LUKS2_reencrypt_get_params(&cd->u.luks2.hdr, params);
6106 static void __attribute__((destructor)) libcryptsetup_exit(void)
6108 crypt_backend_destroy();
6109 crypt_random_exit();