/*
 * LUKS - Linux Unified Key Setup v2, reencryption helpers
 *
 * Copyright (C) 2015-2023 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2015-2023 Ondrej Kozina
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
22 #include "luks2_internal.h"
23 #include "utils_device_locking.h"
25 struct luks2_reencrypt {
26 /* reencryption window attributes */
33 crypt_reencrypt_direction_info direction;
34 crypt_reencrypt_mode_info mode;
41 /* reencryption window persistence attributes */
42 struct reenc_protection rp;
43 struct reenc_protection rp_moved_segment;
47 /* already running reencryption */
48 json_object *jobj_segs_hot;
49 struct json_object *jobj_segs_post;
52 json_object *jobj_segment_new;
54 json_object *jobj_segment_old;
56 json_object *jobj_segment_moved;
58 struct volume_key *vks;
63 struct crypt_storage_wrapper *cw1;
64 struct crypt_storage_wrapper *cw2;
69 struct crypt_lock_handle *reenc_lock;
71 #if USE_LUKS2_REENCRYPTION
72 static uint64_t data_shift_value(struct reenc_protection *rp)
74 return rp->type == REENC_PROTECTION_DATASHIFT ? rp->p.ds.data_shift : 0;
77 static json_object *reencrypt_segment(struct luks2_hdr *hdr, unsigned new)
79 return LUKS2_get_segment_by_flag(hdr, new ? "backup-final" : "backup-previous");
82 static json_object *reencrypt_segment_new(struct luks2_hdr *hdr)
84 return reencrypt_segment(hdr, 1);
87 static json_object *reencrypt_segment_old(struct luks2_hdr *hdr)
89 return reencrypt_segment(hdr, 0);
92 static json_object *reencrypt_segments_old(struct luks2_hdr *hdr)
94 json_object *jobj_segments, *jobj = NULL;
96 if (json_object_copy(reencrypt_segment_old(hdr), &jobj))
99 json_segment_remove_flag(jobj, "backup-previous");
101 jobj_segments = json_object_new_object();
102 if (!jobj_segments) {
103 json_object_put(jobj);
107 if (json_object_object_add_by_uint(jobj_segments, 0, jobj)) {
108 json_object_put(jobj);
109 json_object_put(jobj_segments);
113 return jobj_segments;
/* Cipher specification of the resulting ("backup-final") segment. */
static const char *reencrypt_segment_cipher_new(struct luks2_hdr *hdr)
{
	return json_segment_get_cipher(reencrypt_segment(hdr, 1));
}
/* Cipher specification of the original ("backup-previous") segment. */
static const char *reencrypt_segment_cipher_old(struct luks2_hdr *hdr)
{
	return json_segment_get_cipher(reencrypt_segment(hdr, 0));
}
/* Encryption sector size of the resulting ("backup-final") segment. */
static uint32_t reencrypt_get_sector_size_new(struct luks2_hdr *hdr)
{
	return json_segment_get_sector_size(reencrypt_segment(hdr, 1));
}
/* Encryption sector size of the original ("backup-previous") segment. */
static uint32_t reencrypt_get_sector_size_old(struct luks2_hdr *hdr)
{
	return json_segment_get_sector_size(reencrypt_segment(hdr, 0));
}
136 static uint64_t reencrypt_data_offset(struct luks2_hdr *hdr, unsigned new)
138 json_object *jobj = reencrypt_segment(hdr, new);
140 return json_segment_get_offset(jobj, 0);
142 return LUKS2_get_data_offset(hdr) << SECTOR_SHIFT;
145 static uint64_t LUKS2_reencrypt_get_data_offset_moved(struct luks2_hdr *hdr)
147 json_object *jobj_segment = LUKS2_get_segment_by_flag(hdr, "backup-moved-segment");
152 return json_segment_get_offset(jobj_segment, 0);
/* Byte offset of the resulting data area. */
static uint64_t reencrypt_get_data_offset_new(struct luks2_hdr *hdr)
{
	return reencrypt_data_offset(hdr, 1);
}
/* Byte offset of the original data area. */
static uint64_t reencrypt_get_data_offset_old(struct luks2_hdr *hdr)
{
	return reencrypt_data_offset(hdr, 0);
}
/* Digest id assigned to the selected backup segment, negative errno on
 * failure. (Restored the missing-segment guard dropped from the extract
 * — TODO confirm exact error code against upstream.) */
static int reencrypt_digest(struct luks2_hdr *hdr, unsigned new)
{
	int segment = LUKS2_get_segment_id_by_flag(hdr, new ? "backup-final" : "backup-previous");

	if (segment < 0)
		return -EINVAL;

	return LUKS2_digest_by_segment(hdr, segment);
}
/* Digest id of the resulting ("backup-final") segment. */
int LUKS2_reencrypt_digest_new(struct luks2_hdr *hdr)
{
	return reencrypt_digest(hdr, 1);
}
/* Digest id of the original ("backup-previous") segment. */
int LUKS2_reencrypt_digest_old(struct luks2_hdr *hdr)
{
	return reencrypt_digest(hdr, 0);
}
185 /* none, checksums, journal or shift */
186 static const char *reencrypt_resilience_type(struct luks2_hdr *hdr)
188 json_object *jobj_keyslot, *jobj_area, *jobj_type;
189 int ks = LUKS2_find_keyslot(hdr, "reencrypt");
194 jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, ks);
196 json_object_object_get_ex(jobj_keyslot, "area", &jobj_area);
197 if (!json_object_object_get_ex(jobj_area, "type", &jobj_type))
200 return json_object_get_string(jobj_type);
203 static const char *reencrypt_resilience_hash(struct luks2_hdr *hdr)
205 json_object *jobj_keyslot, *jobj_area, *jobj_type, *jobj_hash;
206 int ks = LUKS2_find_keyslot(hdr, "reencrypt");
211 jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, ks);
213 json_object_object_get_ex(jobj_keyslot, "area", &jobj_area);
214 if (!json_object_object_get_ex(jobj_area, "type", &jobj_type))
216 if (strcmp(json_object_get_string(jobj_type), "checksum"))
218 if (!json_object_object_get_ex(jobj_area, "hash", &jobj_hash))
221 return json_object_get_string(jobj_hash);
223 #if USE_LUKS2_REENCRYPTION
224 static json_object *_enc_create_segments_shift_after(struct luks2_reencrypt *rh, uint64_t data_offset)
226 int reenc_seg, i = 0;
227 json_object *jobj_copy, *jobj_seg_new = NULL, *jobj_segs_post = json_object_new_object();
230 if (!rh->jobj_segs_hot || !jobj_segs_post)
233 if (json_segments_count(rh->jobj_segs_hot) == 0)
234 return jobj_segs_post;
236 reenc_seg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot);
240 while (i < reenc_seg) {
241 jobj_copy = json_segments_get_segment(rh->jobj_segs_hot, i);
244 json_object_object_add_by_uint(jobj_segs_post, i++, json_object_get(jobj_copy));
247 if (json_object_copy(json_segments_get_segment(rh->jobj_segs_hot, reenc_seg + 1), &jobj_seg_new)) {
248 if (json_object_copy(json_segments_get_segment(rh->jobj_segs_hot, reenc_seg), &jobj_seg_new))
250 json_segment_remove_flag(jobj_seg_new, "in-reencryption");
253 json_object_object_add(jobj_seg_new, "offset", crypt_jobj_new_uint64(rh->offset + data_offset));
254 json_object_object_add(jobj_seg_new, "iv_tweak", crypt_jobj_new_uint64(rh->offset >> SECTOR_SHIFT));
255 tmp = json_segment_get_size(jobj_seg_new, 0) + rh->length;
258 /* alter size of new segment, reenc_seg == 0 we're finished */
259 json_object_object_add(jobj_seg_new, "size", reenc_seg > 0 ? crypt_jobj_new_uint64(tmp) : json_object_new_string("dynamic"));
260 json_object_object_add_by_uint(jobj_segs_post, reenc_seg, jobj_seg_new);
262 return jobj_segs_post;
264 json_object_put(jobj_segs_post);
268 static json_object *reencrypt_make_hot_segments_encrypt_shift(struct luks2_hdr *hdr,
269 struct luks2_reencrypt *rh,
270 uint64_t data_offset)
272 int sg, crypt_seg, i = 0;
273 uint64_t segment_size;
274 json_object *jobj_seg_shrunk, *jobj_seg_new, *jobj_copy, *jobj_enc_seg = NULL,
275 *jobj_segs_hot = json_object_new_object();
280 crypt_seg = LUKS2_segment_by_type(hdr, "crypt");
282 /* FIXME: This is hack. Find proper way to fix it. */
283 sg = LUKS2_last_segment_by_type(hdr, "linear");
284 if (rh->offset && sg < 0)
287 return jobj_segs_hot;
289 jobj_enc_seg = json_segment_create_crypt(data_offset + rh->offset,
290 rh->offset >> SECTOR_SHIFT,
292 reencrypt_segment_cipher_new(hdr),
293 reencrypt_get_sector_size_new(hdr),
297 jobj_copy = LUKS2_get_segment_jobj(hdr, i);
300 json_object_object_add_by_uint(jobj_segs_hot, i++, json_object_get(jobj_copy));
303 segment_size = LUKS2_segment_size(hdr, sg, 0);
304 if (segment_size > rh->length) {
305 jobj_seg_shrunk = NULL;
306 if (json_object_copy(LUKS2_get_segment_jobj(hdr, sg), &jobj_seg_shrunk))
308 json_object_object_add(jobj_seg_shrunk, "size", crypt_jobj_new_uint64(segment_size - rh->length));
309 json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_seg_shrunk);
312 json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_enc_seg);
313 jobj_enc_seg = NULL; /* see err: label */
315 /* first crypt segment after encryption ? */
316 if (crypt_seg >= 0) {
317 jobj_seg_new = LUKS2_get_segment_jobj(hdr, crypt_seg);
320 json_object_object_add_by_uint(jobj_segs_hot, sg, json_object_get(jobj_seg_new));
323 return jobj_segs_hot;
325 json_object_put(jobj_enc_seg);
326 json_object_put(jobj_segs_hot);
331 static json_object *reencrypt_make_segment_new(struct crypt_device *cd,
332 struct luks2_hdr *hdr,
333 const struct luks2_reencrypt *rh,
334 uint64_t data_offset,
335 uint64_t segment_offset,
337 const uint64_t *segment_length)
340 case CRYPT_REENCRYPT_REENCRYPT:
341 case CRYPT_REENCRYPT_ENCRYPT:
342 return json_segment_create_crypt(data_offset + segment_offset,
343 crypt_get_iv_offset(cd) + (iv_offset >> SECTOR_SHIFT),
345 reencrypt_segment_cipher_new(hdr),
346 reencrypt_get_sector_size_new(hdr), 0);
347 case CRYPT_REENCRYPT_DECRYPT:
348 return json_segment_create_linear(data_offset + segment_offset, segment_length, 0);
354 static json_object *reencrypt_make_post_segments_forward(struct crypt_device *cd,
355 struct luks2_hdr *hdr,
356 struct luks2_reencrypt *rh,
357 uint64_t data_offset)
360 json_object *jobj_new_seg_after, *jobj_old_seg, *jobj_old_seg_copy = NULL,
361 *jobj_segs_post = json_object_new_object();
362 uint64_t fixed_length = rh->offset + rh->length;
364 if (!rh->jobj_segs_hot || !jobj_segs_post)
367 reenc_seg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot);
371 jobj_old_seg = json_segments_get_segment(rh->jobj_segs_hot, reenc_seg + 1);
374 * if there's no old segment after reencryption, we're done.
375 * Set size to 'dynamic' again.
377 jobj_new_seg_after = reencrypt_make_segment_new(cd, hdr, rh, data_offset, 0, 0, jobj_old_seg ? &fixed_length : NULL);
378 if (!jobj_new_seg_after)
380 json_object_object_add_by_uint(jobj_segs_post, 0, jobj_new_seg_after);
383 if (rh->fixed_length) {
384 if (json_object_copy(jobj_old_seg, &jobj_old_seg_copy))
386 jobj_old_seg = jobj_old_seg_copy;
387 fixed_length = rh->device_size - fixed_length;
388 json_object_object_add(jobj_old_seg, "size", crypt_jobj_new_uint64(fixed_length));
390 json_object_get(jobj_old_seg);
391 json_object_object_add_by_uint(jobj_segs_post, 1, jobj_old_seg);
394 return jobj_segs_post;
396 json_object_put(jobj_segs_post);
400 static json_object *reencrypt_make_post_segments_backward(struct crypt_device *cd,
401 struct luks2_hdr *hdr,
402 struct luks2_reencrypt *rh,
403 uint64_t data_offset)
406 uint64_t fixed_length;
408 json_object *jobj_new_seg_after, *jobj_old_seg,
409 *jobj_segs_post = json_object_new_object();
411 if (!rh->jobj_segs_hot || !jobj_segs_post)
414 reenc_seg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot);
418 jobj_old_seg = json_segments_get_segment(rh->jobj_segs_hot, reenc_seg - 1);
420 json_object_object_add_by_uint(jobj_segs_post, reenc_seg - 1, json_object_get(jobj_old_seg));
421 if (rh->fixed_length && rh->offset) {
422 fixed_length = rh->device_size - rh->offset;
423 jobj_new_seg_after = reencrypt_make_segment_new(cd, hdr, rh, data_offset, rh->offset, rh->offset, &fixed_length);
425 jobj_new_seg_after = reencrypt_make_segment_new(cd, hdr, rh, data_offset, rh->offset, rh->offset, NULL);
426 if (!jobj_new_seg_after)
428 json_object_object_add_by_uint(jobj_segs_post, reenc_seg, jobj_new_seg_after);
430 return jobj_segs_post;
432 json_object_put(jobj_segs_post);
436 static json_object *reencrypt_make_segment_reencrypt(struct crypt_device *cd,
437 struct luks2_hdr *hdr,
438 const struct luks2_reencrypt *rh,
439 uint64_t data_offset,
440 uint64_t segment_offset,
442 const uint64_t *segment_length)
445 case CRYPT_REENCRYPT_REENCRYPT:
446 case CRYPT_REENCRYPT_ENCRYPT:
447 return json_segment_create_crypt(data_offset + segment_offset,
448 crypt_get_iv_offset(cd) + (iv_offset >> SECTOR_SHIFT),
450 reencrypt_segment_cipher_new(hdr),
451 reencrypt_get_sector_size_new(hdr), 1);
452 case CRYPT_REENCRYPT_DECRYPT:
453 return json_segment_create_linear(data_offset + segment_offset, segment_length, 1);
459 static json_object *reencrypt_make_segment_old(struct crypt_device *cd,
460 struct luks2_hdr *hdr,
461 const struct luks2_reencrypt *rh,
462 uint64_t data_offset,
463 uint64_t segment_offset,
464 const uint64_t *segment_length)
466 json_object *jobj_old_seg = NULL;
469 case CRYPT_REENCRYPT_REENCRYPT:
470 case CRYPT_REENCRYPT_DECRYPT:
471 jobj_old_seg = json_segment_create_crypt(data_offset + segment_offset,
472 crypt_get_iv_offset(cd) + (segment_offset >> SECTOR_SHIFT),
474 reencrypt_segment_cipher_old(hdr),
475 reencrypt_get_sector_size_old(hdr),
478 case CRYPT_REENCRYPT_ENCRYPT:
479 jobj_old_seg = json_segment_create_linear(data_offset + segment_offset, segment_length, 0);
485 static json_object *reencrypt_make_hot_segments_forward(struct crypt_device *cd,
486 struct luks2_hdr *hdr,
487 struct luks2_reencrypt *rh,
488 uint64_t device_size,
489 uint64_t data_offset)
491 json_object *jobj_segs_hot, *jobj_reenc_seg, *jobj_old_seg, *jobj_new_seg;
492 uint64_t fixed_length, tmp = rh->offset + rh->length;
495 jobj_segs_hot = json_object_new_object();
500 jobj_new_seg = reencrypt_make_segment_new(cd, hdr, rh, data_offset, 0, 0, &rh->offset);
503 json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_new_seg);
506 jobj_reenc_seg = reencrypt_make_segment_reencrypt(cd, hdr, rh, data_offset, rh->offset, rh->offset, &rh->length);
510 json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_reenc_seg);
512 if (tmp < device_size) {
513 fixed_length = device_size - tmp;
514 jobj_old_seg = reencrypt_make_segment_old(cd, hdr, rh, data_offset + data_shift_value(&rh->rp),
515 rh->offset + rh->length, rh->fixed_length ? &fixed_length : NULL);
518 json_object_object_add_by_uint(jobj_segs_hot, sg, jobj_old_seg);
521 return jobj_segs_hot;
523 json_object_put(jobj_segs_hot);
527 static json_object *reencrypt_make_hot_segments_decrypt_shift(struct crypt_device *cd,
528 struct luks2_hdr *hdr, struct luks2_reencrypt *rh,
529 uint64_t device_size, uint64_t data_offset)
531 json_object *jobj_segs_hot, *jobj_reenc_seg, *jobj_old_seg, *jobj_new_seg;
532 uint64_t fixed_length, tmp = rh->offset + rh->length, linear_length = rh->progress;
535 jobj_segs_hot = json_object_new_object();
540 jobj_new_seg = LUKS2_get_segment_jobj(hdr, 0);
543 json_object_object_add_by_uint(jobj_segs_hot, sg++, json_object_get(jobj_new_seg));
546 jobj_new_seg = reencrypt_make_segment_new(cd, hdr, rh,
548 json_segment_get_size(jobj_new_seg, 0),
553 json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_new_seg);
557 jobj_reenc_seg = reencrypt_make_segment_reencrypt(cd, hdr, rh, data_offset,
564 json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_reenc_seg);
566 if (!rh->offset && (jobj_new_seg = LUKS2_get_segment_jobj(hdr, 1)) &&
567 !json_segment_is_backup(jobj_new_seg))
568 json_object_object_add_by_uint(jobj_segs_hot, sg++, json_object_get(jobj_new_seg));
569 else if (tmp < device_size) {
570 fixed_length = device_size - tmp;
571 jobj_old_seg = reencrypt_make_segment_old(cd, hdr, rh,
572 data_offset + data_shift_value(&rh->rp),
573 rh->offset + rh->length,
574 rh->fixed_length ? &fixed_length : NULL);
577 json_object_object_add_by_uint(jobj_segs_hot, sg, jobj_old_seg);
580 return jobj_segs_hot;
582 json_object_put(jobj_segs_hot);
586 static json_object *_dec_create_segments_shift_after(struct crypt_device *cd,
587 struct luks2_hdr *hdr,
588 struct luks2_reencrypt *rh,
589 uint64_t data_offset)
591 int reenc_seg, i = 0;
592 json_object *jobj_copy, *jobj_seg_old, *jobj_seg_new,
593 *jobj_segs_post = json_object_new_object();
597 if (!rh->jobj_segs_hot || !jobj_segs_post)
600 segs = json_segments_count(rh->jobj_segs_hot);
602 return jobj_segs_post;
604 reenc_seg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot);
608 if (reenc_seg == 0) {
609 jobj_seg_new = reencrypt_make_segment_new(cd, hdr, rh, data_offset, 0, 0, NULL);
612 json_object_object_add_by_uint(jobj_segs_post, 0, jobj_seg_new);
614 return jobj_segs_post;
617 jobj_copy = json_segments_get_segment(rh->jobj_segs_hot, 0);
620 json_object_object_add_by_uint(jobj_segs_post, i++, json_object_get(jobj_copy));
622 jobj_seg_old = json_segments_get_segment(rh->jobj_segs_hot, reenc_seg + 1);
624 tmp = rh->length + rh->progress;
625 jobj_seg_new = reencrypt_make_segment_new(cd, hdr, rh, data_offset,
626 json_segment_get_size(rh->jobj_segment_moved, 0),
627 data_shift_value(&rh->rp),
628 jobj_seg_old ? &tmp : NULL);
629 json_object_object_add_by_uint(jobj_segs_post, i++, jobj_seg_new);
632 json_object_object_add_by_uint(jobj_segs_post, i, json_object_get(jobj_seg_old));
634 return jobj_segs_post;
636 json_object_put(jobj_segs_post);
640 static json_object *reencrypt_make_hot_segments_backward(struct crypt_device *cd,
641 struct luks2_hdr *hdr,
642 struct luks2_reencrypt *rh,
643 uint64_t device_size,
644 uint64_t data_offset)
646 json_object *jobj_reenc_seg, *jobj_new_seg, *jobj_old_seg = NULL,
647 *jobj_segs_hot = json_object_new_object();
649 uint64_t fixed_length, tmp = rh->offset + rh->length;
655 if (json_object_copy(LUKS2_get_segment_jobj(hdr, 0), &jobj_old_seg))
657 json_object_object_add(jobj_old_seg, "size", crypt_jobj_new_uint64(rh->offset));
659 json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_old_seg);
662 jobj_reenc_seg = reencrypt_make_segment_reencrypt(cd, hdr, rh, data_offset, rh->offset, rh->offset, &rh->length);
666 json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_reenc_seg);
668 if (tmp < device_size) {
669 fixed_length = device_size - tmp;
670 jobj_new_seg = reencrypt_make_segment_new(cd, hdr, rh, data_offset, rh->offset + rh->length,
671 rh->offset + rh->length, rh->fixed_length ? &fixed_length : NULL);
674 json_object_object_add_by_uint(jobj_segs_hot, sg, jobj_new_seg);
677 return jobj_segs_hot;
679 json_object_put(jobj_segs_hot);
683 static int reencrypt_make_hot_segments(struct crypt_device *cd,
684 struct luks2_hdr *hdr,
685 struct luks2_reencrypt *rh,
686 uint64_t device_size,
687 uint64_t data_offset)
689 rh->jobj_segs_hot = NULL;
691 if (rh->mode == CRYPT_REENCRYPT_ENCRYPT && rh->direction == CRYPT_REENCRYPT_BACKWARD &&
692 rh->rp.type == REENC_PROTECTION_DATASHIFT && rh->jobj_segment_moved) {
693 log_dbg(cd, "Calculating hot segments for encryption with data move.");
694 rh->jobj_segs_hot = reencrypt_make_hot_segments_encrypt_shift(hdr, rh, data_offset);
695 } else if (rh->mode == CRYPT_REENCRYPT_DECRYPT && rh->direction == CRYPT_REENCRYPT_FORWARD &&
696 rh->rp.type == REENC_PROTECTION_DATASHIFT && rh->jobj_segment_moved) {
697 log_dbg(cd, "Calculating hot segments for decryption with data move.");
698 rh->jobj_segs_hot = reencrypt_make_hot_segments_decrypt_shift(cd, hdr, rh, device_size, data_offset);
699 } else if (rh->direction == CRYPT_REENCRYPT_FORWARD) {
700 log_dbg(cd, "Calculating hot segments (forward direction).");
701 rh->jobj_segs_hot = reencrypt_make_hot_segments_forward(cd, hdr, rh, device_size, data_offset);
702 } else if (rh->direction == CRYPT_REENCRYPT_BACKWARD) {
703 log_dbg(cd, "Calculating hot segments (backward direction).");
704 rh->jobj_segs_hot = reencrypt_make_hot_segments_backward(cd, hdr, rh, device_size, data_offset);
707 return rh->jobj_segs_hot ? 0 : -EINVAL;
710 static int reencrypt_make_post_segments(struct crypt_device *cd,
711 struct luks2_hdr *hdr,
712 struct luks2_reencrypt *rh,
713 uint64_t data_offset)
715 rh->jobj_segs_post = NULL;
717 if (rh->mode == CRYPT_REENCRYPT_ENCRYPT && rh->direction == CRYPT_REENCRYPT_BACKWARD &&
718 rh->rp.type == REENC_PROTECTION_DATASHIFT && rh->jobj_segment_moved) {
719 log_dbg(cd, "Calculating post segments for encryption with data move.");
720 rh->jobj_segs_post = _enc_create_segments_shift_after(rh, data_offset);
721 } else if (rh->mode == CRYPT_REENCRYPT_DECRYPT && rh->direction == CRYPT_REENCRYPT_FORWARD &&
722 rh->rp.type == REENC_PROTECTION_DATASHIFT && rh->jobj_segment_moved) {
723 log_dbg(cd, "Calculating post segments for decryption with data move.");
724 rh->jobj_segs_post = _dec_create_segments_shift_after(cd, hdr, rh, data_offset);
725 } else if (rh->direction == CRYPT_REENCRYPT_FORWARD) {
726 log_dbg(cd, "Calculating post segments (forward direction).");
727 rh->jobj_segs_post = reencrypt_make_post_segments_forward(cd, hdr, rh, data_offset);
728 } else if (rh->direction == CRYPT_REENCRYPT_BACKWARD) {
729 log_dbg(cd, "Calculating segments (backward direction).");
730 rh->jobj_segs_post = reencrypt_make_post_segments_backward(cd, hdr, rh, data_offset);
733 return rh->jobj_segs_post ? 0 : -EINVAL;
736 static uint64_t reencrypt_data_shift(struct luks2_hdr *hdr)
738 json_object *jobj_keyslot, *jobj_area, *jobj_data_shift;
739 int ks = LUKS2_find_keyslot(hdr, "reencrypt");
744 jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, ks);
746 json_object_object_get_ex(jobj_keyslot, "area", &jobj_area);
747 if (!json_object_object_get_ex(jobj_area, "shift_size", &jobj_data_shift))
750 return crypt_jobj_get_uint64(jobj_data_shift);
753 static crypt_reencrypt_mode_info reencrypt_mode(struct luks2_hdr *hdr)
756 crypt_reencrypt_mode_info mi = CRYPT_REENCRYPT_REENCRYPT;
757 json_object *jobj_keyslot, *jobj_mode;
759 jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, LUKS2_find_keyslot(hdr, "reencrypt"));
763 json_object_object_get_ex(jobj_keyslot, "mode", &jobj_mode);
764 mode = json_object_get_string(jobj_mode);
766 /* validation enforces allowed values */
767 if (!strcmp(mode, "encrypt"))
768 mi = CRYPT_REENCRYPT_ENCRYPT;
769 else if (!strcmp(mode, "decrypt"))
770 mi = CRYPT_REENCRYPT_DECRYPT;
775 static crypt_reencrypt_direction_info reencrypt_direction(struct luks2_hdr *hdr)
778 json_object *jobj_keyslot, *jobj_mode;
779 crypt_reencrypt_direction_info di = CRYPT_REENCRYPT_FORWARD;
781 jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, LUKS2_find_keyslot(hdr, "reencrypt"));
785 json_object_object_get_ex(jobj_keyslot, "direction", &jobj_mode);
786 value = json_object_get_string(jobj_mode);
788 /* validation enforces allowed values */
789 if (strcmp(value, "forward"))
790 di = CRYPT_REENCRYPT_BACKWARD;
795 typedef enum { REENC_OK = 0, REENC_ERR, REENC_ROLLBACK, REENC_FATAL } reenc_status_t;
797 void LUKS2_reencrypt_protection_erase(struct reenc_protection *rp)
799 if (!rp || rp->type != REENC_PROTECTION_CHECKSUM)
803 crypt_hash_destroy(rp->p.csum.ch);
804 rp->p.csum.ch = NULL;
807 if (rp->p.csum.checksums) {
808 crypt_safe_memzero(rp->p.csum.checksums, rp->p.csum.checksums_len);
809 free(rp->p.csum.checksums);
810 rp->p.csum.checksums = NULL;
814 void LUKS2_reencrypt_free(struct crypt_device *cd, struct luks2_reencrypt *rh)
819 LUKS2_reencrypt_protection_erase(&rh->rp);
820 LUKS2_reencrypt_protection_erase(&rh->rp_moved_segment);
822 json_object_put(rh->jobj_segs_hot);
823 rh->jobj_segs_hot = NULL;
824 json_object_put(rh->jobj_segs_post);
825 rh->jobj_segs_post = NULL;
826 json_object_put(rh->jobj_segment_old);
827 rh->jobj_segment_old = NULL;
828 json_object_put(rh->jobj_segment_new);
829 rh->jobj_segment_new = NULL;
830 json_object_put(rh->jobj_segment_moved);
831 rh->jobj_segment_moved = NULL;
833 free(rh->reenc_buffer);
834 rh->reenc_buffer = NULL;
835 crypt_storage_wrapper_destroy(rh->cw1);
837 crypt_storage_wrapper_destroy(rh->cw2);
840 free(rh->device_name);
841 free(rh->overlay_name);
842 free(rh->hotzone_name);
843 crypt_drop_keyring_key(cd, rh->vks);
844 crypt_free_volume_key(rh->vks);
845 device_release_excl(cd, crypt_data_device(cd));
846 crypt_unlock_internal(cd, rh->reenc_lock);
850 int LUKS2_reencrypt_max_hotzone_size(struct crypt_device *cd,
851 struct luks2_hdr *hdr,
852 const struct reenc_protection *rp,
853 int reencrypt_keyslot,
856 #if USE_LUKS2_REENCRYPTION
858 uint64_t dummy, area_length;
864 if (rp->type <= REENC_PROTECTION_NONE) {
865 *r_length = LUKS2_REENCRYPT_MAX_HOTZONE_LENGTH;
869 if (rp->type == REENC_PROTECTION_DATASHIFT) {
870 *r_length = rp->p.ds.data_shift;
874 r = LUKS2_keyslot_area(hdr, reencrypt_keyslot, &dummy, &area_length);
878 if (rp->type == REENC_PROTECTION_JOURNAL) {
879 *r_length = area_length;
883 if (rp->type == REENC_PROTECTION_CHECKSUM) {
884 *r_length = (area_length / rp->p.csum.hash_size) * rp->p.csum.block_size;
893 #if USE_LUKS2_REENCRYPTION
894 static size_t reencrypt_get_alignment(struct crypt_device *cd,
895 struct luks2_hdr *hdr)
897 size_t ss, alignment = device_block_size(cd, crypt_data_device(cd));
899 ss = reencrypt_get_sector_size_old(hdr);
902 ss = reencrypt_get_sector_size_new(hdr);
909 /* returns void because it must not fail on valid LUKS2 header */
910 static void _load_backup_segments(struct luks2_hdr *hdr,
911 struct luks2_reencrypt *rh)
913 int segment = LUKS2_get_segment_id_by_flag(hdr, "backup-final");
916 rh->jobj_segment_new = json_object_get(LUKS2_get_segment_jobj(hdr, segment));
917 rh->digest_new = LUKS2_digest_by_segment(hdr, segment);
919 rh->jobj_segment_new = NULL;
920 rh->digest_new = -ENOENT;
923 segment = LUKS2_get_segment_id_by_flag(hdr, "backup-previous");
925 rh->jobj_segment_old = json_object_get(LUKS2_get_segment_jobj(hdr, segment));
926 rh->digest_old = LUKS2_digest_by_segment(hdr, segment);
928 rh->jobj_segment_old = NULL;
929 rh->digest_old = -ENOENT;
932 segment = LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment");
934 rh->jobj_segment_moved = json_object_get(LUKS2_get_segment_jobj(hdr, segment));
936 rh->jobj_segment_moved = NULL;
939 static int reencrypt_offset_backward_moved(struct luks2_hdr *hdr, json_object *jobj_segments,
940 uint64_t *reencrypt_length, uint64_t data_shift, uint64_t *offset)
942 uint64_t tmp, linear_length = 0;
943 int sg, segs = json_segments_count(jobj_segments);
945 /* find reencrypt offset with data shift */
946 for (sg = 0; sg < segs; sg++)
947 if (LUKS2_segment_is_type(hdr, sg, "linear"))
948 linear_length += LUKS2_segment_size(hdr, sg, 0);
950 /* all active linear segments length */
951 if (linear_length && segs > 1) {
952 if (linear_length < data_shift)
954 tmp = linear_length - data_shift;
955 if (tmp && tmp < data_shift) {
956 *offset = data_shift;
957 *reencrypt_length = tmp;
968 /* should be unreachable */
973 static int reencrypt_offset_forward_moved(struct luks2_hdr *hdr,
974 json_object *jobj_segments,
978 int last_crypt = LUKS2_last_segment_by_type(hdr, "crypt");
980 /* if last crypt segment exists and it's first one, just return offset = 0 */
981 if (last_crypt <= 0) {
986 *offset = LUKS2_segment_offset(hdr, last_crypt, 0) - data_shift;
990 static int _offset_forward(json_object *jobj_segments, uint64_t *offset)
992 int segs = json_segments_count(jobj_segments);
996 else if (segs == 2) {
997 *offset = json_segment_get_size(json_segments_get_segment(jobj_segments, 0), 0);
1006 static int _offset_backward(json_object *jobj_segments, uint64_t device_size, uint64_t *length, uint64_t *offset)
1008 int segs = json_segments_count(jobj_segments);
1012 if (device_size < *length)
1013 *length = device_size;
1014 *offset = device_size - *length;
1015 } else if (segs == 2) {
1016 tmp = json_segment_get_size(json_segments_get_segment(jobj_segments, 0), 0);
1019 *offset = tmp - *length;
1026 /* must be always relative to data offset */
1027 /* the LUKS2 header MUST be valid */
1028 static int reencrypt_offset(struct luks2_hdr *hdr,
1029 crypt_reencrypt_direction_info di,
1030 uint64_t device_size,
1031 uint64_t *reencrypt_length,
1035 json_object *jobj_segments;
1036 uint64_t data_shift = reencrypt_data_shift(hdr);
1041 /* if there's segment in reencryption return directly offset of it */
1042 json_object_object_get_ex(hdr->jobj, "segments", &jobj_segments);
1043 sg = json_segments_segment_in_reencrypt(jobj_segments);
1045 *offset = LUKS2_segment_offset(hdr, sg, 0) - (reencrypt_get_data_offset_new(hdr));
1049 if (di == CRYPT_REENCRYPT_FORWARD) {
1050 if (reencrypt_mode(hdr) == CRYPT_REENCRYPT_DECRYPT &&
1051 LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment") >= 0) {
1052 r = reencrypt_offset_forward_moved(hdr, jobj_segments, data_shift, offset);
1053 if (!r && *offset > device_size)
1054 *offset = device_size;
1057 return _offset_forward(jobj_segments, offset);
1058 } else if (di == CRYPT_REENCRYPT_BACKWARD) {
1059 if (reencrypt_mode(hdr) == CRYPT_REENCRYPT_ENCRYPT &&
1060 LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment") >= 0)
1061 return reencrypt_offset_backward_moved(hdr, jobj_segments, reencrypt_length, data_shift, offset);
1062 return _offset_backward(jobj_segments, device_size, reencrypt_length, offset);
1068 static uint64_t reencrypt_length(struct crypt_device *cd,
1069 struct reenc_protection *rp,
1070 uint64_t keyslot_area_length,
1071 uint64_t length_max,
1074 unsigned long dummy, optimal_alignment;
1075 uint64_t length, soft_mem_limit;
1077 if (rp->type == REENC_PROTECTION_NONE)
1078 length = length_max ?: LUKS2_DEFAULT_NONE_REENCRYPTION_LENGTH;
1079 else if (rp->type == REENC_PROTECTION_CHECKSUM)
1080 length = (keyslot_area_length / rp->p.csum.hash_size) * rp->p.csum.block_size;
1081 else if (rp->type == REENC_PROTECTION_DATASHIFT)
1082 return rp->p.ds.data_shift;
1084 length = keyslot_area_length;
1087 if (length > LUKS2_REENCRYPT_MAX_HOTZONE_LENGTH)
1088 length = LUKS2_REENCRYPT_MAX_HOTZONE_LENGTH;
1090 /* soft limit is 1/4 of system memory */
1091 soft_mem_limit = crypt_getphysmemory_kb() << 8; /* multiply by (1024/4) */
1093 if (soft_mem_limit && length > soft_mem_limit)
1094 length = soft_mem_limit;
1096 if (length_max && length > length_max)
1097 length = length_max;
1099 length -= (length % alignment);
1101 /* Emits error later */
1105 device_topology_alignment(cd, crypt_data_device(cd), &optimal_alignment, &dummy, length);
1107 /* we have to stick with encryption sector size alignment */
1108 if (optimal_alignment % alignment)
1111 /* align to opt-io size only if remaining size allows it */
1112 if (length > optimal_alignment)
1113 length -= (length % optimal_alignment);
1118 static int reencrypt_context_init(struct crypt_device *cd,
1119 struct luks2_hdr *hdr,
1120 struct luks2_reencrypt *rh,
1121 uint64_t device_size,
1122 uint64_t max_hotzone_size,
1123 uint64_t fixed_device_size)
1127 uint64_t dummy, area_length;
1129 rh->reenc_keyslot = LUKS2_find_keyslot(hdr, "reencrypt");
1130 if (rh->reenc_keyslot < 0)
1132 if (LUKS2_keyslot_area(hdr, rh->reenc_keyslot, &dummy, &area_length) < 0)
1135 rh->mode = reencrypt_mode(hdr);
1137 rh->direction = reencrypt_direction(hdr);
1139 r = LUKS2_keyslot_reencrypt_load(cd, hdr, rh->reenc_keyslot, &rh->rp, true);
1143 if (rh->rp.type == REENC_PROTECTION_CHECKSUM)
1144 alignment = rh->rp.p.csum.block_size;
1146 alignment = reencrypt_get_alignment(cd, hdr);
1151 if ((max_hotzone_size << SECTOR_SHIFT) % alignment) {
1152 log_err(cd, _("Hotzone size must be multiple of calculated zone alignment (%zu bytes)."), alignment);
1156 if ((fixed_device_size << SECTOR_SHIFT) % alignment) {
1157 log_err(cd, _("Device size must be multiple of calculated zone alignment (%zu bytes)."), alignment);
1161 if (fixed_device_size) {
1162 log_dbg(cd, "Switching reencryption to fixed size mode.");
1163 device_size = fixed_device_size << SECTOR_SHIFT;
1164 rh->fixed_length = true;
1166 rh->fixed_length = false;
1168 rh->length = reencrypt_length(cd, &rh->rp, area_length, max_hotzone_size << SECTOR_SHIFT, alignment);
1170 log_dbg(cd, "Invalid reencryption length.");
1174 if (reencrypt_offset(hdr, rh->direction, device_size, &rh->length, &rh->offset)) {
1175 log_dbg(cd, "Failed to get reencryption offset.");
1179 if (rh->offset > device_size)
1181 if (rh->length > device_size - rh->offset)
1182 rh->length = device_size - rh->offset;
1184 _load_backup_segments(hdr, rh);
1186 r = LUKS2_keyslot_reencrypt_load(cd, hdr, rh->reenc_keyslot, &rh->rp_moved_segment, false);
1190 if (rh->rp_moved_segment.type == REENC_PROTECTION_NOT_SET)
1191 log_dbg(cd, "No moved segment resilience configured.");
1193 if (rh->direction == CRYPT_REENCRYPT_BACKWARD)
1194 rh->progress = device_size - rh->offset - rh->length;
1195 else if (rh->jobj_segment_moved && rh->direction == CRYPT_REENCRYPT_FORWARD) {
1196 if (rh->offset == json_segment_get_offset(LUKS2_get_segment_by_flag(hdr, "backup-moved-segment"), false))
1197 rh->progress = device_size - json_segment_get_size(LUKS2_get_segment_by_flag(hdr, "backup-moved-segment"), false);
1199 rh->progress = rh->offset - json_segment_get_size(rh->jobj_segment_moved, 0);
1201 rh->progress = rh->offset;
1203 log_dbg(cd, "reencrypt-direction: %s", rh->direction == CRYPT_REENCRYPT_FORWARD ? "forward" : "backward");
1204 log_dbg(cd, "backup-previous digest id: %d", rh->digest_old);
1205 log_dbg(cd, "backup-final digest id: %d", rh->digest_new);
1206 log_dbg(cd, "reencrypt length: %" PRIu64, rh->length);
1207 log_dbg(cd, "reencrypt offset: %" PRIu64, rh->offset);
1208 log_dbg(cd, "reencrypt shift: %s%" PRIu64,
1209 (rh->rp.type == REENC_PROTECTION_DATASHIFT && rh->direction == CRYPT_REENCRYPT_BACKWARD ? "-" : ""),
1210 data_shift_value(&rh->rp));
1211 log_dbg(cd, "reencrypt alignment: %zu", alignment);
1212 log_dbg(cd, "reencrypt progress: %" PRIu64, rh->progress);
1214 rh->device_size = device_size;
1216 return rh->length < 512 ? -EINVAL : 0;
1219 static size_t reencrypt_buffer_length(struct luks2_reencrypt *rh)
1221 if (rh->rp.type == REENC_PROTECTION_DATASHIFT)
1222 return data_shift_value(&rh->rp);
/*
 * Allocate and initialize a reencryption context from metadata already
 * stored in the LUKS2 header (reencryption was initialized earlier).
 * On success *rh owns the new context; on failure it is freed here.
 *
 * NOTE(review): this excerpt elides several original lines (allocation
 * failure checks and error-path control flow); comments below describe
 * only the visible statements.
 */
1226 static int reencrypt_load_clean(struct crypt_device *cd,
1227 struct luks2_hdr *hdr,
1228 uint64_t device_size,
1229 uint64_t max_hotzone_size,
1230 uint64_t fixed_device_size,
1231 struct luks2_reencrypt **rh)
/* zero-initialized so LUKS2_reencrypt_free() is safe on partial init */
1234 struct luks2_reencrypt *tmp = crypt_zalloc(sizeof (*tmp));
1239 log_dbg(cd, "Loading stored reencryption context.");
1241 r = reencrypt_context_init(cd, hdr, tmp, device_size, max_hotzone_size, fixed_device_size);
/* reencryption buffer must be aligned for direct I/O on the data device */
1245 if (posix_memalign(&tmp->reenc_buffer, device_alignment(crypt_data_device(cd)),
1246 reencrypt_buffer_length(tmp))) {
/* error path: release the partially constructed context */
1255 LUKS2_reencrypt_free(cd, tmp);
/*
 * Calculate both segment layouts for the upcoming hotzone: the 'hot'
 * layout (valid while the hotzone is being reencrypted) and the 'post'
 * layout (valid once the hotzone is finished). If building the post
 * layout fails, the already-built hot layout is dropped again.
 *
 * NOTE(review): error-path control flow between the visible statements
 * is elided in this excerpt.
 */
1260 static int reencrypt_make_segments(struct crypt_device *cd,
1261 struct luks2_hdr *hdr,
1262 struct luks2_reencrypt *rh,
1263 uint64_t device_size)
/* byte offset of the data area in the *new* (final) layout */
1266 uint64_t data_offset = reencrypt_get_data_offset_new(hdr);
1268 log_dbg(cd, "Calculating segments.");
1270 r = reencrypt_make_hot_segments(cd, hdr, rh, device_size, data_offset);
1272 r = reencrypt_make_post_segments(cd, hdr, rh, data_offset);
/* post-segment failure: drop the hot layout built above */
1274 json_object_put(rh->jobj_segs_hot);
1278 log_dbg(cd, "Failed to make reencryption segments.");
/*
 * Rebuild segment layouts after a crash. The 'hot' layout is not
 * recalculated: it is taken verbatim from the on-disk header segments
 * (skipping backup segments), since the header already describes the
 * interrupted state. Only the 'post' layout is computed fresh.
 *
 * NOTE(review): some error-path lines are elided in this excerpt.
 */
1283 static int reencrypt_make_segments_crashed(struct crypt_device *cd,
1284 struct luks2_hdr *hdr,
1285 struct luks2_reencrypt *rh)
1288 uint64_t data_offset = crypt_get_data_offset(cd) << SECTOR_SHIFT;
1293 rh->jobj_segs_hot = json_object_new_object();
1294 if (!rh->jobj_segs_hot)
/* copy current header segments into the hot layout, minus backups */
1297 json_object_object_foreach(LUKS2_get_segments_jobj(hdr), key, val) {
1298 if (json_segment_is_backup(val))
1300 json_object_object_add(rh->jobj_segs_hot, key, json_object_get(val));
1303 r = reencrypt_make_post_segments(cd, hdr, rh, data_offset);
/* on failure release the copied hot layout */
1305 json_object_put(rh->jobj_segs_hot);
1306 rh->jobj_segs_hot = NULL;
/*
 * Load a reencryption context for a device where reencryption was
 * interrupted (e.g. by a crash). Reuses the clean-load path with a
 * fixed device size derived from the header, then restores the exact
 * hotzone length from the segment currently marked in-reencryption.
 *
 * NOTE(review): several checks between the visible statements are
 * elided in this excerpt.
 */
1312 static int reencrypt_load_crashed(struct crypt_device *cd,
1313 struct luks2_hdr *hdr, uint64_t device_size, struct luks2_reencrypt **rh)
1316 uint64_t required_device_size;
1319 if (LUKS2_get_data_size(hdr, &required_device_size, &dynamic))
/* dynamic data size: let the clean loader use the real device size */
1323 required_device_size = 0;
/* reencrypt_load_clean() expects the size in 512B sectors */
1325 required_device_size >>= SECTOR_SHIFT;
1327 r = reencrypt_load_clean(cd, hdr, device_size, 0, required_device_size, rh);
1330 reenc_seg = json_segments_segment_in_reencrypt(LUKS2_get_segments_jobj(hdr));
/* resume with the exact hotzone length recorded in the header */
1334 (*rh)->length = LUKS2_segment_size(hdr, reenc_seg, 0);
1338 r = reencrypt_make_segments_crashed(cd, hdr, *rh);
/* error path: drop the context allocated by reencrypt_load_clean() */
1341 LUKS2_reencrypt_free(cd, *rh);
/*
 * Create the two storage wrappers used during reencryption:
 * cw1 reads data encrypted with the *old* segment parameters (opened
 * read-only), cw2 writes data with the *new* segment parameters.
 *
 * NOTE(review): return-value checks between the visible statements are
 * elided in this excerpt.
 */
1347 static int reencrypt_init_storage_wrappers(struct crypt_device *cd,
1348 struct luks2_hdr *hdr,
1349 struct luks2_reencrypt *rh,
1350 struct volume_key *vks)
1353 struct volume_key *vk;
/* disable kernel crypto API backend only when both real and effective
 * uid are 0 (running as root) */
1354 uint32_t wrapper_flags = (getuid() || geteuid()) ? 0 : DISABLE_KCAPI;
/* old-segment wrapper: keyed by the old digest, strictly read-only */
1356 vk = crypt_volume_key_by_id(vks, rh->digest_old);
1357 r = crypt_storage_wrapper_init(cd, &rh->cw1, crypt_data_device(cd),
1358 reencrypt_get_data_offset_old(hdr),
1359 crypt_get_iv_offset(cd),
1360 reencrypt_get_sector_size_old(hdr),
1361 reencrypt_segment_cipher_old(hdr),
1362 vk, wrapper_flags | OPEN_READONLY);
1364 log_err(cd, _("Failed to initialize old segment storage wrapper."));
/* remember flags so the wrapper can be recreated later */
1367 rh->wflags1 = wrapper_flags | OPEN_READONLY;
1368 log_dbg(cd, "Old cipher storage wrapper type: %d.", crypt_storage_wrapper_get_type(rh->cw1));
/* new-segment wrapper: keyed by the new digest, read-write */
1370 vk = crypt_volume_key_by_id(vks, rh->digest_new);
1371 r = crypt_storage_wrapper_init(cd, &rh->cw2, crypt_data_device(cd),
1372 reencrypt_get_data_offset_new(hdr),
1373 crypt_get_iv_offset(cd),
1374 reencrypt_get_sector_size_new(hdr),
1375 reencrypt_segment_cipher_new(hdr),
1378 log_err(cd, _("Failed to initialize new segment storage wrapper."));
1381 rh->wflags2 = wrapper_flags;
1382 log_dbg(cd, "New cipher storage wrapper type: %d", crypt_storage_wrapper_get_type(rh->cw2));
/*
 * Derive the device-mapper names used for the reencryption device stack
 * from the activated device name: "<name>-hotzone-<direction>" for the
 * hotzone mapping and "<name>-overlay" for the overlay mapping.
 *
 * NOTE(review): the branch deciding between dm_device_name()/strdup()
 * and the error returns are partially elided in this excerpt.
 */
1387 static int reencrypt_context_set_names(struct luks2_reencrypt *rh, const char *name)
1393 if (!(rh->device_name = dm_device_name(name)))
1395 } else if (!(rh->device_name = strdup(name)))
1398 if (asprintf(&rh->hotzone_name, "%s-hotzone-%s", rh->device_name,
1399 rh->direction == CRYPT_REENCRYPT_FORWARD ? "forward" : "backward") < 0) {
/* asprintf leaves the pointer undefined on failure — reset it */
1400 rh->hotzone_name = NULL;
1403 if (asprintf(&rh->overlay_name, "%s-overlay", rh->device_name) < 0) {
1404 rh->overlay_name = NULL;
1412 static int modify_offset(uint64_t *offset, uint64_t data_shift, crypt_reencrypt_direction_info di)
1419 if (di == CRYPT_REENCRYPT_FORWARD) {
1420 if (*offset >= data_shift) {
1421 *offset -= data_shift;
1424 } else if (di == CRYPT_REENCRYPT_BACKWARD) {
1425 *offset += data_shift;
/*
 * Set or clear the LUKS2 online-reencryption requirement flag in the
 * header config area. Enabling stores the versioned requirement;
 * disabling masks the requirement bit out of the current set.
 * 'commit' controls whether the header is written to disk.
 *
 * NOTE(review): the enable/disable branch structure is partially
 * elided in this excerpt.
 */
1432 static int reencrypt_update_flag(struct crypt_device *cd, uint8_t version,
1433 bool enable, bool commit)
1436 struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
1439 log_dbg(cd, "Going to store reencryption requirement flag (version: %u).", version);
1440 return LUKS2_config_set_requirement_version(cd, hdr, CRYPT_REQUIREMENT_ONLINE_REENCRYPT, version, commit);
/* disable path: read current requirements and clear the reencrypt bit */
1443 if (LUKS2_config_get_requirements(cd, hdr, &reqs))
1446 reqs &= ~CRYPT_REQUIREMENT_ONLINE_REENCRYPT;
1448 log_dbg(cd, "Going to wipe reencryption requirement flag.");
1450 return LUKS2_config_set_requirements(cd, hdr, reqs, commit);
1453 static int reencrypt_hotzone_protect_ready(struct crypt_device *cd,
1454 struct reenc_protection *rp)
1458 if (rp->type == REENC_PROTECTION_NOT_SET)
1461 if (rp->type != REENC_PROTECTION_CHECKSUM)
1464 if (!rp->p.csum.checksums) {
1465 log_dbg(cd, "Allocating buffer for storing resilience checksums.");
1466 if (posix_memalign(&rp->p.csum.checksums, device_alignment(crypt_metadata_device(cd)),
1467 rp->p.csum.checksums_len))
/*
 * Recover a hotzone that may have been interrupted mid-write, using the
 * resilience protection recorded in the reencrypt keyslot:
 *  - CHECKSUM:  compare per-block hashes of on-disk data against the
 *               stored checksums and re-encrypt only mismatching blocks;
 *  - JOURNAL:   re-read the journaled copy of the hotzone from the
 *               keyslot area and rewrite it with the new parameters;
 *  - DATASHIFT: re-read the data from its pre-shift location and write
 *               it to the shifted location.
 *
 * NOTE(review): this excerpt elides many original lines (error gotos,
 * switch statement header, closing braces); comments describe only the
 * visible statements.
 */
1474 static int reencrypt_recover_segment(struct crypt_device *cd,
1475 struct luks2_hdr *hdr,
1476 struct luks2_reencrypt *rh,
1477 struct volume_key *vks)
1479 struct volume_key *vk_old, *vk_new;
1482 struct reenc_protection *rp;
1483 int devfd, r, new_sector_size, old_sector_size, rseg;
1484 uint64_t area_offset, area_length, area_length_read, crash_iv_offset,
1485 data_offset = crypt_get_data_offset(cd) << SECTOR_SHIFT;
1486 char *checksum_tmp = NULL, *data_buffer = NULL;
1487 struct crypt_storage_wrapper *cw1 = NULL, *cw2 = NULL;
1493 rseg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot);
/* at offset 0 with a moved first segment, use the dedicated moved-segment
 * protection instead of the regular hotzone protection */
1494 if (rh->offset == 0 && rh->rp_moved_segment.type > REENC_PROTECTION_NOT_SET) {
1495 log_dbg(cd, "Recovery using moved segment protection.");
1496 rp = &rh->rp_moved_segment;
1500 if (rseg < 0 || rh->length < 512)
1503 r = reencrypt_hotzone_protect_ready(cd, rp);
1505 log_err(cd, _("Failed to initialize hotzone protection."));
/* keys: new key unneeded for pure decryption, old key for pure encryption */
1509 vk_new = crypt_volume_key_by_id(vks, rh->digest_new);
1510 if (!vk_new && rh->mode != CRYPT_REENCRYPT_DECRYPT)
1512 vk_old = crypt_volume_key_by_id(vks, rh->digest_old);
1513 if (!vk_old && rh->mode != CRYPT_REENCRYPT_ENCRYPT)
1515 old_sector_size = json_segment_get_sector_size(reencrypt_segment_old(hdr));
1516 new_sector_size = json_segment_get_sector_size(reencrypt_segment_new(hdr));
1517 if (rh->mode == CRYPT_REENCRYPT_DECRYPT)
1518 crash_iv_offset = rh->offset >> SECTOR_SHIFT; /* TODO: + old iv_tweak */
1520 crash_iv_offset = json_segment_get_iv_offset(json_segments_get_segment(rh->jobj_segs_hot, rseg));
1522 log_dbg(cd, "crash_offset: %" PRIu64 ", crash_length: %" PRIu64 ", crash_iv_offset: %" PRIu64,
1523 data_offset + rh->offset, rh->length, crash_iv_offset);
/* cw2 always targets the hotzone with the *new* segment parameters */
1525 r = crypt_storage_wrapper_init(cd, &cw2, crypt_data_device(cd),
1526 data_offset + rh->offset, crash_iv_offset, new_sector_size,
1527 reencrypt_segment_cipher_new(hdr), vk_new, 0);
1529 log_err(cd, _("Failed to initialize new segment storage wrapper."));
1533 if (LUKS2_keyslot_area(hdr, rh->reenc_keyslot, &area_offset, &area_length)) {
1538 if (posix_memalign((void**)&data_buffer, device_alignment(crypt_data_device(cd)), rh->length)) {
1544 case REENC_PROTECTION_CHECKSUM:
1545 log_dbg(cd, "Checksums based recovery.");
/* cw1 reads the same hotzone with the *old* segment parameters */
1547 r = crypt_storage_wrapper_init(cd, &cw1, crypt_data_device(cd),
1548 data_offset + rh->offset, crash_iv_offset, old_sector_size,
1549 reencrypt_segment_cipher_old(hdr), vk_old, 0);
1551 log_err(cd, _("Failed to initialize old segment storage wrapper."));
1555 count = rh->length / rp->p.csum.block_size;
1556 area_length_read = count * rp->p.csum.hash_size;
1557 if (area_length_read > area_length) {
1558 log_dbg(cd, "Internal error in calculated area_length.");
1563 checksum_tmp = malloc(rp->p.csum.hash_size);
1564 if (!checksum_tmp) {
1569 /* TODO: lock for read */
1570 devfd = device_open(cd, crypt_metadata_device(cd), O_RDONLY);
1574 /* read old data checksums */
1575 read = read_lseek_blockwise(devfd, device_block_size(cd, crypt_metadata_device(cd)),
1576 device_alignment(crypt_metadata_device(cd)), rp->p.csum.checksums, area_length_read, area_offset);
1577 if (read < 0 || (size_t)read != area_length_read) {
1578 log_err(cd, _("Failed to read checksums for current hotzone."));
/* raw (ciphertext) read of the whole hotzone for hashing */
1583 read = crypt_storage_wrapper_read(cw2, 0, data_buffer, rh->length);
1584 if (read < 0 || (size_t)read != rh->length) {
1585 log_err(cd, _("Failed to read hotzone area starting at %" PRIu64 "."), rh->offset + data_offset);
/* hash each block; a *matching* stored checksum means the block still
 * holds old-format data and needs recovery */
1590 for (s = 0; s < count; s++) {
1591 if (crypt_hash_write(rp->p.csum.ch, data_buffer + (s * rp->p.csum.block_size), rp->p.csum.block_size)) {
1592 log_dbg(cd, "Failed to write hash.");
1596 if (crypt_hash_final(rp->p.csum.ch, checksum_tmp, rp->p.csum.hash_size)) {
1597 log_dbg(cd, "Failed to finalize hash.");
1601 if (!memcmp(checksum_tmp, (char *)rp->p.csum.checksums + (s * rp->p.csum.hash_size), rp->p.csum.hash_size)) {
1602 log_dbg(cd, "Sector %zu (size %zu, offset %zu) needs recovery", s, rp->p.csum.block_size, s * rp->p.csum.block_size);
1603 if (crypt_storage_wrapper_decrypt(cw1, s * rp->p.csum.block_size, data_buffer + (s * rp->p.csum.block_size), rp->p.csum.block_size)) {
1604 log_err(cd, _("Failed to decrypt sector %zu."), s);
1608 w = crypt_storage_wrapper_encrypt_write(cw2, s * rp->p.csum.block_size, data_buffer + (s * rp->p.csum.block_size), rp->p.csum.block_size);
1609 if (w < 0 || (size_t)w != rp->p.csum.block_size) {
1610 log_err(cd, _("Failed to recover sector %zu."), s);
1619 case REENC_PROTECTION_JOURNAL:
1620 log_dbg(cd, "Journal based recovery.");
1622 /* FIXME: validation candidate */
1623 if (rh->length > area_length) {
1625 log_dbg(cd, "Invalid journal size.");
/* journal lives in the reencrypt keyslot area on the metadata device */
1630 r = crypt_storage_wrapper_init(cd, &cw1, crypt_metadata_device(cd),
1631 area_offset, crash_iv_offset, old_sector_size,
1632 reencrypt_segment_cipher_old(hdr), vk_old, 0);
1634 log_err(cd, _("Failed to initialize old segment storage wrapper."));
1637 read = crypt_storage_wrapper_read_decrypt(cw1, 0, data_buffer, rh->length);
1638 if (read < 0 || (size_t)read != rh->length) {
1639 log_dbg(cd, "Failed to read journaled data.");
1641 /* may contain plaintext */
1642 crypt_safe_memzero(data_buffer, rh->length);
1645 read = crypt_storage_wrapper_encrypt_write(cw2, 0, data_buffer, rh->length);
1646 /* may contain plaintext */
1647 crypt_safe_memzero(data_buffer, rh->length);
1648 if (read < 0 || (size_t)read != rh->length) {
1649 log_dbg(cd, "recovery write failed.");
1656 case REENC_PROTECTION_DATASHIFT:
1657 log_dbg(cd, "Data shift based recovery.");
/* moved first segment: source data sits at the moved-segment offset */
1660 r = crypt_storage_wrapper_init(cd, &cw1, crypt_data_device(cd),
1661 json_segment_get_offset(rh->jobj_segment_moved, 0), 0,
1662 reencrypt_get_sector_size_old(hdr),
1663 reencrypt_segment_cipher_old(hdr), vk_old, 0);
/* otherwise source is the hotzone shifted by the data-shift value */
1665 if (rh->direction == CRYPT_REENCRYPT_FORWARD)
1666 data_offset = data_offset + rh->offset + data_shift_value(rp);
1668 data_offset = data_offset + rh->offset - data_shift_value(rp);
1669 r = crypt_storage_wrapper_init(cd, &cw1, crypt_data_device(cd),
1672 reencrypt_get_sector_size_old(hdr),
1673 reencrypt_segment_cipher_old(hdr), vk_old, 0);
1676 log_err(cd, _("Failed to initialize old segment storage wrapper."));
1680 read = crypt_storage_wrapper_read_decrypt(cw1, 0, data_buffer, rh->length);
1681 if (read < 0 || (size_t)read != rh->length) {
1682 log_dbg(cd, "Failed to read data.");
1684 /* may contain plaintext */
1685 crypt_safe_memzero(data_buffer, rh->length);
1689 read = crypt_storage_wrapper_encrypt_write(cw2, 0, data_buffer, rh->length);
1690 /* may contain plaintext */
1691 crypt_safe_memzero(data_buffer, rh->length);
1692 if (read < 0 || (size_t)read != rh->length) {
1693 log_dbg(cd, "recovery write failed.");
/* mark the whole hotzone as processed for the caller */
1704 rh->read = rh->length;
1708 crypt_storage_wrapper_destroy(cw1);
1709 crypt_storage_wrapper_destroy(cw2);
/*
 * Insert the backup copy of the moved first segment into the header
 * segment list (no-op when no segment was moved). Crypt-type moved
 * segments additionally keep a reference on the old digest.
 *
 * NOTE(review): some error-return lines are elided in this excerpt.
 */
1714 static int reencrypt_add_moved_segment(struct crypt_device *cd, struct luks2_hdr *hdr, struct luks2_reencrypt *rh)
1716 int digest = rh->digest_old, s = LUKS2_segment_first_unused_id(hdr);
1718 if (!rh->jobj_segment_moved)
1724 if (json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), s, json_object_get(rh->jobj_segment_moved))) {
/* drop the extra reference taken above on failure */
1725 json_object_put(rh->jobj_segment_moved);
1729 if (!strcmp(json_segment_type(rh->jobj_segment_moved), "crypt"))
1730 return LUKS2_digest_segment_assign(cd, hdr, s, digest, 1, 0);
/*
 * Insert one of the two backup segments ('backup-previous' for the old
 * layout, 'backup-final' for the new one, selected by 'final') into the
 * header segment list, and bind crypt-type segments to the matching
 * digest.
 *
 * NOTE(review): the 'final' parameter declaration and some error
 * returns are elided in this excerpt.
 */
1735 static int reencrypt_add_backup_segment(struct crypt_device *cd,
1736 struct luks2_hdr *hdr,
1737 struct luks2_reencrypt *rh,
1740 int digest, s = LUKS2_segment_first_unused_id(hdr);
1746 digest = final ? rh->digest_new : rh->digest_old;
1747 jobj = final ? rh->jobj_segment_new : rh->jobj_segment_old;
1749 if (json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), s, json_object_get(jobj))) {
/* drop the extra reference taken above on failure */
1750 json_object_put(jobj);
/* non-crypt (e.g. linear) segments carry no digest */
1754 if (strcmp(json_segment_type(jobj), "crypt"))
1757 return LUKS2_digest_segment_assign(cd, hdr, s, digest, 1, 0);
/*
 * Assign segments and digests for the simple modes (pure encryption or
 * pure decryption): install either the 'hot' or 'post' layout, add the
 * backup and moved segments, then bind every crypt segment to the
 * single relevant digest. Optionally commits the header to disk.
 *
 * NOTE(review): error-path control flow between the visible statements
 * is elided in this excerpt.
 */
1760 static int reencrypt_assign_segments_simple(struct crypt_device *cd,
1761 struct luks2_hdr *hdr,
1762 struct luks2_reencrypt *rh,
1768 if (hot && json_segments_count(rh->jobj_segs_hot) > 0) {
1769 log_dbg(cd, "Setting 'hot' segments.");
1771 r = LUKS2_segments_set(cd, hdr, rh->jobj_segs_hot, 0);
/* ownership transferred to the header on success */
1773 rh->jobj_segs_hot = NULL;
1774 } else if (!hot && json_segments_count(rh->jobj_segs_post) > 0) {
1775 log_dbg(cd, "Setting 'post' segments.");
1776 r = LUKS2_segments_set(cd, hdr, rh->jobj_segs_post, 0);
1778 rh->jobj_segs_post = NULL;
1780 log_dbg(cd, "No segments to set.");
1785 log_dbg(cd, "Failed to assign new enc segments.");
1789 r = reencrypt_add_backup_segment(cd, hdr, rh, 0);
1791 log_dbg(cd, "Failed to assign reencryption previous backup segment.");
1795 r = reencrypt_add_backup_segment(cd, hdr, rh, 1);
1797 log_dbg(cd, "Failed to assign reencryption final backup segment.");
1801 r = reencrypt_add_moved_segment(cd, hdr, rh);
1803 log_dbg(cd, "Failed to assign reencryption moved backup segment.");
/* simple modes use exactly one digest: new for encrypt, old for decrypt */
1807 for (sg = 0; sg < LUKS2_segments_count(hdr); sg++) {
1808 if (LUKS2_segment_is_type(hdr, sg, "crypt") &&
1809 LUKS2_digest_segment_assign(cd, hdr, sg, rh->mode == CRYPT_REENCRYPT_ENCRYPT ? rh->digest_new : rh->digest_old, 1, 0)) {
1810 log_dbg(cd, "Failed to assign digest %u to segment %u.", rh->digest_new, sg);
1815 return commit ? LUKS2_hdr_write(cd, hdr) : 0;
/*
 * Assign segments and digests in the header for the current phase of
 * reencryption. Simple modes are delegated; the reencrypt (reciphering)
 * mode installs the hot/post layout, gives the in-reencryption segment
 * references to *both* digests, and binds the segments before/after the
 * hotzone to the appropriate digest based on direction.
 *
 * NOTE(review): several conditionals and error returns between the
 * visible statements are elided in this excerpt.
 */
1818 static int reencrypt_assign_segments(struct crypt_device *cd,
1819 struct luks2_hdr *hdr,
1820 struct luks2_reencrypt *rh,
1825 int rseg, scount, r = -EINVAL;
1827 /* FIXME: validate in reencrypt context load */
1828 if (rh->digest_new < 0 && rh->mode != CRYPT_REENCRYPT_DECRYPT)
/* drop all existing digest<->segment bindings before reassigning */
1831 if (LUKS2_digest_segment_assign(cd, hdr, CRYPT_ANY_SEGMENT, CRYPT_ANY_DIGEST, 0, 0))
1834 if (rh->mode == CRYPT_REENCRYPT_ENCRYPT || rh->mode == CRYPT_REENCRYPT_DECRYPT)
1835 return reencrypt_assign_segments_simple(cd, hdr, rh, hot, commit);
1837 if (hot && rh->jobj_segs_hot) {
1838 log_dbg(cd, "Setting 'hot' segments.");
1840 r = LUKS2_segments_set(cd, hdr, rh->jobj_segs_hot, 0);
/* ownership transferred to the header on success */
1842 rh->jobj_segs_hot = NULL;
1843 } else if (!hot && rh->jobj_segs_post) {
1844 log_dbg(cd, "Setting 'post' segments.");
1845 r = LUKS2_segments_set(cd, hdr, rh->jobj_segs_post, 0);
1847 rh->jobj_segs_post = NULL;
1853 scount = LUKS2_segments_count(hdr);
1855 /* segment in reencryption has to hold reference on both digests */
1856 rseg = json_segments_segment_in_reencrypt(LUKS2_get_segments_jobj(hdr));
1857 if (rseg < 0 && hot)
1861 LUKS2_digest_segment_assign(cd, hdr, rseg, rh->digest_new, 1, 0);
1862 LUKS2_digest_segment_assign(cd, hdr, rseg, rh->digest_old, 1, 0);
1865 forward = (rh->direction == CRYPT_REENCRYPT_FORWARD);
/* hot layout: segment before the hotzone is already in the target
 * format when moving forward, still in the old one when backward */
1868 LUKS2_digest_segment_assign(cd, hdr, 0, forward ? rh->digest_new : rh->digest_old, 1, 0);
1869 if (scount > rseg + 1)
1870 LUKS2_digest_segment_assign(cd, hdr, rseg + 1, forward ? rh->digest_old : rh->digest_new, 1, 0);
/* post layout: only two segments at most remain */
1872 LUKS2_digest_segment_assign(cd, hdr, 0, forward || scount == 1 ? rh->digest_new : rh->digest_old, 1, 0);
1874 LUKS2_digest_segment_assign(cd, hdr, 1, forward ? rh->digest_old : rh->digest_new, 1, 0);
1877 r = reencrypt_add_backup_segment(cd, hdr, rh, 0);
1879 log_dbg(cd, "Failed to assign hot reencryption backup segment.");
1882 r = reencrypt_add_backup_segment(cd, hdr, rh, 1);
1884 log_dbg(cd, "Failed to assign post reencryption backup segment.");
1888 return commit ? LUKS2_hdr_write(cd, hdr) : 0;
/*
 * Build the initial linear segment layout for encryption of a plain
 * device. Depending on parameters this is: a moved first segment plus
 * an optional second segment (data-shift with moved segment), a single
 * fixed-size segment (plain data shift), or a single dynamic segment
 * (detached header).
 *
 * NOTE(review): some validation branches and error returns are elided
 * in this excerpt.
 */
1891 static int reencrypt_set_encrypt_segments(struct crypt_device *cd, struct luks2_hdr *hdr,
1892 uint64_t dev_size, uint64_t data_shift, bool move_first_segment,
1893 crypt_reencrypt_direction_info di)
1896 uint64_t first_segment_offset, first_segment_length,
1897 second_segment_offset, second_segment_length,
1898 data_offset = LUKS2_get_data_offset(hdr) << SECTOR_SHIFT,
1899 data_size = dev_size - data_shift;
1900 json_object *jobj_segment_first = NULL, *jobj_segment_second = NULL, *jobj_segments;
/* guard the dev_size - data_shift subtraction above against underflow */
1902 if (dev_size < data_shift)
1905 if (data_shift && (di == CRYPT_REENCRYPT_FORWARD))
1908 if (move_first_segment) {
1910 * future data_device layout:
1911 * [future LUKS2 header (data shift size)][second data segment][gap (data shift size)][first data segment (data shift size)]
1913 first_segment_offset = dev_size;
/* device smaller than the shift: single (moved) segment only */
1914 if (data_size < data_shift) {
1915 first_segment_length = data_size;
1916 second_segment_length = second_segment_offset = 0;
1918 first_segment_length = data_shift;
1919 second_segment_offset = data_shift;
1920 second_segment_length = data_size - data_shift;
1922 } else if (data_shift) {
1923 first_segment_offset = data_offset;
1924 first_segment_length = dev_size;
1926 /* future data_device layout with detached header: [first data segment] */
1927 first_segment_offset = data_offset;
1928 first_segment_length = 0; /* dynamic */
1931 jobj_segments = json_object_new_object();
1936 if (move_first_segment) {
1937 jobj_segment_first = json_segment_create_linear(first_segment_offset, &first_segment_length, 0);
1938 if (second_segment_length &&
1939 !(jobj_segment_second = json_segment_create_linear(second_segment_offset, &second_segment_length, 0))) {
1940 log_dbg(cd, "Failed generate 2nd segment.");
/* length 0 means 'dynamic': pass NULL instead of a size pointer */
1944 jobj_segment_first = json_segment_create_linear(first_segment_offset, first_segment_length ? &first_segment_length : NULL, 0);
1946 if (!jobj_segment_first) {
1947 log_dbg(cd, "Failed generate 1st segment.");
1951 json_object_object_add(jobj_segments, "0", jobj_segment_first);
1952 if (jobj_segment_second)
1953 json_object_object_add(jobj_segments, "1", jobj_segment_second);
/* clear stale digest bindings, then install the new layout */
1955 r = LUKS2_digest_segment_assign(cd, hdr, CRYPT_ANY_SEGMENT, CRYPT_ANY_DIGEST, 0, 0);
1957 return r ?: LUKS2_segments_set(cd, hdr, jobj_segments, 0);
/*
 * Build the initial crypt segment layout for decryption with a moved
 * (shifted) first segment: a fixed-length first crypt segment at offset
 * 0 and, when the device is larger, a dynamic second crypt segment with
 * an IV offset continuing where the first one ends.
 *
 * NOTE(review): the dev_size parameter declaration and some error
 * returns are elided in this excerpt.
 */
1960 static int reencrypt_set_decrypt_shift_segments(struct crypt_device *cd,
1961 struct luks2_hdr *hdr,
1963 uint64_t moved_segment_length,
1964 crypt_reencrypt_direction_info di)
1967 uint64_t first_segment_offset, first_segment_length,
1968 second_segment_offset, second_segment_length,
1969 data_offset = LUKS2_get_data_offset(hdr) << SECTOR_SHIFT;
1970 json_object *jobj_segment_first = NULL, *jobj_segment_second = NULL, *jobj_segments;
/* this layout is only defined for forward decryption */
1972 if (di == CRYPT_REENCRYPT_BACKWARD)
1976 * future data_device layout:
1977 * [encrypted first segment (max data shift size)][gap (data shift size)][second encrypted data segment]
1979 first_segment_offset = 0;
1980 first_segment_length = moved_segment_length;
1981 if (dev_size > moved_segment_length) {
1982 second_segment_offset = data_offset + first_segment_length;
1983 second_segment_length = 0;
1986 jobj_segments = json_object_new_object();
1991 jobj_segment_first = json_segment_create_crypt(first_segment_offset,
1992 crypt_get_iv_offset(cd), &first_segment_length,
1993 crypt_get_cipher_spec(cd), crypt_get_sector_size(cd), 0);
1995 if (!jobj_segment_first) {
1996 log_dbg(cd, "Failed generate 1st segment.");
2000 if (dev_size > moved_segment_length) {
/* IV offset continues from the end of the first segment (in sectors) */
2001 jobj_segment_second = json_segment_create_crypt(second_segment_offset,
2002 crypt_get_iv_offset(cd) + (first_segment_length >> SECTOR_SHIFT),
2003 second_segment_length ? &second_segment_length : NULL,
2004 crypt_get_cipher_spec(cd),
2005 crypt_get_sector_size(cd), 0);
2006 if (!jobj_segment_second) {
2007 json_object_put(jobj_segment_first);
2008 log_dbg(cd, "Failed generate 2nd segment.");
2013 json_object_object_add(jobj_segments, "0", jobj_segment_first);
2014 if (jobj_segment_second)
2015 json_object_object_add(jobj_segments, "1", jobj_segment_second);
2017 r = LUKS2_segments_set(cd, hdr, jobj_segments, 0);
/* bind all crypt segments to digest 0 once the layout is installed */
2019 return r ?: LUKS2_digest_segment_assign(cd, hdr, CRYPT_ANY_SEGMENT, 0, 1, 0);
/*
 * Translate the header segment list into a device-mapper target chain:
 * crypt segments become dm-crypt targets, linear segments dm-linear
 * targets. The segment currently in reencryption is redirected to the
 * hotzone device instead of the data device.
 *
 * NOTE(review): the surrounding loop header and several error returns
 * are elided in this excerpt.
 */
2022 static int reencrypt_make_targets(struct crypt_device *cd,
2023 struct luks2_hdr *hdr,
2024 struct device *hz_device,
2025 struct volume_key *vks,
2026 struct dm_target *result,
2030 struct volume_key *vk;
2031 uint64_t segment_size, segment_offset, segment_start = 0;
2034 json_object *jobj, *jobj_segments = LUKS2_get_segments_jobj(hdr);
2037 jobj = json_segments_get_segment(jobj_segments, s);
2039 log_dbg(cd, "Internal error. Segment %u is null.", s);
2043 reenc_seg = (s == json_segments_segment_in_reencrypt(jobj_segments));
2045 segment_offset = json_segment_get_offset(jobj, 1);
2046 segment_size = json_segment_get_size(jobj, 1);
2047 /* 'dynamic' length allowed in last segment only */
2048 if (!segment_size && !result->next)
2049 segment_size = (size >> SECTOR_SHIFT) - segment_start;
2050 if (!segment_size) {
2051 log_dbg(cd, "Internal error. Wrong segment size %u", s);
/* header offsets are absolute; dm targets are data-device relative */
2056 segment_offset -= crypt_get_data_offset(cd);
2058 if (!strcmp(json_segment_type(jobj), "crypt")) {
/* in-reencryption segment is keyed by the new digest */
2059 vk = crypt_volume_key_by_id(vks, reenc_seg ? LUKS2_reencrypt_digest_new(hdr) : LUKS2_digest_by_segment(hdr, s));
2061 log_err(cd, _("Missing key for dm-crypt segment %u"), s);
2065 r = dm_crypt_target_set(result, segment_start, segment_size,
2066 reenc_seg ? hz_device : crypt_data_device(cd),
2068 json_segment_get_cipher(jobj),
2069 json_segment_get_iv_offset(jobj),
2073 json_segment_get_sector_size(jobj));
2075 log_err(cd, _("Failed to set dm-crypt segment."));
2078 } else if (!strcmp(json_segment_type(jobj), "linear")) {
2079 r = dm_linear_target_set(result, segment_start, segment_size, reenc_seg ? hz_device : crypt_data_device(cd), segment_offset);
2081 log_err(cd, _("Failed to set dm-linear segment."));
/* advance to the next target slot in the chain */
2087 segment_start += segment_size;
2089 result = result->next;
2095 /* GLOBAL FIXME: audit function names and parameter names */
2098 * 1) audit log routines
2099 * 2) can't we derive hotzone device name from crypt context? (unlocked name, device uuid, etc?)
/*
 * Load (but do not yet activate) a new table into the overlay device,
 * built from the current header segments with the in-reencryption
 * segment pointed at the hotzone device.
 *
 * NOTE(review): error checks between the visible statements are elided
 * in this excerpt.
 */
2101 static int reencrypt_load_overlay_device(struct crypt_device *cd, struct luks2_hdr *hdr,
2102 const char *overlay, const char *hotzone, struct volume_key *vks, uint64_t size,
2105 char hz_path[PATH_MAX];
2108 struct device *hz_dev = NULL;
2109 struct crypt_dm_active_device dmd = {
2113 log_dbg(cd, "Loading new table for overlay device %s.", overlay);
2115 r = snprintf(hz_path, PATH_MAX, "%s/%s", dm_get_dir(), hotzone);
/* reject truncated path (snprintf reports required length) */
2116 if (r < 0 || r >= PATH_MAX) {
2121 r = device_alloc(cd, &hz_dev, hz_path);
/* one dm target per header segment */
2125 r = dm_targets_allocate(&dmd.segment, LUKS2_segments_count(hdr));
2129 r = reencrypt_make_targets(cd, hdr, hz_dev, vks, &dmd.segment, size);
2133 r = dm_reload_device(cd, overlay, &dmd, 0, 0);
2135 /* what else on error here ? */
2137 dm_targets_free(cd, &dmd);
2138 device_free(cd, hz_dev);
/*
 * Make 'target' carry the same dm table as 'source': reload + resume
 * when the target already exists, otherwise create it fresh. Sizes of
 * an existing target and the source must match.
 *
 * NOTE(review): the branch structure between reload and create paths is
 * partially elided in this excerpt.
 */
2143 static int reencrypt_replace_device(struct crypt_device *cd, const char *target, const char *source, uint32_t flags)
2146 struct crypt_dm_active_device dmd_source, dmd_target = {};
2147 uint32_t dmflags = DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH;
2149 log_dbg(cd, "Replacing table in device %s with table from device %s.", target, source);
2151 /* check only whether target device exists */
2152 r = dm_status_device(cd, target);
2160 r = dm_query_device(cd, source, DM_ACTIVE_DEVICE | DM_ACTIVE_CRYPT_CIPHER |
2161 DM_ACTIVE_CRYPT_KEYSIZE | DM_ACTIVE_CRYPT_KEY, &dmd_source);
2166 if (exists && ((r = dm_query_device(cd, target, 0, &dmd_target)) < 0))
2169 dmd_source.flags |= flags;
2170 dmd_source.uuid = crypt_get_uuid(cd);
2173 if (dmd_target.size != dmd_source.size) {
2174 log_err(cd, _("Source and target device sizes don't match. Source %" PRIu64 ", target: %" PRIu64 "."),
2175 dmd_source.size, dmd_target.size);
/* existing target: reload the new table, then resume without flush */
2179 r = dm_reload_device(cd, target, &dmd_source, 0, 0);
2181 log_dbg(cd, "Resuming device %s", target);
2182 r = dm_resume_device(cd, target, dmflags | act2dmflags(dmd_source.flags));
/* target does not exist yet: create it as a private subdevice */
2185 r = dm_create_device(cd, target, CRYPT_SUBDEV, &dmd_source);
2187 dm_targets_free(cd, &dmd_source);
2188 dm_targets_free(cd, &dmd_target);
/*
 * Repoint an active dm device 'name' at a new backing device via a 1:1
 * dm-linear table (reload + resume without flushing I/O). Used to swap
 * the unlocked device onto the overlay during stack setup.
 *
 * NOTE(review): error checks between the visible statements are elided
 * in this excerpt.
 */
2193 static int reencrypt_swap_backing_device(struct crypt_device *cd, const char *name,
2194 const char *new_backend_name)
2197 struct device *overlay_dev = NULL;
2198 char overlay_path[PATH_MAX] = { 0 };
2199 struct crypt_dm_active_device dmd = {};
2201 log_dbg(cd, "Redirecting %s mapping to new backing device: %s.", name, new_backend_name);
2203 r = snprintf(overlay_path, PATH_MAX, "%s/%s", dm_get_dir(), new_backend_name);
/* reject truncated path (snprintf reports required length) */
2204 if (r < 0 || r >= PATH_MAX) {
2209 r = device_alloc(cd, &overlay_dev, overlay_path);
2213 r = device_block_adjust(cd, overlay_dev, DEV_OK,
2214 0, &dmd.size, &dmd.flags);
/* 1:1 linear mapping over the whole overlay device */
2218 r = dm_linear_target_set(&dmd.segment, 0, dmd.size, overlay_dev, 0);
2222 r = dm_reload_device(cd, name, &dmd, 0, 0);
2224 log_dbg(cd, "Resuming device %s", name);
2225 r = dm_resume_device(cd, name, DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH);
2229 dm_targets_free(cd, &dmd);
2230 device_free(cd, overlay_dev);
/*
 * Activate the hotzone helper device: a private dm-linear mapping onto
 * the data device starting at the *new* data offset, sized to the
 * usable data area.
 *
 * NOTE(review): usage of the 'flags' parameter and some error checks
 * are elided in this excerpt.
 */
2235 static int reencrypt_activate_hotzone_device(struct crypt_device *cd, const char *name, uint64_t device_size, uint32_t flags)
2238 uint64_t new_offset = reencrypt_get_data_offset_new(crypt_get_hdr(cd, CRYPT_LUKS2)) >> SECTOR_SHIFT;
2240 struct crypt_dm_active_device dmd = {
2242 .uuid = crypt_get_uuid(cd),
2243 .size = device_size >> SECTOR_SHIFT
2246 log_dbg(cd, "Activating hotzone device %s.", name);
2248 r = device_block_adjust(cd, crypt_data_device(cd), DEV_OK,
2249 new_offset, &dmd.size, &dmd.flags);
2253 r = dm_linear_target_set(&dmd.segment, 0, dmd.size, crypt_data_device(cd), new_offset);
2257 r = dm_create_device(cd, name, CRYPT_SUBDEV, &dmd);
2259 dm_targets_free(cd, &dmd);
/*
 * Build the online-reencryption device stack:
 *   unlocked device -> overlay (copy of original table) -> hotzone
 * First the hotzone is activated, then the overlay takes over the
 * original table, finally the unlocked device is re-pointed at the
 * overlay. On failure the helper devices are torn down again.
 *
 * NOTE(review): the error-path labels between the visible statements
 * are elided in this excerpt.
 */
2264 static int reencrypt_init_device_stack(struct crypt_device *cd,
2265 const struct luks2_reencrypt *rh)
2269 /* Activate hotzone device 1:1 linear mapping to data_device */
2270 r = reencrypt_activate_hotzone_device(cd, rh->hotzone_name, rh->device_size, CRYPT_ACTIVATE_PRIVATE);
2272 log_err(cd, _("Failed to activate hotzone device %s."), rh->hotzone_name);
2277 * Activate overlay device with exactly same table as original 'name' mapping.
2278 * Note that within this step the 'name' device may already include a table
2279 * constructed from more than single dm-crypt segment. Therefore transfer
2282 * If we're about to resume reencryption orig mapping has to be already validated for
2283 * abrupt shutdown and rchunk_offset has to point on next chunk to reencrypt!
2285 * TODO: in crypt_activate_by*
2287 r = reencrypt_replace_device(cd, rh->overlay_name, rh->device_name, CRYPT_ACTIVATE_PRIVATE);
2289 log_err(cd, _("Failed to activate overlay device %s with actual origin table."), rh->overlay_name);
2293 /* swap origin mapping to overlay device */
2294 r = reencrypt_swap_backing_device(cd, rh->device_name, rh->overlay_name);
2296 log_err(cd, _("Failed to load new mapping for device %s."), rh->device_name);
2301 * Now the 'name' (unlocked luks) device is mapped via dm-linear to an overlay dev.
2302 * The overlay device has a original live table of 'name' device in-before the swap.
2307 /* TODO: force error helper devices on error path */
2308 dm_remove_device(cd, rh->overlay_name, 0);
2309 dm_remove_device(cd, rh->hotzone_name, 0);
2315 * 1) audit error path. any error in this routine is fatal and should be unlikely.
2316 * usually it would hint at a collision with another userspace process touching
2317 * dm devices directly.
/*
 * Atomically advance the device stack to the next hotzone: suspend
 * overlay then hotzone (top to bottom), then resume the overlay so its
 * preloaded inactive table goes live.
 *
 * NOTE(review): the error returns after the visible checks are elided
 * in this excerpt.
 */
2319 static int reenc_refresh_helper_devices(struct crypt_device *cd, const char *overlay, const char *hotzone)
2324 * we have to explicitly suspend the overlay device before suspending
2325 * the hotzone one. Resuming overlay device (aka switching tables) only
2326 * after suspending the hotzone may lead to deadlock.
2328 * In other words: always suspend the stack from top to bottom!
2330 r = dm_suspend_device(cd, overlay, DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH);
2332 log_err(cd, _("Failed to suspend device %s."), overlay);
2336 /* suspend HZ device */
2337 r = dm_suspend_device(cd, hotzone, DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH);
2339 log_err(cd, _("Failed to suspend device %s."), hotzone);
2343 /* resume overlay device: inactive table (with hotzone) -> live */
2344 r = dm_resume_device(cd, overlay, DM_RESUME_PRIVATE);
2346 log_err(cd, _("Failed to resume device %s."), overlay);
/*
 * Reload the overlay table for the next hotzone and switch the helper
 * devices over to it. Returns REENC_ROLLBACK when the refresh failed
 * and the caller must roll the stack back; other REENC_* codes come
 * from elided branches.
 */
2351 static int reencrypt_refresh_overlay_devices(struct crypt_device *cd,
2352 struct luks2_hdr *hdr,
2353 const char *overlay,
2354 const char *hotzone,
2355 struct volume_key *vks,
2356 uint64_t device_size,
2359 int r = reencrypt_load_overlay_device(cd, hdr, overlay, hotzone, vks, device_size, flags);
2361 log_err(cd, _("Failed to reload device %s."), overlay);
2365 r = reenc_refresh_helper_devices(cd, overlay, hotzone);
2367 log_err(cd, _("Failed to refresh reencryption devices stack."));
2368 return REENC_ROLLBACK;
/*
 * Physically move the first data segment on the raw device to make room
 * for the LUKS2 header (encrypt) or to shift data back (decrypt): read
 * the segment from its current position and rewrite it at the offset
 * recorded in header segment 0.
 *
 * NOTE(review): the devfd parameter/acquisition and some error returns
 * are elided in this excerpt.
 */
2374 static int reencrypt_move_data(struct crypt_device *cd,
2376 uint64_t data_shift,
2377 crypt_reencrypt_mode_info mode)
2382 uint64_t buffer_len, offset,
/* encrypt moves data away from offset 0; decrypt reads from data_shift */
2383 read_offset = (mode == CRYPT_REENCRYPT_ENCRYPT ? 0 : data_shift);
2384 struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
2386 offset = json_segment_get_offset(LUKS2_get_segment_jobj(hdr, 0), 0);
2387 buffer_len = json_segment_get_size(LUKS2_get_segment_jobj(hdr, 0), 0);
/* moved segment must fit within one data-shift chunk */
2388 if (!buffer_len || buffer_len > data_shift)
2391 if (posix_memalign(&buffer, device_alignment(crypt_data_device(cd)), buffer_len))
2394 ret = read_lseek_blockwise(devfd,
2395 device_block_size(cd, crypt_data_device(cd)),
2396 device_alignment(crypt_data_device(cd)),
2397 buffer, buffer_len, read_offset);
2398 if (ret < 0 || (uint64_t)ret != buffer_len) {
2399 log_dbg(cd, "Failed to read data at offset %" PRIu64 " (size: %zu)",
2400 read_offset, buffer_len);
2405 log_dbg(cd, "Going to write %" PRIu64 " bytes read at offset %" PRIu64 " to new offset %" PRIu64,
2406 buffer_len, read_offset, offset);
2407 ret = write_lseek_blockwise(devfd,
2408 device_block_size(cd, crypt_data_device(cd)),
2409 device_alignment(crypt_data_device(cd)),
2410 buffer, buffer_len, offset);
2411 if (ret < 0 || (uint64_t)ret != buffer_len) {
2412 log_dbg(cd, "Failed to write data at offset %" PRIu64 " (size: %zu)",
2413 offset, buffer_len);
/* buffer held plaintext device data — wipe before release */
2420 crypt_safe_memzero(buffer, buffer_len);
/*
 * Create the "backup" segments in LUKS2 metadata that describe the device
 * layout before ("backup-previous") and after ("backup-final") reencryption,
 * plus optionally "backup-moved-segment" when the first segment is physically
 * moved. Digests of old/new volume keys are assigned to the matching
 * segments so they can be verified/recovered later.
 */
2425 static int reencrypt_make_backup_segments(struct crypt_device *cd,
2426 struct luks2_hdr *hdr,
2429 uint64_t data_offset,
2430 const struct crypt_params_reencrypt *params)
2432 int r, segment, moved_segment = -1, digest_old = -1, digest_new = -1;
2433 json_object *jobj_tmp, *jobj_segment_new = NULL, *jobj_segment_old = NULL, *jobj_segment_bcp = NULL;
/* Decryption has no LUKS2 params; fall back to default 512-byte sectors. */
2434 uint32_t sector_size = params->luks2 ? params->luks2->sector_size : SECTOR_SIZE;
2435 uint64_t segment_offset, tmp, data_shift = params->data_shift << SECTOR_SHIFT,
2436 device_size = params->device_size << SECTOR_SHIFT;
/* New key digest exists for everything except pure decryption. */
2438 if (params->mode != CRYPT_REENCRYPT_DECRYPT) {
2439 digest_new = LUKS2_digest_by_keyslot(hdr, keyslot_new);
/* Old key digest exists for everything except initial encryption. */
2444 if (params->mode != CRYPT_REENCRYPT_ENCRYPT) {
2445 digest_old = LUKS2_digest_by_segment(hdr, CRYPT_DEFAULT_SEGMENT);
2450 segment = LUKS2_segment_first_unused_id(hdr);
/* Optional copy of segment 0 flagged as the moved-segment backup. */
2454 if (params->flags & CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT) {
2455 if (json_object_copy(LUKS2_get_segment_jobj(hdr, 0), &jobj_segment_bcp)) {
2459 r = LUKS2_segment_set_flag(jobj_segment_bcp, "backup-moved-segment");
2462 moved_segment = segment++;
2463 json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), moved_segment, jobj_segment_bcp);
/* Only crypt segments carry a key digest; linear moved segments do not. */
2464 if (!strcmp(json_segment_type(jobj_segment_bcp), "crypt"))
2465 LUKS2_digest_segment_assign(cd, hdr, moved_segment, digest_old, 1, 0);
2468 /* FIXME: Add detection for case (digest old == digest new && old segment == new segment) */
/* Build the "backup-previous" segment describing the pre-reencryption layout. */
2469 if (digest_old >= 0) {
2470 if (params->flags & CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT) {
2471 jobj_tmp = LUKS2_get_segment_jobj(hdr, 0);
2477 jobj_segment_old = json_segment_create_crypt(data_offset,
2478 json_segment_get_iv_offset(jobj_tmp),
2479 device_size ? &device_size : NULL,
2480 json_segment_get_cipher(jobj_tmp),
2481 json_segment_get_sector_size(jobj_tmp),
2484 if (json_object_copy(LUKS2_get_segment_jobj(hdr, CRYPT_DEFAULT_SEGMENT), &jobj_segment_old)) {
/* Initial encryption: "previous" layout is plain linear data. */
2489 } else if (params->mode == CRYPT_REENCRYPT_ENCRYPT) {
2490 r = LUKS2_get_data_size(hdr, &tmp, NULL);
2494 if (params->flags & CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT)
2495 jobj_segment_old = json_segment_create_linear(0, tmp ? &tmp : NULL, 0);
2497 jobj_segment_old = json_segment_create_linear(data_offset, tmp ? &tmp : NULL, 0);
2500 if (!jobj_segment_old) {
2505 r = LUKS2_segment_set_flag(jobj_segment_old, "backup-previous");
2508 json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), segment, jobj_segment_old);
/* Ownership transferred to the segments object; do not json_object_put below. */
2509 jobj_segment_old = NULL;
2510 if (digest_old >= 0)
2511 LUKS2_digest_segment_assign(cd, hdr, segment, digest_old, 1, 0);
/* Build the "backup-final" segment describing the post-reencryption layout. */
2514 if (digest_new >= 0) {
2515 segment_offset = data_offset;
/* Data shift moves the future data start except for initial encryption. */
2516 if (params->mode != CRYPT_REENCRYPT_ENCRYPT &&
2517 modify_offset(&segment_offset, data_shift, params->direction)) {
2521 jobj_segment_new = json_segment_create_crypt(segment_offset,
2522 crypt_get_iv_offset(cd),
2523 NULL, cipher, sector_size, 0);
/* Decryption ends with a plain linear segment at the shifted offset. */
2524 } else if (params->mode == CRYPT_REENCRYPT_DECRYPT) {
2525 segment_offset = data_offset;
2526 if (modify_offset(&segment_offset, data_shift, params->direction)) {
2530 jobj_segment_new = json_segment_create_linear(segment_offset, NULL, 0);
2533 if (!jobj_segment_new) {
2538 r = LUKS2_segment_set_flag(jobj_segment_new, "backup-final");
2541 json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), segment, jobj_segment_new);
2542 jobj_segment_new = NULL;
2543 if (digest_new >= 0)
2544 LUKS2_digest_segment_assign(cd, hdr, segment, digest_new, 1, 0);
2546 /* FIXME: also check occupied space by keyslot in shrunk area */
/* Forward shift on a single device shrinks the keyslots area to the new data start. */
2547 if (params->direction == CRYPT_REENCRYPT_FORWARD && data_shift &&
2548 crypt_metadata_device(cd) == crypt_data_device(cd) &&
2549 LUKS2_set_keyslots_size(hdr, json_segment_get_offset(reencrypt_segment_new(hdr), 0))) {
2550 log_err(cd, _("Failed to set new keyslots area size."));
/* Error path: release any segment objects not yet owned by the header. */
2557 json_object_put(jobj_segment_new);
2558 json_object_put(jobj_segment_old);
/*
 * Look up the volume key matching @digest in the @vks list and verify it
 * really produces that digest. Non-zero/negative return indicates the key
 * is missing or does not match.
 */
2562 static int reencrypt_verify_single_key(struct crypt_device *cd, int digest, struct volume_key *vks)
2564 struct volume_key *vk;
2566 vk = crypt_volume_key_by_id(vks, digest);
/* LUKS2_digest_verify_by_digest returns the digest id on success. */
2570 if (LUKS2_digest_verify_by_digest(cd, digest, vk) != digest)
/*
 * Verify both reencryption volume keys (old and new) against their digests.
 * A negative digest id means that side has no key (encrypt/decrypt modes)
 * and is skipped.
 */
2576 static int reencrypt_verify_keys(struct crypt_device *cd,
2579 struct volume_key *vks)
2583 if (digest_new >= 0 && (r = reencrypt_verify_single_key(cd, digest_new, vks)))
2586 if (digest_old >= 0 && (r = reencrypt_verify_single_key(cd, digest_old, vks)))
/*
 * Find the volume key for @digest in @vks and load it into the kernel
 * keyring so dm-crypt can reference it by description.
 */
2592 static int reencrypt_upload_single_key(struct crypt_device *cd,
2593 struct luks2_hdr *hdr,
2595 struct volume_key *vks)
2597 struct volume_key *vk;
2599 vk = crypt_volume_key_by_id(vks, digest);
2603 return LUKS2_volume_key_load_in_keyring_by_digest(cd, vk, digest);
/*
 * Upload old and new volume keys to the kernel keyring (when keyring use is
 * enabled). Keys for cipher_null segments are skipped — there is nothing to
 * key. If the second upload fails, the first key is dropped again so no
 * stale key lingers in the keyring.
 */
2606 static int reencrypt_upload_keys(struct crypt_device *cd,
2607 struct luks2_hdr *hdr,
2610 struct volume_key *vks)
/* No-op when this context does not use the kernel keyring for volume keys. */
2614 if (!crypt_use_keyring_for_vk(cd))
2617 if (digest_new >= 0 && !crypt_is_cipher_null(reencrypt_segment_cipher_new(hdr)) &&
2618 (r = reencrypt_upload_single_key(cd, hdr, digest_new, vks)))
2621 if (digest_old >= 0 && !crypt_is_cipher_null(reencrypt_segment_cipher_old(hdr)) &&
2622 (r = reencrypt_upload_single_key(cd, hdr, digest_old, vks))) {
/* Roll back the already-uploaded new key on failure. */
2623 crypt_drop_keyring_key(cd, vks);
/*
 * Convenience wrapper: verify both volume keys against their digests first,
 * then upload them to the kernel keyring.
 */
2630 static int reencrypt_verify_and_upload_keys(struct crypt_device *cd,
2631 struct luks2_hdr *hdr,
2634 struct volume_key *vks)
2638 r = reencrypt_verify_keys(cd, digest_old, digest_new, vks);
2642 r = reencrypt_upload_keys(cd, hdr, digest_old, digest_new, vks);
/*
 * Validate the checksum-resilience parameters: the hash name must fit the
 * on-disk field (LUKS2_CHECKSUM_ALG_L), have a positive digest size and be
 * available in the crypto backend. The hash handle is only created as an
 * availability probe and destroyed immediately.
 */
2649 static int reencrypt_verify_checksum_params(struct crypt_device *cd,
2650 const struct crypt_params_reencrypt *params)
2653 struct crypt_hash *ch;
2660 len = strlen(params->hash);
/* Name must fit into the keyslot area field including NUL terminator. */
2661 if (!len || len > (LUKS2_CHECKSUM_ALG_L - 1))
2664 if (crypt_hash_size(params->hash) <= 0)
2667 if (crypt_hash_init(&ch, params->hash)) {
2668 log_err(cd, _("Hash algorithm %s is not available."), params->hash);
2671 /* We just check for alg availability */
2672 crypt_hash_destroy(ch);
/*
 * Validate data-shift parameters: the shift must be non-zero and aligned to
 * the encryption sector size (both expressed in 512-byte sectors here, hence
 * the >> SECTOR_SHIFT on sector_size).
 */
2677 static int reencrypt_verify_datashift_params(struct crypt_device *cd,
2678 const struct crypt_params_reencrypt *params,
2679 uint32_t sector_size)
2683 if (!params->data_shift)
2685 if (MISALIGNED(params->data_shift, sector_size >> SECTOR_SHIFT)) {
2686 log_err(cd, _("Data shift value is not aligned to encryption sector size (%" PRIu32 " bytes)."),
/*
 * Dispatch validation of the requested resilience mode and its parameter
 * combinations. Plain "journal"/"none"/"checksum" modes are incompatible
 * with data shift or a moved first segment; the "datashift-*" variants
 * require a moved first segment. Unknown mode names are rejected.
 */
2694 static int reencrypt_verify_resilience_params(struct crypt_device *cd,
2695 const struct crypt_params_reencrypt *params,
2696 uint32_t sector_size, bool move_first_segment)
2698 /* no change requested */
2699 if (!params || !params->resilience)
2702 if (!strcmp(params->resilience, "journal"))
2703 return (params->data_shift || move_first_segment) ? -EINVAL : 0;
2704 else if (!strcmp(params->resilience, "none"))
2705 return (params->data_shift || move_first_segment) ? -EINVAL : 0;
2706 else if (!strcmp(params->resilience, "datashift"))
2707 return reencrypt_verify_datashift_params(cd, params, sector_size);
2708 else if (!strcmp(params->resilience, "checksum")) {
2709 if (params->data_shift || move_first_segment)
2711 return reencrypt_verify_checksum_params(cd, params);
2712 } else if (!strcmp(params->resilience, "datashift-checksum")) {
/* Combined modes are only valid together with a moved first segment. */
2713 if (!move_first_segment ||
2714 reencrypt_verify_datashift_params(cd, params, sector_size))
2716 return reencrypt_verify_checksum_params(cd, params);
2717 } else if (!strcmp(params->resilience, "datashift-journal")) {
2718 if (!move_first_segment)
2720 return reencrypt_verify_datashift_params(cd, params, sector_size);
2723 log_err(cd, _("Unsupported resilience mode %s"), params->resilience);
/*
 * Initialize LUKS2 decryption with data shift (moved first segment).
 * Sequence: validate parameters, build in-memory segments, allocate the
 * reencrypt keyslot, unlock and verify keys, (online case) reload the active
 * dm device with the source table, physically move the first data chunk,
 * and only then perform the single metadata write that sets the
 * online-reencrypt requirement. On error the in-memory header is rolled
 * back. Returns the reencrypt keyslot id on success, negative errno-style
 * code on failure.
 */
2727 static int reencrypt_decrypt_with_datashift_init(struct crypt_device *cd,
2729 struct luks2_hdr *hdr,
2730 int reencrypt_keyslot,
2731 uint32_t sector_size,
2733 uint64_t data_offset,
2734 const char *passphrase,
2735 size_t passphrase_size,
2737 const struct crypt_params_reencrypt *params,
2738 struct volume_key **vks)
/* Tracks whether a dm table reload needs to be cleared/resumed on exit. */
2740 bool clear_table = false;
2742 uint64_t data_shift, max_moved_segment_length, moved_segment_length;
2743 struct reenc_protection check_rp = {};
2744 struct crypt_dm_active_device dmd_target, dmd_source = {
2745 .uuid = crypt_get_uuid(cd),
2746 .flags = CRYPT_ACTIVATE_SHARED /* turn off exclusive open checks */
2748 json_object *jobj_segments_old;
2752 assert(params->resilience);
2753 assert(params->data_shift);
/* The moved (hotzone) segment must fit inside the shift window. */
2759 if (params->max_hotzone_size > params->data_shift) {
2760 log_err(cd, _("Moved segment size can not be greater than data shift value."));
2764 log_dbg(cd, "Initializing decryption with datashift.");
2766 data_shift = params->data_shift << SECTOR_SHIFT;
2769 * In offline mode we must perform data move with exclusively opened data
2770 * device in order to exclude LUKS2 decryption process and filesystem mount.
2773 devfd = device_open(cd, crypt_data_device(cd), O_RDWR);
2775 devfd = device_open_excl(cd, crypt_data_device(cd), O_RDWR);
2779 /* in-memory only */
/* Derive moved segment length: explicit hotzone size, else capped default. */
2780 moved_segment_length = params->max_hotzone_size << SECTOR_SHIFT;
2781 if (!moved_segment_length)
2782 moved_segment_length = data_shift < LUKS2_DEFAULT_NONE_REENCRYPTION_LENGTH ?
2783 data_shift : LUKS2_DEFAULT_NONE_REENCRYPTION_LENGTH;
2785 if (moved_segment_length > data_size)
2786 moved_segment_length = data_size;
2788 r = reencrypt_set_decrypt_shift_segments(cd, hdr, data_size,
2789 moved_segment_length,
2794 r = reencrypt_make_backup_segments(cd, hdr, CRYPT_ANY_SLOT, NULL, data_offset, params);
2796 log_dbg(cd, "Failed to create reencryption backup device segments.");
2800 r = reencrypt_verify_resilience_params(cd, params, sector_size, true);
2802 log_err(cd, _("Invalid reencryption resilience parameters."));
2806 r = LUKS2_keyslot_reencrypt_allocate(cd, hdr, reencrypt_keyslot,
2807 params, reencrypt_get_alignment(cd, hdr));
/* Re-load the just-allocated protection to learn the real hotzone limit. */
2811 r = LUKS2_keyslot_reencrypt_load(cd, hdr, reencrypt_keyslot, &check_rp, false);
2815 r = LUKS2_reencrypt_max_hotzone_size(cd, hdr, &check_rp,
2817 &max_moved_segment_length);
2821 LUKS2_reencrypt_protection_erase(&check_rp);
2823 if (moved_segment_length > max_moved_segment_length) {
2824 log_err(cd, _("Moved segment too large. Requested size %" PRIu64 ", available space for: %" PRIu64 "."),
2825 moved_segment_length, max_moved_segment_length);
2830 r = LUKS2_keyslot_open_all_segments(cd, keyslot_old, CRYPT_ANY_SLOT,
2831 passphrase, passphrase_size, vks);
2835 r = LUKS2_keyslot_reencrypt_digest_create(cd, hdr, LUKS2_DECRYPT_DATASHIFT_REQ_VERSION, *vks);
2840 r = reencrypt_verify_and_upload_keys(cd, hdr,
2841 LUKS2_reencrypt_digest_old(hdr),
2842 LUKS2_reencrypt_digest_new(hdr),
/* Online path: verify the active dm device matches the old segment layout. */
2847 r = dm_query_device(cd, name, DM_ACTIVE_UUID | DM_ACTIVE_DEVICE |
2848 DM_ACTIVE_CRYPT_KEYSIZE | DM_ACTIVE_CRYPT_KEY |
2849 DM_ACTIVE_CRYPT_CIPHER, &dmd_target);
2853 jobj_segments_old = reencrypt_segments_old(hdr);
2854 if (!jobj_segments_old) {
2858 r = LUKS2_assembly_multisegment_dmd(cd, hdr, *vks, jobj_segments_old, &dmd_source);
2860 r = crypt_compare_dm_devices(cd, &dmd_source, &dmd_target);
2862 log_err(cd, _("Mismatching parameters on device %s."), name);
2864 json_object_put(jobj_segments_old);
2866 dm_targets_free(cd, &dmd_source);
2867 dm_targets_free(cd, &dmd_target);
2868 free(CONST_CAST(void*)dmd_target.uuid);
/* Reload the device with the new (in-memory) segment table, then suspend. */
2873 dmd_source.size = dmd_target.size;
2874 r = LUKS2_assembly_multisegment_dmd(cd, hdr, *vks, LUKS2_get_segments_jobj(hdr), &dmd_source);
2876 r = dm_reload_device(cd, name, &dmd_source, dmd_target.flags, 0);
2878 log_err(cd, _("Failed to reload device %s."), name);
2883 dm_targets_free(cd, &dmd_source);
2890 r = dm_suspend_device(cd, name, DM_SUSPEND_SKIP_LOCKFS);
2892 log_err(cd, _("Failed to suspend device %s."), name);
/* Physically move the first segment while the device is quiesced. */
2897 if (reencrypt_move_data(cd, devfd, data_shift, params->mode)) {
2902 /* This must be first and only write in LUKS2 metadata during _reencrypt_init */
2903 r = reencrypt_update_flag(cd, LUKS2_DECRYPT_DATASHIFT_REQ_VERSION, true, true);
2905 log_dbg(cd, "Failed to set online-reencryption requirement.");
/* Success: report the reencrypt keyslot id. */
2908 r = reencrypt_keyslot;
/* Cleanup: undo/resume a reloaded dm table depending on outcome. */
2910 if (r < 0 && clear_table && dm_clear_device(cd, name))
2911 log_err(cd, _("Failed to clear table."));
2912 else if (clear_table && dm_resume_device(cd, name, DM_SUSPEND_SKIP_LOCKFS))
2913 log_err(cd, _("Failed to resume device %s."), name);
2915 device_release_excl(cd, crypt_data_device(cd));
/* In-memory metadata changes are discarded on failure. */
2916 if (r < 0 && LUKS2_hdr_rollback(cd, hdr) < 0)
2917 log_dbg(cd, "Failed to rollback LUKS2 metadata after failure.");
2922 /* This function must be called with metadata lock held */
/*
 * Initialize LUKS2 reencryption (encrypt/reencrypt/decrypt) in metadata.
 * Validates sector sizes and device size, picks a free keyslot for the
 * reencrypt keyslot, builds backup segments, unlocks keys and — for online
 * devices — verifies the active dm mapping matches the metadata. Hands off
 * to reencrypt_decrypt_with_datashift_init() for the datashift decryption
 * case. The single metadata write happens last (reencrypt_update_flag);
 * everything before it is in-memory only and rolled back on error.
 * Returns the reencrypt keyslot id on success.
 */
2923 static int reencrypt_init(struct crypt_device *cd,
2925 struct luks2_hdr *hdr,
2926 const char *passphrase,
2927 size_t passphrase_size,
2931 const char *cipher_mode,
2932 const struct crypt_params_reencrypt *params,
2933 struct volume_key **vks)
2935 bool move_first_segment;
2937 uint32_t check_sector_size, new_sector_size, old_sector_size;
2938 int r, reencrypt_keyslot, devfd = -1;
2939 uint64_t data_offset, data_size = 0;
2940 struct crypt_dm_active_device dmd_target, dmd_source = {
2941 .uuid = crypt_get_uuid(cd),
2942 .flags = CRYPT_ACTIVATE_SHARED /* turn off exclusive open checks */
2948 if (!params || !params->resilience || params->mode > CRYPT_REENCRYPT_DECRYPT)
/* Everything except decryption needs LUKS2 params, a cipher and a new keyslot. */
2951 if (params->mode != CRYPT_REENCRYPT_DECRYPT &&
2952 (!params->luks2 || !(cipher && cipher_mode) || keyslot_new < 0))
2955 log_dbg(cd, "Initializing reencryption (mode: %s) in LUKS2 metadata.",
2956 crypt_reencrypt_mode_to_str(params->mode));
2958 move_first_segment = (params->flags & CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT);
2960 old_sector_size = LUKS2_get_sector_size(hdr);
2962 /* implicit sector size 512 for decryption */
2963 new_sector_size = params->luks2 ? params->luks2->sector_size : SECTOR_SIZE;
2964 if (new_sector_size < SECTOR_SIZE || new_sector_size > MAX_SECTOR_SIZE ||
2965 NOTPOW2(new_sector_size)) {
2966 log_err(cd, _("Unsupported encryption sector size."));
2969 /* check the larger encryption sector size only */
2970 check_sector_size = new_sector_size > old_sector_size ? new_sector_size : old_sector_size;
/* Build "cipher" or "cipher-mode" spec string; reject truncation. */
2972 if (!cipher_mode || *cipher_mode == '\0')
2973 r = snprintf(_cipher, sizeof(_cipher), "%s", cipher);
2975 r = snprintf(_cipher, sizeof(_cipher), "%s-%s", cipher, cipher_mode);
2976 if (r < 0 || (size_t)r >= sizeof(_cipher))
2979 data_offset = LUKS2_get_data_offset(hdr) << SECTOR_SHIFT;
2981 r = device_check_access(cd, crypt_data_device(cd), DEV_OK);
2985 r = device_check_size(cd, crypt_data_device(cd), data_offset, 1);
2989 r = device_size(crypt_data_device(cd), &data_size);
/* Usable data size excludes the metadata area before data_offset. */
2993 data_size -= data_offset;
2995 if (params->device_size) {
2996 if ((params->device_size << SECTOR_SHIFT) > data_size) {
2997 log_err(cd, _("Reduced data size is larger than real device size."));
3000 data_size = params->device_size << SECTOR_SHIFT;
3003 if (MISALIGNED(data_size, check_sector_size)) {
3004 log_err(cd, _("Data device is not aligned to encryption sector size (%" PRIu32 " bytes)."), check_sector_size);
3008 reencrypt_keyslot = LUKS2_keyslot_find_empty(cd, hdr, 0);
3009 if (reencrypt_keyslot < 0) {
3010 log_err(cd, _("All key slots full."));
/* Datashift decryption has a dedicated initialization path. */
3014 if (params->mode == CRYPT_REENCRYPT_DECRYPT && (params->data_shift > 0) && move_first_segment)
3015 return reencrypt_decrypt_with_datashift_init(cd, name, hdr,
3028 * We must perform data move with exclusive open data device
3029 * to exclude another cryptsetup process to colide with
3030 * encryption initialization (or mount)
3032 if (move_first_segment) {
3033 if (data_size < (params->data_shift << SECTOR_SHIFT)) {
3034 log_err(cd, _("Device %s is too small."), device_path(crypt_data_device(cd)));
/* The shift must leave room for the future LUKS2 header before the data. */
3037 if (params->data_shift < LUKS2_get_data_offset(hdr)) {
3038 log_err(cd, _("Data shift (%" PRIu64 " sectors) is less than future data offset (%" PRIu64 " sectors)."),
3039 params->data_shift, LUKS2_get_data_offset(hdr));
3042 devfd = device_open_excl(cd, crypt_data_device(cd), O_RDWR);
3044 if (devfd == -EBUSY)
3045 log_err(cd,_("Failed to open %s in exclusive mode (already mapped or mounted)."),
3046 device_path(crypt_data_device(cd)));
3051 if (params->mode == CRYPT_REENCRYPT_ENCRYPT) {
3052 /* in-memory only */
3053 r = reencrypt_set_encrypt_segments(cd, hdr, data_size,
3054 params->data_shift << SECTOR_SHIFT,
3061 r = reencrypt_make_backup_segments(cd, hdr, keyslot_new, _cipher, data_offset, params);
3063 log_dbg(cd, "Failed to create reencryption backup device segments.");
3067 r = reencrypt_verify_resilience_params(cd, params, check_sector_size, move_first_segment);
3071 r = LUKS2_keyslot_reencrypt_allocate(cd, hdr, reencrypt_keyslot, params,
3072 reencrypt_get_alignment(cd, hdr));
3076 r = LUKS2_keyslot_open_all_segments(cd, keyslot_old, keyslot_new, passphrase, passphrase_size, vks);
3080 r = LUKS2_keyslot_reencrypt_digest_create(cd, hdr, LUKS2_REENCRYPT_REQ_VERSION, *vks);
/* Online reencrypt (not initial encryption): sanity-check active mapping. */
3084 if (name && params->mode != CRYPT_REENCRYPT_ENCRYPT) {
3085 r = reencrypt_verify_and_upload_keys(cd, hdr, LUKS2_reencrypt_digest_old(hdr), LUKS2_reencrypt_digest_new(hdr), *vks);
3089 r = dm_query_device(cd, name, DM_ACTIVE_UUID | DM_ACTIVE_DEVICE |
3090 DM_ACTIVE_CRYPT_KEYSIZE | DM_ACTIVE_CRYPT_KEY |
3091 DM_ACTIVE_CRYPT_CIPHER, &dmd_target);
3095 r = LUKS2_assembly_multisegment_dmd(cd, hdr, *vks, LUKS2_get_segments_jobj(hdr), &dmd_source);
3097 r = crypt_compare_dm_devices(cd, &dmd_source, &dmd_target);
3099 log_err(cd, _("Mismatching parameters on device %s."), name);
3102 dm_targets_free(cd, &dmd_source);
3103 dm_targets_free(cd, &dmd_target);
3104 free(CONST_CAST(void*)dmd_target.uuid);
3110 if (move_first_segment && reencrypt_move_data(cd, devfd, params->data_shift << SECTOR_SHIFT, params->mode)) {
3115 /* This must be first and only write in LUKS2 metadata during _reencrypt_init */
3116 r = reencrypt_update_flag(cd, LUKS2_REENCRYPT_REQ_VERSION, true, true);
3118 log_dbg(cd, "Failed to set online-reencryption requirement.");
3121 r = reencrypt_keyslot;
3123 device_release_excl(cd, crypt_data_device(cd));
/* Discard all in-memory metadata changes if anything failed. */
3124 if (r < 0 && LUKS2_hdr_rollback(cd, hdr) < 0)
3125 log_dbg(cd, "Failed to rollback LUKS2 metadata after failure.");
/*
 * Persist the resilience data for the current hotzone before it is
 * overwritten: checksums mode hashes the hotzone per block and stores the
 * digests in the reencrypt keyslot; journal mode stores the data itself;
 * datashift mode only commits the header; "none" stores nothing.
 */
3130 static int reencrypt_hotzone_protect_final(struct crypt_device *cd,
3131 struct luks2_hdr *hdr, int reencrypt_keyslot,
3132 const struct reenc_protection *rp,
3133 const void *buffer, size_t buffer_len)
3135 const void *pbuffer;
3136 size_t data_offset, len;
3142 if (rp->type == REENC_PROTECTION_NONE)
3145 if (rp->type == REENC_PROTECTION_CHECKSUM) {
3146 log_dbg(cd, "Checksums hotzone resilience.");
/* One hash of size hash_size per block_size chunk of the hotzone buffer. */
3148 for (data_offset = 0, len = 0; data_offset < buffer_len; data_offset += rp->p.csum.block_size, len += rp->p.csum.hash_size) {
3149 if (crypt_hash_write(rp->p.csum.ch, (const char *)buffer + data_offset, rp->p.csum.block_size)) {
3150 log_dbg(cd, "Failed to hash sector at offset %zu.", data_offset);
3153 if (crypt_hash_final(rp->p.csum.ch, (char *)rp->p.csum.checksums + len, rp->p.csum.hash_size)) {
3154 log_dbg(cd, "Failed to finalize hash.");
3158 pbuffer = rp->p.csum.checksums;
3159 } else if (rp->type == REENC_PROTECTION_JOURNAL) {
3160 log_dbg(cd, "Journal hotzone resilience.");
3163 } else if (rp->type == REENC_PROTECTION_DATASHIFT) {
/* Datashift resilience needs no hotzone copy — committing the header suffices. */
3164 log_dbg(cd, "Data shift hotzone resilience.");
3165 return LUKS2_hdr_write(cd, hdr);
3169 log_dbg(cd, "Going to store %zu bytes in reencrypt keyslot.", len);
3171 r = LUKS2_keyslot_reencrypt_store(cd, hdr, reencrypt_keyslot, pbuffer, len);
/* Store returns bytes written (>0) on success; normalize to 0. */
3173 return r > 0 ? 0 : r;
/*
 * Advance the reencryption window after a processed hotzone: recompute
 * rh->offset/rh->length for the next step according to direction
 * (backward offsets shrink, forward offsets grow by bytes read) and
 * accumulate progress. Fails if the computed offset passes the device end.
 */
3176 static int reencrypt_context_update(struct crypt_device *cd,
3177 struct luks2_reencrypt *rh)
3182 if (rh->direction == CRYPT_REENCRYPT_BACKWARD) {
/* Backward encryption with datashift steps in fixed data_shift increments. */
3183 if (rh->rp.type == REENC_PROTECTION_DATASHIFT && rh->mode == CRYPT_REENCRYPT_ENCRYPT) {
3185 rh->offset -= data_shift_value(&rh->rp);
/* Near the device start, clamp the final (short) window. */
3186 if (rh->offset && (rh->offset < data_shift_value(&rh->rp))) {
3187 rh->length = rh->offset;
3188 rh->offset = data_shift_value(&rh->rp);
3191 rh->length = data_shift_value(&rh->rp);
/* Generic backward step: window ends at the previous offset. */
3193 if (rh->offset < rh->length)
3194 rh->length = rh->offset;
3195 rh->offset -= rh->length;
3197 } else if (rh->direction == CRYPT_REENCRYPT_FORWARD) {
3198 rh->offset += (uint64_t)rh->read;
/* Last step of datashift decryption: process the moved segment itself. */
3199 if (rh->device_size == rh->offset &&
3200 rh->jobj_segment_moved &&
3201 rh->mode == CRYPT_REENCRYPT_DECRYPT &&
3202 rh->rp.type == REENC_PROTECTION_DATASHIFT) {
3204 rh->length = json_segment_get_size(rh->jobj_segment_moved, 0);
3206 /* it fails in-case of device_size < rh->offset later */
3207 else if (rh->device_size - rh->offset < rh->length)
3208 rh->length = rh->device_size - rh->offset;
3212 if (rh->device_size < rh->offset) {
3213 log_dbg(cd, "Calculated reencryption offset %" PRIu64 " is beyond device size %" PRIu64 ".", rh->offset, rh->device_size);
3217 rh->progress += (uint64_t)rh->read;
/*
 * Build a luks2_reencrypt context from on-disk metadata state: a clean
 * state loads normally, a crashed state loads via recovery path. Volume
 * keys are verified against the stored reencrypt digests first. On success
 * *rh owns the new context.
 */
3222 static int reencrypt_load(struct crypt_device *cd, struct luks2_hdr *hdr,
3223 uint64_t device_size,
3224 uint64_t max_hotzone_size,
3225 uint64_t required_device_size,
3226 struct volume_key *vks,
3227 struct luks2_reencrypt **rh)
3230 struct luks2_reencrypt *tmp = NULL;
3231 crypt_reencrypt_info ri = LUKS2_reencrypt_status(hdr);
3233 if (ri == CRYPT_REENCRYPT_NONE) {
3234 log_err(cd, _("Device not marked for LUKS2 reencryption."));
3236 } else if (ri == CRYPT_REENCRYPT_INVALID)
3239 r = LUKS2_reencrypt_digest_verify(cd, hdr, vks);
/* Choose load path depending on whether the last run crashed. */
3243 if (ri == CRYPT_REENCRYPT_CLEAN)
3244 r = reencrypt_load_clean(cd, hdr, device_size, max_hotzone_size, required_device_size, &tmp);
3245 else if (ri == CRYPT_REENCRYPT_CRASH)
3246 r = reencrypt_load_crashed(cd, hdr, device_size, &tmp);
3250 if (r < 0 || !tmp) {
3251 log_err(cd, _("Failed to load LUKS2 reencryption context."));
/*
 * Take the per-device reencryption write lock named
 * "LUKS2-reencryption-<uuid>". When metadata locking is globally disabled
 * the lock handle is NULL and the call succeeds.
 */
3260 static int reencrypt_lock_internal(struct crypt_device *cd, const char *uuid, struct crypt_lock_handle **reencrypt_lock)
3263 char *lock_resource;
3265 if (!crypt_metadata_locking_enabled()) {
3266 *reencrypt_lock = NULL;
3270 r = asprintf(&lock_resource, "LUKS2-reencryption-%s", uuid);
3274 free(lock_resource);
3278 r = crypt_write_lock(cd, lock_resource, false, reencrypt_lock);
3280 free(lock_resource);
/*
 * Take the reencryption lock identified by a dm device UUID. The LUKS UUID
 * is reconstructed from the fixed positions inside the dm UUID string
 * (CRYPT-LUKS2-<hex uuid>-name layout) and must match the context UUID
 * when one is already known.
 */
3286 int LUKS2_reencrypt_lock_by_dm_uuid(struct crypt_device *cd, const char *dm_uuid,
3287 struct crypt_lock_handle **reencrypt_lock)
3291 const char *uuid = crypt_get_uuid(cd);
/* Rebuild the dashed UUID from the packed hex inside the dm UUID. */
3297 r = snprintf(hdr_uuid, sizeof(hdr_uuid), "%.8s-%.4s-%.4s-%.4s-%.12s",
3298 dm_uuid + 6, dm_uuid + 14, dm_uuid + 18, dm_uuid + 22, dm_uuid + 26);
3299 if (r < 0 || (size_t)r != (sizeof(hdr_uuid) - 1))
3301 } else if (crypt_uuid_cmp(dm_uuid, uuid))
3304 return reencrypt_lock_internal(cd, uuid, reencrypt_lock);
/*
 * Take the reencryption lock for an already-initialized LUKS2 context;
 * rejects contexts of any other type.
 */
3308 int LUKS2_reencrypt_lock(struct crypt_device *cd, struct crypt_lock_handle **reencrypt_lock)
3310 if (!cd || !crypt_get_type(cd) || strcmp(crypt_get_type(cd), CRYPT_LUKS2))
3313 return reencrypt_lock_internal(cd, crypt_get_uuid(cd), reencrypt_lock);
/* Release the reencryption lock (NULL handle is tolerated by the unlock helper). */
3317 void LUKS2_reencrypt_unlock(struct crypt_device *cd, struct crypt_lock_handle *reencrypt_lock)
3319 crypt_unlock_internal(cd, reencrypt_lock);
3321 #if USE_LUKS2_REENCRYPTION
/*
 * Acquire the reencryption lock and confirm the device is in a CLEAN
 * reencryption state. The metadata is re-read under the lock (crypt_load)
 * to close the race between status check and lock acquisition; any state
 * other than CLEAN releases the lock and fails (crash state requires
 * recovery first).
 */
3322 static int reencrypt_lock_and_verify(struct crypt_device *cd, struct luks2_hdr *hdr,
3323 struct crypt_lock_handle **reencrypt_lock)
3326 crypt_reencrypt_info ri;
3327 struct crypt_lock_handle *h;
3329 ri = LUKS2_reencrypt_status(hdr);
3330 if (ri == CRYPT_REENCRYPT_INVALID) {
3331 log_err(cd, _("Failed to get reencryption state."));
3334 if (ri < CRYPT_REENCRYPT_CLEAN) {
3335 log_err(cd, _("Device is not in reencryption."));
3339 r = LUKS2_reencrypt_lock(cd, &h);
/* -EBUSY means another process holds the lock (reencryption running). */
3342 log_err(cd, _("Reencryption process is already running."));
3344 log_err(cd, _("Failed to acquire reencryption lock."));
3348 /* With reencryption lock held, reload device context and verify metadata state */
3349 r = crypt_load(cd, CRYPT_LUKS2, NULL);
3351 LUKS2_reencrypt_unlock(cd, h);
3355 ri = LUKS2_reencrypt_status(hdr);
3356 if (ri == CRYPT_REENCRYPT_CLEAN) {
3357 *reencrypt_lock = h;
3361 LUKS2_reencrypt_unlock(cd, h);
3362 log_err(cd, _("Cannot proceed with reencryption. Run reencryption recovery first."));
/*
 * Resume an existing LUKS2 reencryption using a passphrase: acquire and
 * verify the reencryption lock, unlock/verify volume keys, cross-check the
 * active dm device (online case) or take an exclusive open (offline case),
 * build the luks2_reencrypt context with storage wrappers, and attach it to
 * the crypt_device. On success the context owns the volume keys and the
 * reencryption lock.
 */
3366 static int reencrypt_load_by_passphrase(struct crypt_device *cd,
3368 const char *passphrase,
3369 size_t passphrase_size,
3372 struct volume_key **vks,
3373 const struct crypt_params_reencrypt *params)
3375 int r, reencrypt_slot;
3376 struct luks2_hdr *hdr;
3377 struct crypt_lock_handle *reencrypt_lock;
3378 struct luks2_reencrypt *rh;
3379 const struct volume_key *vk;
3381 uint32_t old_sector_size, new_sector_size, sector_size;
3382 struct crypt_dm_active_device dmd_target, dmd_source = {
3383 .uuid = crypt_get_uuid(cd),
3384 .flags = CRYPT_ACTIVATE_SHARED /* turn off exclusive open checks */
3386 uint64_t minimal_size, device_size, mapping_size = 0, required_size = 0,
3387 max_hotzone_size = 0;
3393 hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
3397 log_dbg(cd, "Loading LUKS2 reencryption context.");
/* Validate resilience change against the larger of old/new sector size. */
3399 old_sector_size = reencrypt_get_sector_size_old(hdr);
3400 new_sector_size = reencrypt_get_sector_size_new(hdr);
3401 sector_size = new_sector_size > old_sector_size ? new_sector_size : old_sector_size;
3403 r = reencrypt_verify_resilience_params(cd, params, sector_size,
3404 LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment") >= 0);
3409 required_size = params->device_size;
3410 max_hotzone_size = params->max_hotzone_size;
/* Drop any stale context attached to this crypt_device. */
3413 rh = crypt_get_luks2_reencrypt(cd);
3415 LUKS2_reencrypt_free(cd, rh);
3416 crypt_set_luks2_reencrypt(cd, NULL);
3420 r = reencrypt_lock_and_verify(cd, hdr, &reencrypt_lock);
3424 reencrypt_slot = LUKS2_find_keyslot(hdr, "reencrypt");
3425 if (reencrypt_slot < 0) {
3430 /* From now on we hold reencryption lock */
3432 if (LUKS2_get_data_size(hdr, &minimal_size, &dynamic)) {
3437 /* some configurations provides fixed device size */
3438 r = LUKS2_reencrypt_check_device_size(cd, hdr, minimal_size, &device_size, false, dynamic);
3444 minimal_size >>= SECTOR_SHIFT;
/* Reuse cached keys if they already verify; otherwise unlock from keyslots. */
3446 r = reencrypt_verify_keys(cd, LUKS2_reencrypt_digest_old(hdr), LUKS2_reencrypt_digest_new(hdr), *vks);
3448 log_dbg(cd, "Keys are not ready. Unlocking all volume keys.");
3449 r = LUKS2_keyslot_open_all_segments(cd, keyslot_old, keyslot_new, passphrase, passphrase_size, vks);
3456 r = reencrypt_upload_keys(cd, hdr, LUKS2_reencrypt_digest_old(hdr), LUKS2_reencrypt_digest_new(hdr), *vks);
/* Online case: compare active dm table with metadata-derived table. */
3460 r = dm_query_device(cd, name, DM_ACTIVE_UUID | DM_ACTIVE_DEVICE |
3461 DM_ACTIVE_CRYPT_KEYSIZE | DM_ACTIVE_CRYPT_KEY |
3462 DM_ACTIVE_CRYPT_CIPHER, &dmd_target);
3465 flags = dmd_target.flags;
3468 * By default reencryption code aims to retain flags from existing dm device.
3469 * The keyring activation flag can not be inherited if original cipher is null.
3471 * In this case override the flag based on decision made in reencrypt_upload_keys
3472 * above. The code checks if new VK is eligible for keyring.
3474 vk = crypt_volume_key_by_id(*vks, LUKS2_reencrypt_digest_new(hdr));
3475 if (vk && vk->key_description && crypt_is_cipher_null(reencrypt_segment_cipher_old(hdr))) {
3476 flags |= CRYPT_ACTIVATE_KEYRING_KEY;
3477 dmd_source.flags |= CRYPT_ACTIVATE_KEYRING_KEY;
3480 r = LUKS2_assembly_multisegment_dmd(cd, hdr, *vks, LUKS2_get_segments_jobj(hdr), &dmd_source);
3482 r = crypt_compare_dm_devices(cd, &dmd_source, &dmd_target);
3484 log_err(cd, _("Mismatching parameters on device %s."), name);
3487 dm_targets_free(cd, &dmd_source);
3488 dm_targets_free(cd, &dmd_target);
3489 free(CONST_CAST(void*)dmd_target.uuid);
3492 mapping_size = dmd_target.size;
/* Requested size must agree with the live mapping size when both known. */
3496 if (required_size && mapping_size && (required_size != mapping_size)) {
3497 log_err(cd, _("Active device size and requested reencryption size don't match."));
3502 required_size = mapping_size;
3504 if (required_size) {
3505 /* TODO: Add support for changing fixed minimal size in reencryption mda where possible */
3506 if ((minimal_size && (required_size < minimal_size)) ||
3507 (required_size > (device_size >> SECTOR_SHIFT)) ||
3508 (!dynamic && (required_size != minimal_size)) ||
3509 (old_sector_size > 0 && MISALIGNED(required_size, old_sector_size >> SECTOR_SHIFT)) ||
3510 (new_sector_size > 0 && MISALIGNED(required_size, new_sector_size >> SECTOR_SHIFT))) {
3511 log_err(cd, _("Illegal device size requested in reencryption parameters."));
3516 alignment = reencrypt_get_alignment(cd, hdr);
/* Apply a resilience-parameter change to the reencrypt keyslot if needed. */
3518 r = LUKS2_keyslot_reencrypt_update_needed(cd, hdr, reencrypt_slot, params, alignment);
3519 if (r > 0) /* metadata update needed */
3520 r = LUKS2_keyslot_reencrypt_update(cd, hdr, reencrypt_slot, params, alignment, *vks);
3524 r = reencrypt_load(cd, hdr, device_size, max_hotzone_size, required_size, *vks, &rh);
3528 if (name && (r = reencrypt_context_set_names(rh, name)))
3531 /* Reassure device is not mounted and there's no dm mapping active */
3532 if (!name && (device_open_excl(cd, crypt_data_device(cd), O_RDONLY) < 0)) {
3533 log_err(cd,_("Failed to open %s in exclusive mode (already mapped or mounted)."), device_path(crypt_data_device(cd)));
3537 device_release_excl(cd, crypt_data_device(cd));
3539 /* There's a race for dm device activation not managed by cryptsetup.
3542 * 2) rogue dm device activation
3543 * 3) one or more dm-crypt based wrapper activation
3544 * 4) next excl open gets skipped due to 3) device from 2) remains undetected.
3546 r = reencrypt_init_storage_wrappers(cd, hdr, rh, *vks);
3550 /* If one of wrappers is based on dmcrypt fallback it already blocked mount */
3551 if (!name && crypt_storage_wrapper_get_type(rh->cw1) != DMCRYPT &&
3552 crypt_storage_wrapper_get_type(rh->cw2) != DMCRYPT) {
3553 if (device_open_excl(cd, crypt_data_device(cd), O_RDONLY) < 0) {
3554 log_err(cd,_("Failed to open %s in exclusive mode (already mapped or mounted)."), device_path(crypt_data_device(cd)));
/* Context takes ownership of keys and the reencryption lock. */
3562 MOVE_REF(rh->vks, *vks);
3563 MOVE_REF(rh->reenc_lock, reencrypt_lock);
3565 crypt_set_luks2_reencrypt(cd, rh);
/* Error path: release lock and partially built context. */
3569 LUKS2_reencrypt_unlock(cd, reencrypt_lock);
3570 LUKS2_reencrypt_free(cd, rh);
/*
 * Run reencryption crash recovery: take the reencryption lock, re-read
 * metadata, and if the state is CRASH perform locked recovery with the
 * supplied passphrase. A CLEAN state means no recovery is needed; an
 * in-progress reencryption (lock busy) refuses recovery.
 */
3574 static int reencrypt_recovery_by_passphrase(struct crypt_device *cd,
3575 struct luks2_hdr *hdr,
3578 const char *passphrase,
3579 size_t passphrase_size)
3582 crypt_reencrypt_info ri;
3583 struct crypt_lock_handle *reencrypt_lock;
3585 r = LUKS2_reencrypt_lock(cd, &reencrypt_lock);
3588 log_err(cd, _("Reencryption in-progress. Cannot perform recovery."));
3590 log_err(cd, _("Failed to get reencryption lock."));
/* Re-read metadata under the lock before trusting the status. */
3594 if ((r = crypt_load(cd, CRYPT_LUKS2, NULL))) {
3595 LUKS2_reencrypt_unlock(cd, reencrypt_lock);
3599 ri = LUKS2_reencrypt_status(hdr);
3600 if (ri == CRYPT_REENCRYPT_INVALID) {
3601 LUKS2_reencrypt_unlock(cd, reencrypt_lock);
3605 if (ri == CRYPT_REENCRYPT_CRASH) {
3606 r = LUKS2_reencrypt_locked_recovery_by_passphrase(cd, keyslot_old, keyslot_new,
3607 passphrase, passphrase_size, NULL);
3609 log_err(cd, _("LUKS2 reencryption recovery failed."));
3611 log_dbg(cd, "No LUKS2 reencryption recovery needed.");
3615 LUKS2_reencrypt_unlock(cd, reencrypt_lock);
/*
 * Repair reencryption metadata (regenerate the reencrypt digest and upgrade
 * the online-reencrypt requirement flag) under the reencryption lock. The
 * requirement version is chosen from the stored mode/resilience: datashift
 * decryption with a moved segment uses the dedicated decrypt-datashift
 * version, everything else the standard reencrypt version.
 */
3619 static int reencrypt_repair_by_passphrase(
3620 struct crypt_device *cd,
3621 struct luks2_hdr *hdr,
3624 const char *passphrase,
3625 size_t passphrase_size)
3628 struct crypt_lock_handle *reencrypt_lock;
3629 struct luks2_reencrypt *rh;
3630 crypt_reencrypt_info ri;
3631 uint8_t requirement_version;
3632 const char *resilience;
3633 struct volume_key *vks = NULL;
3635 log_dbg(cd, "Loading LUKS2 reencryption context for metadata repair.");
/* Drop any stale context attached to this crypt_device. */
3637 rh = crypt_get_luks2_reencrypt(cd);
3639 LUKS2_reencrypt_free(cd, rh);
3640 crypt_set_luks2_reencrypt(cd, NULL);
3644 ri = LUKS2_reencrypt_status(hdr);
3645 if (ri == CRYPT_REENCRYPT_INVALID)
3648 if (ri < CRYPT_REENCRYPT_CLEAN) {
3649 log_err(cd, _("Device is not in reencryption."));
3653 r = LUKS2_reencrypt_lock(cd, &reencrypt_lock);
3656 log_err(cd, _("Reencryption process is already running."));
3658 log_err(cd, _("Failed to acquire reencryption lock."));
3662 /* With reencryption lock held, reload device context and verify metadata state */
3663 r = crypt_load(cd, CRYPT_LUKS2, NULL);
3667 ri = LUKS2_reencrypt_status(hdr);
3668 if (ri == CRYPT_REENCRYPT_INVALID) {
3672 if (ri == CRYPT_REENCRYPT_NONE) {
3677 resilience = reencrypt_resilience_type(hdr);
/* Pick requirement flag version matching the stored reencryption variant. */
3683 if (reencrypt_mode(hdr) == CRYPT_REENCRYPT_DECRYPT &&
3684 !strncmp(resilience, "datashift-", 10) &&
3685 LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment") >= 0)
3686 requirement_version = LUKS2_DECRYPT_DATASHIFT_REQ_VERSION;
3688 requirement_version = LUKS2_REENCRYPT_REQ_VERSION;
3690 r = LUKS2_keyslot_open_all_segments(cd, keyslot_old, keyslot_new, passphrase, passphrase_size, &vks);
3694 r = LUKS2_keyslot_reencrypt_digest_create(cd, hdr, requirement_version, vks);
3695 crypt_free_volume_key(vks);
3700 /* replaces old online-reencrypt flag with updated version and commits metadata */
3701 r = reencrypt_update_flag(cd, requirement_version, true, true);
3703 LUKS2_reencrypt_unlock(cd, reencrypt_lock);
3704 crypt_free_volume_key(vks);
/*
 * Common worker for passphrase-based reencryption initialization.
 * Handles repair/recovery short-circuits first, validates the requested
 * cipher against the new keyslot key size, then initializes or resumes
 * reencryption metadata under the metadata write lock and loads the
 * runtime context. Returns the "reencrypt" keyslot id (>= 0) on success.
 */
3709 static int reencrypt_init_by_passphrase(struct crypt_device *cd,
3711 const char *passphrase,
3712 size_t passphrase_size,
3716 const char *cipher_mode,
3717 const struct crypt_params_reencrypt *params)
3719 #if USE_LUKS2_REENCRYPTION
3721 crypt_reencrypt_info ri;
3722 struct volume_key *vks = NULL;
3723 uint32_t flags = params ? params->flags : 0;
3724 struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
3726 /* short-circuit in reencryption metadata update and finish immediately. */
3727 if (flags & CRYPT_REENCRYPT_REPAIR_NEEDED)
3728 return reencrypt_repair_by_passphrase(cd, hdr, keyslot_old, keyslot_new, passphrase, passphrase_size);
3730 /* short-circuit in recovery and finish immediately. */
3731 if (flags & CRYPT_REENCRYPT_RECOVERY)
3732 return reencrypt_recovery_by_passphrase(cd, hdr, keyslot_old, keyslot_new, passphrase, passphrase_size);
/* Cipher check is skipped for wrapped-key ciphers (key size not usable). */
3734 if (cipher && !crypt_cipher_wrapped_key(cipher, cipher_mode)) {
3735 r = crypt_keyslot_get_key_size(cd, keyslot_new);
3738 r = LUKS2_check_cipher(cd, r, cipher, cipher_mode);
3740 log_err(cd, _("Unable to use cipher specification %s-%s for LUKS2."), cipher, cipher_mode);
3745 r = LUKS2_device_write_lock(cd, hdr, crypt_metadata_device(cd));
3749 ri = LUKS2_reencrypt_status(hdr);
3750 if (ri == CRYPT_REENCRYPT_INVALID) {
3751 device_write_unlock(cd, crypt_metadata_device(cd));
/* INITIALIZE_ONLY conflicts with metadata that is already initialized. */
3755 if ((ri > CRYPT_REENCRYPT_NONE) && (flags & CRYPT_REENCRYPT_INITIALIZE_ONLY)) {
3756 device_write_unlock(cd, crypt_metadata_device(cd));
3757 log_err(cd, _("LUKS2 reencryption already initialized in metadata."));
3761 if (ri == CRYPT_REENCRYPT_NONE && !(flags & CRYPT_REENCRYPT_RESUME_ONLY)) {
3762 r = reencrypt_init(cd, name, hdr, passphrase, passphrase_size, keyslot_old, keyslot_new, cipher, cipher_mode, params, &vks);
3764 log_err(cd, _("Failed to initialize LUKS2 reencryption in metadata."));
3765 } else if (ri > CRYPT_REENCRYPT_NONE) {
3766 log_dbg(cd, "LUKS2 reencryption already initialized.");
3770 device_write_unlock(cd, crypt_metadata_device(cd));
3772 if (r < 0 || (flags & CRYPT_REENCRYPT_INITIALIZE_ONLY))
3775 r = reencrypt_load_by_passphrase(cd, name, passphrase, passphrase_size, keyslot_old, keyslot_new, &vks, params);
/* Drop local volume-key references (keyring entry and heap copy). */
3778 crypt_drop_keyring_key(cd, vks);
3779 crypt_free_volume_key(vks);
3780 return r < 0 ? r : LUKS2_find_keyslot(hdr, "reencrypt");
3782 log_err(cd, _("This operation is not supported for this device type."));
/*
 * Public API: initialize/resume LUKS2 reencryption with a passphrase
 * stored in the kernel keyring (looked up by description). Reads the
 * passphrase, delegates to reencrypt_init_by_passphrase() and wipes
 * the local plaintext copy before returning.
 */
3787 int crypt_reencrypt_init_by_keyring(struct crypt_device *cd,
3789 const char *passphrase_description,
3793 const char *cipher_mode,
3794 const struct crypt_params_reencrypt *params)
3798 size_t passphrase_size;
/* Requires a LUKS2 device with the online-reencrypt requirement allowed. */
3800 if (onlyLUKS2mask(cd, CRYPT_REQUIREMENT_ONLINE_REENCRYPT) || !passphrase_description)
/* INITIALIZE_ONLY and RESUME_ONLY are mutually exclusive flags. */
3802 if (params && (params->flags & CRYPT_REENCRYPT_INITIALIZE_ONLY) && (params->flags & CRYPT_REENCRYPT_RESUME_ONLY))
3805 r = keyring_get_passphrase(passphrase_description, &passphrase, &passphrase_size);
3807 log_err(cd, _("Failed to read passphrase from keyring (error %d)."), r);
3811 r = reencrypt_init_by_passphrase(cd, name, passphrase, passphrase_size, keyslot_old, keyslot_new, cipher, cipher_mode, params);
/* Securely erase the passphrase copy obtained from the keyring. */
3813 crypt_safe_memzero(passphrase, passphrase_size);
/*
 * Public API: initialize/resume LUKS2 reencryption using an explicit
 * passphrase. Thin validation wrapper over reencrypt_init_by_passphrase().
 */
3819 int crypt_reencrypt_init_by_passphrase(struct crypt_device *cd,
3821 const char *passphrase,
3822 size_t passphrase_size,
3826 const char *cipher_mode,
3827 const struct crypt_params_reencrypt *params)
/* Requires a LUKS2 device with the online-reencrypt requirement allowed. */
3829 if (onlyLUKS2mask(cd, CRYPT_REQUIREMENT_ONLINE_REENCRYPT) || !passphrase)
/* INITIALIZE_ONLY and RESUME_ONLY are mutually exclusive flags. */
3831 if (params && (params->flags & CRYPT_REENCRYPT_INITIALIZE_ONLY) && (params->flags & CRYPT_REENCRYPT_RESUME_ONLY))
3834 return reencrypt_init_by_passphrase(cd, name, passphrase, passphrase_size, keyslot_old, keyslot_new, cipher, cipher_mode, params);
3837 #if USE_LUKS2_REENCRYPTION
/*
 * Process a single reencryption hotzone:
 *   build segments -> read old data -> commit resilience metadata ->
 *   decrypt -> encrypt+write in place -> commit new segment layout.
 * Failures before the in-place write are recoverable (REENC_ROLLBACK);
 * failures after it are fatal for the data (see "severity" comments).
 */
3838 static reenc_status_t reencrypt_step(struct crypt_device *cd,
3839 struct luks2_hdr *hdr,
3840 struct luks2_reencrypt *rh,
3841 uint64_t device_size,
3845 struct reenc_protection *rp;
3852 /* in memory only */
3853 r = reencrypt_make_segments(cd, hdr, rh, device_size);
3857 r = reencrypt_assign_segments(cd, hdr, rh, 1, 0);
3859 log_err(cd, _("Failed to set device segments for next reencryption hotzone."));
3863 log_dbg(cd, "Reencrypting chunk starting at offset: %" PRIu64 ", size :%" PRIu64 ".", rh->offset, rh->length);
3864 log_dbg(cd, "data_offset: %" PRIu64, crypt_get_data_offset(cd) << SECTOR_SHIFT);
/*
 * Datashift with a moved first segment: once the hotzone reaches offset 0,
 * the old-segment storage wrapper must be rebuilt to read from the moved
 * segment's data offset instead.
 */
3866 if (!rh->offset && rp->type == REENC_PROTECTION_DATASHIFT && rh->jobj_segment_moved) {
3867 crypt_storage_wrapper_destroy(rh->cw1);
3868 log_dbg(cd, "Reinitializing old segment storage wrapper for moved segment.");
3869 r = crypt_storage_wrapper_init(cd, &rh->cw1, crypt_data_device(cd),
3870 LUKS2_reencrypt_get_data_offset_moved(hdr),
3871 crypt_get_iv_offset(cd),
3872 reencrypt_get_sector_size_old(hdr),
3873 reencrypt_segment_cipher_old(hdr),
3874 crypt_volume_key_by_id(rh->vks, rh->digest_old),
3877 log_err(cd, _("Failed to initialize old segment storage wrapper."));
3878 return REENC_ROLLBACK;
/* The moved segment may use its own resilience settings. */
3881 if (rh->rp_moved_segment.type != REENC_PROTECTION_NOT_SET) {
3882 log_dbg(cd, "Switching to moved segment resilience type.");
3883 rp = &rh->rp_moved_segment;
3887 r = reencrypt_hotzone_protect_ready(cd, rp);
3889 log_err(cd, _("Failed to initialize hotzone protection."));
3890 return REENC_ROLLBACK;
/* Online only: remap overlay so no I/O targets the hotzone meanwhile. */
3894 r = reencrypt_refresh_overlay_devices(cd, hdr, rh->overlay_name, rh->hotzone_name, rh->vks, rh->device_size, rh->flags);
3895 /* Teardown overlay devices with dm-error. None bio shall pass! */
3900 rh->read = crypt_storage_wrapper_read(rh->cw1, rh->offset, rh->reenc_buffer, rh->length);
3902 /* severity normal */
3903 log_err(cd, _("Failed to read hotzone area starting at %" PRIu64 "."), rh->offset);
3904 return REENC_ROLLBACK;
3907 /* metadata commit point */
3908 r = reencrypt_hotzone_protect_final(cd, hdr, rh->reenc_keyslot, rp, rh->reenc_buffer, rh->read);
3910 /* severity normal */
3911 log_err(cd, _("Failed to write reencryption resilience metadata."));
3912 return REENC_ROLLBACK;
3915 r = crypt_storage_wrapper_decrypt(rh->cw1, rh->offset, rh->reenc_buffer, rh->read);
3917 /* severity normal */
3918 log_err(cd, _("Decryption failed."));
3919 return REENC_ROLLBACK;
/* In-place overwrite: any failure from here on can destroy hotzone data. */
3921 if (rh->read != crypt_storage_wrapper_encrypt_write(rh->cw2, rh->offset, rh->reenc_buffer, rh->read)) {
3922 /* severity fatal */
3923 log_err(cd, _("Failed to write hotzone area starting at %" PRIu64 "."), rh->offset);
/* Data must hit stable storage before metadata declares the zone done. */
3927 if (rp->type != REENC_PROTECTION_NONE && crypt_storage_wrapper_datasync(rh->cw2)) {
3928 log_err(cd, _("Failed to sync data."));
3932 /* metadata commit safe point */
3933 r = reencrypt_assign_segments(cd, hdr, rh, 0, rp->type != REENC_PROTECTION_NONE);
3935 /* severity fatal */
3936 log_err(cd, _("Failed to update metadata after current reencryption hotzone completed."));
3941 /* severity normal */
3942 log_dbg(cd, "Resuming device %s", rh->hotzone_name);
3943 r = dm_resume_device(cd, rh->hotzone_name, DM_RESUME_PRIVATE);
3945 log_err(cd, _("Failed to resume device %s."), rh->hotzone_name);
/*
 * Remove all backup segments ("backup-previous", "backup-final",
 * "backup-moved-segment") from the JSON metadata, unassigning their
 * digests first. Changes are in-memory only; caller commits the header.
 */
3953 static int reencrypt_erase_backup_segments(struct crypt_device *cd,
3954 struct luks2_hdr *hdr)
3956 int segment = LUKS2_get_segment_id_by_flag(hdr, "backup-previous");
3958 if (LUKS2_digest_segment_assign(cd, hdr, segment, CRYPT_ANY_DIGEST, 0, 0))
3960 json_object_object_del_by_uint(LUKS2_get_segments_jobj(hdr), segment);
3962 segment = LUKS2_get_segment_id_by_flag(hdr, "backup-final");
3964 if (LUKS2_digest_segment_assign(cd, hdr, segment, CRYPT_ANY_DIGEST, 0, 0))
3966 json_object_object_del_by_uint(LUKS2_get_segments_jobj(hdr), segment);
3968 segment = LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment");
3970 if (LUKS2_digest_segment_assign(cd, hdr, segment, CRYPT_ANY_DIGEST, 0, 0))
3972 json_object_object_del_by_uint(LUKS2_get_segments_jobj(hdr), segment);
/*
 * After reencryption finishes, wipe data-device areas that no longer
 * belong to any segment: the moved first-segment backup area (encrypt
 * mode) and the tail freed by forward datashift. Uses random-data wipe
 * in 1 MiB blocks.
 */
3978 static int reencrypt_wipe_unused_device_area(struct crypt_device *cd, struct luks2_reencrypt *rh)
3980 uint64_t offset, length, dev_size;
/* Encrypt mode with moved segment: wipe the original plaintext location. */
3986 if (rh->jobj_segment_moved && rh->mode == CRYPT_REENCRYPT_ENCRYPT) {
3987 offset = json_segment_get_offset(rh->jobj_segment_moved, 0);
3988 length = json_segment_get_size(rh->jobj_segment_moved, 0);
3989 log_dbg(cd, "Wiping %" PRIu64 " bytes of backup segment data at offset %" PRIu64,
3991 r = crypt_wipe_device(cd, crypt_data_device(cd), CRYPT_WIPE_RANDOM,
3992 offset, length, 1024 * 1024, NULL, NULL);
/* Forward datashift: wipe the data_shift-sized area at the device end. */
3998 if (rh->rp.type == REENC_PROTECTION_DATASHIFT && rh->direction == CRYPT_REENCRYPT_FORWARD) {
3999 r = device_size(crypt_data_device(cd), &dev_size);
/* Device smaller than the shift value: nothing sensible to wipe. */
4003 if (dev_size < data_shift_value(&rh->rp))
4006 offset = dev_size - data_shift_value(&rh->rp);
4007 length = data_shift_value(&rh->rp);
4008 log_dbg(cd, "Wiping %" PRIu64 " bytes of data at offset %" PRIu64,
4010 r = crypt_wipe_device(cd, crypt_data_device(cd), CRYPT_WIPE_RANDOM,
4011 offset, length, 1024 * 1024, NULL, NULL);
/*
 * Normal (non-fatal) teardown after reencryption stops or completes:
 * flush metadata, reload/resume the active dm device (online case),
 * remove helper overlay/hotzone devices, and — when the whole device is
 * finished — wipe unused areas, drop old-digest keyslots, erase backup
 * segments, clear the requirement flag and destroy the reencrypt keyslot.
 */
4017 static int reencrypt_teardown_ok(struct crypt_device *cd, struct luks2_hdr *hdr, struct luks2_reencrypt *rh)
/* finished == whole device processed (progress reached device_size). */
4021 bool finished = !(rh->device_size > rh->progress);
/* With no resilience protection the header was not flushed per-hotzone. */
4023 if (rh->rp.type == REENC_PROTECTION_NONE &&
4024 LUKS2_hdr_write(cd, hdr)) {
4025 log_err(cd, _("Failed to write LUKS2 metadata."));
/* Online device: switch the top-level device back to final mapping. */
4030 r = LUKS2_reload(cd, rh->device_name, rh->vks, rh->device_size, rh->flags);
4032 log_err(cd, _("Failed to reload device %s."), rh->device_name);
4034 r = dm_resume_device(cd, rh->device_name, DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH);
4036 log_err(cd, _("Failed to resume device %s."), rh->device_name);
/* Helper devices are no longer needed regardless of resume result. */
4038 dm_remove_device(cd, rh->overlay_name, 0);
4039 dm_remove_device(cd, rh->hotzone_name, 0);
/* Decryption finished: schedule deferred removal of the mapping if supported. */
4041 if (!r && finished && rh->mode == CRYPT_REENCRYPT_DECRYPT &&
4042 !dm_flags(cd, DM_LINEAR, &dmt_flags) && (dmt_flags & DM_DEFERRED_SUPPORTED))
4043 dm_remove_device(cd, rh->device_name, CRYPT_DEACTIVATE_DEFERRED);
/* Cleanup failures below are logged but do not fail the teardown. */
4047 if (reencrypt_wipe_unused_device_area(cd, rh))
4048 log_err(cd, _("Failed to wipe unused data device area."));
4049 if (reencrypt_get_data_offset_new(hdr) && LUKS2_set_keyslots_size(hdr, reencrypt_get_data_offset_new(hdr)))
4050 log_dbg(cd, "Failed to set new keyslots area size.");
/* Remove keyslots still bound to the old (now unused) volume key digest. */
4051 if (rh->digest_old >= 0 && rh->digest_new != rh->digest_old)
4052 for (i = 0; i < LUKS2_KEYSLOTS_MAX; i++)
4053 if (LUKS2_digest_by_keyslot(hdr, i) == rh->digest_old && crypt_keyslot_destroy(cd, i))
4054 log_err(cd, _("Failed to remove unused (unbound) keyslot %d."), i);
4056 if (reencrypt_erase_backup_segments(cd, hdr))
4057 log_dbg(cd, "Failed to erase backup segments");
4059 if (reencrypt_update_flag(cd, 0, false, false))
4060 log_dbg(cd, "Failed to disable reencryption requirement flag.");
4062 /* metadata commit point also removing reencryption flag on-disk */
4063 if (crypt_keyslot_destroy(cd, rh->reenc_keyslot)) {
4064 log_err(cd, _("Failed to remove reencryption keyslot."));
/*
 * Fatal-error teardown: report the failed chunk and, for an online
 * device left suspended, replace the hotzone mapping with dm-error so
 * no further I/O can corrupt data in the inconsistent hotzone.
 */
4072 static void reencrypt_teardown_fatal(struct crypt_device *cd, struct luks2_reencrypt *rh)
4074 log_err(cd, _("Fatal error while reencrypting chunk starting at %" PRIu64 ", %" PRIu64 " sectors long."),
4075 (rh->offset >> SECTOR_SHIFT) + crypt_get_data_offset(cd), rh->length >> SECTOR_SHIFT);
4078 log_err(cd, _("Online reencryption failed."));
4079 if (dm_status_suspended(cd, rh->hotzone_name) > 0) {
4080 log_dbg(cd, "Hotzone device %s suspended, replacing with dm-error.", rh->hotzone_name);
4081 if (dm_error_device(cd, rh->hotzone_name)) {
4082 log_err(cd, _("Failed to replace suspended device %s with dm-error target."), rh->hotzone_name);
4083 log_err(cd, _("Do not resume the device unless replaced with error target manually."));
/*
 * Dispatch teardown according to the last step's status (ok vs fatal),
 * emit a final progress callback on clean finish, then release the
 * reencryption context (which also frees the reencryption lock).
 */
4089 static int reencrypt_teardown(struct crypt_device *cd, struct luks2_hdr *hdr,
4090 struct luks2_reencrypt *rh, reenc_status_t rs, bool interrupted,
4091 int (*progress)(uint64_t size, uint64_t offset, void *usrptr),
/* Report final progress only when the loop ended normally. */
4098 if (progress && !interrupted)
4099 progress(rh->device_size, rh->progress, usrptr);
4100 r = reencrypt_teardown_ok(cd, hdr, rh);
4103 reencrypt_teardown_fatal(cd, rh);
4109 /* this frees reencryption lock */
4110 LUKS2_reencrypt_free(cd, rh);
4111 crypt_set_luks2_reencrypt(cd, NULL);
/*
 * Public API: run (resume) an initialized LUKS2 reencryption.
 * Drives reencrypt_step() in a loop until the whole device is processed,
 * the progress callback requests interruption, or a step fails; then
 * performs teardown. The context must have been created by a prior
 * crypt_reencrypt_init_by_*() call.
 */
4117 int crypt_reencrypt_run(
4118 struct crypt_device *cd,
4119 int (*progress)(uint64_t size, uint64_t offset, void *usrptr),
4122 #if USE_LUKS2_REENCRYPTION
4124 crypt_reencrypt_info ri;
4125 struct luks2_hdr *hdr;
4126 struct luks2_reencrypt *rh;
4130 if (onlyLUKS2mask(cd, CRYPT_REQUIREMENT_ONLINE_REENCRYPT))
4133 hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
4135 ri = LUKS2_reencrypt_status(hdr);
/* Anything beyond CLEAN (crash/invalid) must go through recovery first. */
4136 if (ri > CRYPT_REENCRYPT_CLEAN) {
4137 log_err(cd, _("Cannot proceed with reencryption. Unexpected reencryption status."));
/* A valid context must hold the reencryption lock (if locking is enabled). */
4141 rh = crypt_get_luks2_reencrypt(cd);
4142 if (!rh || (!rh->reenc_lock && crypt_metadata_locking_enabled())) {
4143 log_err(cd, _("Missing or invalid reencrypt context."));
4147 log_dbg(cd, "Resuming LUKS2 reencryption.");
/* Online: set up hotzone/overlay dm devices above the live mapping. */
4149 if (rh->online && reencrypt_init_device_stack(cd, rh)) {
4150 log_err(cd, _("Failed to initialize reencryption device stack."));
4154 log_dbg(cd, "Progress %" PRIu64 ", device_size %" PRIu64, rh->progress, rh->device_size);
/* A non-zero progress callback return requests a clean interruption. */
4158 if (progress && progress(rh->device_size, rh->progress, usrptr))
4161 while (!quit && (rh->device_size > rh->progress)) {
4162 rs = reencrypt_step(cd, hdr, rh, rh->device_size, rh->online);
4166 log_dbg(cd, "Progress %" PRIu64 ", device_size %" PRIu64, rh->progress, rh->device_size);
4167 if (progress && progress(rh->device_size, rh->progress, usrptr))
/* Advance context (offset/length) for the next hotzone. */
4170 r = reencrypt_context_update(cd, rh);
4172 log_err(cd, _("Failed to update reencryption context."));
4177 log_dbg(cd, "Next reencryption offset will be %" PRIu64 " sectors.", rh->offset);
4178 log_dbg(cd, "Next reencryption chunk size will be %" PRIu64 " sectors).", rh->length);
4181 r = reencrypt_teardown(cd, hdr, rh, rs, quit, progress, usrptr);
4184 log_err(cd, _("This operation is not supported for this device type."));
/*
 * Legacy public API kept for compatibility: identical to
 * crypt_reencrypt_run() without the user pointer argument.
 */
4189 int crypt_reencrypt(
4190 struct crypt_device *cd,
4191 int (*progress)(uint64_t size, uint64_t offset, void *usrptr))
4193 return crypt_reencrypt_run(cd, progress, NULL);
4195 #if USE_LUKS2_REENCRYPTION
/*
 * Core crash-recovery routine: load a fresh reencryption context,
 * recover the interrupted hotzone segment from resilience data, restore
 * the segment layout, advance the context and run the normal teardown
 * with a final header flush.
 */
4196 static int reencrypt_recovery(struct crypt_device *cd,
4197 struct luks2_hdr *hdr,
4198 uint64_t device_size,
4199 struct volume_key *vks)
4202 struct luks2_reencrypt *rh = NULL;
4204 r = reencrypt_load(cd, hdr, device_size, 0, 0, vks, &rh);
4206 log_err(cd, _("Failed to load LUKS2 reencryption context."));
/* Recover the hotzone that was in flight when the crash happened. */
4210 r = reencrypt_recover_segment(cd, hdr, rh, vks);
4214 if ((r = reencrypt_assign_segments(cd, hdr, rh, 0, 0)))
4217 r = reencrypt_context_update(cd, rh);
4219 log_err(cd, _("Failed to update reencryption context."));
4223 r = reencrypt_teardown_ok(cd, hdr, rh);
4225 r = LUKS2_hdr_write(cd, hdr);
4227 LUKS2_reencrypt_free(cd, rh);
 * use only for calculation of minimal data device size.
 * The real data offset is taken directly from segments!
/*
 * Return data offset adjusted for an in-progress forward reencryption
 * (the not-yet-reencrypted area behind the shift still belongs to data).
 * blockwise selects 512-byte sector units vs bytes.
 */
4236 int LUKS2_reencrypt_data_offset(struct luks2_hdr *hdr, bool blockwise)
4238 crypt_reencrypt_info ri = LUKS2_reencrypt_status(hdr);
4239 uint64_t data_offset = LUKS2_get_data_offset(hdr);
/* Forward direction shifts the effective data start by data_shift sectors. */
4241 if (ri == CRYPT_REENCRYPT_CLEAN && reencrypt_direction(hdr) == CRYPT_REENCRYPT_FORWARD)
4242 data_offset += reencrypt_data_shift(hdr) >> SECTOR_SHIFT;
4244 return blockwise ? data_offset : data_offset << SECTOR_SHIFT;
/*
 * Validate the data device is large enough for (ongoing) reencryption.
 * On success stores the usable data size (real size minus data offset)
 * in *dev_size. With activation, also requires exclusive device access.
 */
4248 int LUKS2_reencrypt_check_device_size(struct crypt_device *cd, struct luks2_hdr *hdr,
4249 uint64_t check_size, uint64_t *dev_size, bool activation, bool dynamic)
4252 uint64_t data_offset, real_size = 0;
/* Backward direction with moved segment (or dynamic size) needs extra room. */
4254 if (reencrypt_direction(hdr) == CRYPT_REENCRYPT_BACKWARD &&
4255 (LUKS2_get_segment_by_flag(hdr, "backup-moved-segment") || dynamic))
4256 check_size += reencrypt_data_shift(hdr);
4258 r = device_check_access(cd, crypt_data_device(cd), activation ? DEV_EXCL : DEV_OK);
4262 data_offset = LUKS2_reencrypt_data_offset(hdr, false);
4264 r = device_check_size(cd, crypt_data_device(cd), data_offset, 1);
4268 r = device_size(crypt_data_device(cd), &real_size);
4272 log_dbg(cd, "Required minimal device size: %" PRIu64 " (%" PRIu64 " sectors)"
4273 ", real device size: %" PRIu64 " (%" PRIu64 " sectors) "
4274 "calculated device size: %" PRIu64 " (%" PRIu64 " sectors)",
4275 check_size, check_size >> SECTOR_SHIFT, real_size, real_size >> SECTOR_SHIFT,
4276 real_size - data_offset, (real_size - data_offset) >> SECTOR_SHIFT);
4278 if (real_size < data_offset || (check_size && real_size < check_size)) {
4279 log_err(cd, _("Device %s is too small."), device_path(crypt_data_device(cd)));
4283 *dev_size = real_size - data_offset;
4287 #if USE_LUKS2_REENCRYPTION
4288 /* returns keyslot number on success (>= 0) or negative errnor otherwise */
/*
 * Crash recovery body; caller must already hold the reencryption lock.
 * Unlocks all segment keys with the passphrase, optionally uploads them
 * to the kernel keyring, verifies device size and runs the recovery.
 * On success, transfers key ownership to *vks when the caller asked for it.
 */
4289 int LUKS2_reencrypt_locked_recovery_by_passphrase(struct crypt_device *cd,
4292 const char *passphrase,
4293 size_t passphrase_size,
4294 struct volume_key **vks)
4296 uint64_t minimal_size, device_size;
4297 int keyslot, r = -EINVAL;
4298 struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
4299 struct volume_key *vk = NULL, *_vks = NULL;
4301 log_dbg(cd, "Entering reencryption crash recovery.");
4303 if (LUKS2_get_data_size(hdr, &minimal_size, NULL))
4306 r = LUKS2_keyslot_open_all_segments(cd, keyslot_old, keyslot_new,
4307 passphrase, passphrase_size, &_vks);
/* Keys go to the kernel keyring when the dm-crypt keyring path is used. */
4312 if (crypt_use_keyring_for_vk(cd))
4316 r = LUKS2_volume_key_load_in_keyring_by_digest(cd, vk, crypt_volume_key_get_id(vk));
4319 vk = crypt_volume_key_next(vk);
4322 if (LUKS2_reencrypt_check_device_size(cd, hdr, minimal_size, &device_size, true, false))
4325 r = reencrypt_recovery(cd, hdr, device_size, _vks);
/* Success + caller wants keys: hand over ownership instead of freeing. */
4328 MOVE_REF(*vks, _vks);
4331 crypt_drop_keyring_key(cd, _vks);
4332 crypt_free_volume_key(_vks);
4334 return r < 0 ? r : keyslot;
4337 crypt_reencrypt_info LUKS2_reencrypt_get_params(struct luks2_hdr *hdr,
4338 struct crypt_params_reencrypt *params)
4340 crypt_reencrypt_info ri;
4345 memset(params, 0, sizeof(*params));
4347 ri = LUKS2_reencrypt_status(hdr);
4348 if (ri == CRYPT_REENCRYPT_NONE || ri == CRYPT_REENCRYPT_INVALID || !params)
4351 digest = LUKS2_digest_by_keyslot(hdr, LUKS2_find_keyslot(hdr, "reencrypt"));
4352 if (digest < 0 && digest != -ENOENT)
4353 return CRYPT_REENCRYPT_INVALID;
4356 * In case there's an old "online-reencrypt" requirement or reencryption
4357 * keyslot digest is missing inform caller reencryption metadata requires repair.
4359 if (!LUKS2_config_get_reencrypt_version(hdr, &version) &&
4360 (version < 2 || digest == -ENOENT)) {
4361 params->flags |= CRYPT_REENCRYPT_REPAIR_NEEDED;
4365 params->mode = reencrypt_mode(hdr);
4366 params->direction = reencrypt_direction(hdr);
4367 params->resilience = reencrypt_resilience_type(hdr);
4368 params->hash = reencrypt_resilience_hash(hdr);
4369 params->data_shift = reencrypt_data_shift(hdr) >> SECTOR_SHIFT;
4370 params->max_hotzone_size = 0;
4371 if (LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment") >= 0)
4372 params->flags |= CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT;