2 * LUKS - Linux Unified Key Setup v2, reencryption helpers
4 * Copyright (C) 2015-2020, Red Hat, Inc. All rights reserved.
5 * Copyright (C) 2015-2020, Ondrej Kozina
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version 2
10 * of the License, or (at your option) any later version.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
22 #include "luks2_internal.h"
23 #include "utils_device_locking.h"
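/*
 * Helpers below access the reencryption backup segments stored in the LUKS2
 * header: "backup-final" describes the new (target) data segment,
 * "backup-previous" the old (source) one.
 */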
25 static json_object *reencrypt_segment(struct luks2_hdr *hdr, unsigned new)
27 return LUKS2_get_segment_by_flag(hdr, new ? "backup-final" : "backup-previous");
30 static json_object *reencrypt_segment_new(struct luks2_hdr *hdr)
32 return reencrypt_segment(hdr, 1);
35 static json_object *reencrypt_segment_old(struct luks2_hdr *hdr)
37 return reencrypt_segment(hdr, 0);
40 static const char *reencrypt_segment_cipher_new(struct luks2_hdr *hdr)
42 return json_segment_get_cipher(reencrypt_segment(hdr, 1));
45 static const char *reencrypt_segment_cipher_old(struct luks2_hdr *hdr)
47 return json_segment_get_cipher(reencrypt_segment(hdr, 0));
50 static int reencrypt_get_sector_size_new(struct luks2_hdr *hdr)
52 return json_segment_get_sector_size(reencrypt_segment(hdr, 1));
55 static int reencrypt_get_sector_size_old(struct luks2_hdr *hdr)
57 return json_segment_get_sector_size(reencrypt_segment(hdr, 0));
60 static uint64_t reencrypt_data_offset(struct luks2_hdr *hdr, unsigned new)
62 json_object *jobj = reencrypt_segment(hdr, new);
64 return json_segment_get_offset(jobj, 0);
66 return LUKS2_get_data_offset(hdr) << SECTOR_SHIFT;
69 static uint64_t LUKS2_reencrypt_get_data_offset_moved(struct luks2_hdr *hdr)
71 json_object *jobj_segment = LUKS2_get_segment_by_flag(hdr, "backup-moved-segment");
76 return json_segment_get_offset(jobj_segment, 0);
79 static uint64_t reencrypt_get_data_offset_new(struct luks2_hdr *hdr)
81 return reencrypt_data_offset(hdr, 1);
84 static uint64_t reencrypt_get_data_offset_old(struct luks2_hdr *hdr)
86 return reencrypt_data_offset(hdr, 0);
89 static int reencrypt_digest(struct luks2_hdr *hdr, unsigned new)
91 int segment = LUKS2_get_segment_id_by_flag(hdr, new ? "backup-final" : "backup-previous");
96 return LUKS2_digest_by_segment(hdr, segment);
99 int LUKS2_reencrypt_digest_new(struct luks2_hdr *hdr)
101 return reencrypt_digest(hdr, 1);
104 int LUKS2_reencrypt_digest_old(struct luks2_hdr *hdr)
106 return reencrypt_digest(hdr, 0);
109 /* none, checksum, journal or datashift */
110 static const char *reencrypt_resilience_type(struct luks2_hdr *hdr)
112 json_object *jobj_keyslot, *jobj_area, *jobj_type;
113 int ks = LUKS2_find_keyslot(hdr, "reencrypt");
118 jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, ks);
120 json_object_object_get_ex(jobj_keyslot, "area", &jobj_area);
121 if (!json_object_object_get_ex(jobj_area, "type", &jobj_type))
124 return json_object_get_string(jobj_type);
127 static const char *reencrypt_resilience_hash(struct luks2_hdr *hdr)
129 json_object *jobj_keyslot, *jobj_area, *jobj_type, *jobj_hash;
130 int ks = LUKS2_find_keyslot(hdr, "reencrypt");
135 jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, ks);
137 json_object_object_get_ex(jobj_keyslot, "area", &jobj_area);
138 if (!json_object_object_get_ex(jobj_area, "type", &jobj_type))
140 if (strcmp(json_object_get_string(jobj_type), "checksum"))
142 if (!json_object_object_get_ex(jobj_area, "hash", &jobj_hash))
145 return json_object_get_string(jobj_hash);
148 static uint32_t reencrypt_alignment(struct luks2_hdr *hdr)
150 json_object *jobj_keyslot, *jobj_area, *jobj_type, *jobj_hash, *jobj_sector_size;
151 int ks = LUKS2_find_keyslot(hdr, "reencrypt");
156 jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, ks);
158 json_object_object_get_ex(jobj_keyslot, "area", &jobj_area);
159 if (!json_object_object_get_ex(jobj_area, "type", &jobj_type))
161 if (strcmp(json_object_get_string(jobj_type), "checksum"))
163 if (!json_object_object_get_ex(jobj_area, "hash", &jobj_hash))
165 if (!json_object_object_get_ex(jobj_area, "sector_size", &jobj_sector_size))
168 return crypt_jobj_get_uint32(jobj_sector_size);
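/*
 * Build 'post' segments for encryption with data shift: the just finished
 * hotzone is merged into the already encrypted segment that follows it.
 */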
171 static json_object *_enc_create_segments_shift_after(struct crypt_device *cd,
172 struct luks2_hdr *hdr,
173 struct luks2_reenc_context *rh,
174 uint64_t data_offset)
176 int reenc_seg, i = 0;
177 json_object *jobj_copy, *jobj_seg_new = NULL, *jobj_segs_post = json_object_new_object();
180 if (!rh->jobj_segs_hot || !jobj_segs_post)
183 if (json_segments_count(rh->jobj_segs_hot) == 0)
184 return jobj_segs_post;
186 reenc_seg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot);
190 while (i < reenc_seg) {
191 jobj_copy = json_segments_get_segment(rh->jobj_segs_hot, i);
194 json_object_object_add_by_uint(jobj_segs_post, i++, json_object_get(jobj_copy));
197 if (json_object_copy(json_segments_get_segment(rh->jobj_segs_hot, reenc_seg + 1), &jobj_seg_new)) {
198 if (json_object_copy(json_segments_get_segment(rh->jobj_segs_hot, reenc_seg), &jobj_seg_new))
200 json_segment_remove_flag(jobj_seg_new, "in-reencryption");
203 json_object_object_add(jobj_seg_new, "offset", crypt_jobj_new_uint64(rh->offset + data_offset));
204 json_object_object_add(jobj_seg_new, "iv_tweak", crypt_jobj_new_uint64(rh->offset >> SECTOR_SHIFT));
205 tmp = json_segment_get_size(jobj_seg_new, 0) + rh->length;
208 /* alter size of the new segment; if reenc_seg == 0 we're finished */
209 json_object_object_add(jobj_seg_new, "size", reenc_seg > 0 ? crypt_jobj_new_uint64(tmp) : json_object_new_string("dynamic"));
210 json_object_object_add_by_uint(jobj_segs_post, reenc_seg, jobj_seg_new);
212 return jobj_segs_post;
214 json_object_put(jobj_segs_post);
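/*
 * Build 'hot' segments for encryption with data shift: the last linear
 * segment is shrunk and the in-reencryption crypt segment is placed after it.
 */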
218 static json_object *reencrypt_make_hot_segments_encrypt_shift(struct crypt_device *cd,
219 struct luks2_hdr *hdr,
220 struct luks2_reenc_context *rh,
221 uint64_t data_offset)
223 int sg, crypt_seg, i = 0;
224 uint64_t segment_size;
225 json_object *jobj_seg_shrunk, *jobj_seg_new, *jobj_copy, *jobj_enc_seg = NULL,
226 *jobj_segs_hot = json_object_new_object();
231 crypt_seg = LUKS2_segment_by_type(hdr, "crypt");
233 /* FIXME: This is a hack. Find a proper way to fix it. */
234 sg = LUKS2_last_segment_by_type(hdr, "linear");
235 if (rh->offset && sg < 0)
238 return jobj_segs_hot;
240 jobj_enc_seg = json_segment_create_crypt(data_offset + rh->offset,
241 rh->offset >> SECTOR_SHIFT,
243 reencrypt_segment_cipher_new(hdr),
244 reencrypt_get_sector_size_new(hdr),
248 jobj_copy = LUKS2_get_segment_jobj(hdr, i);
251 json_object_object_add_by_uint(jobj_segs_hot, i++, json_object_get(jobj_copy));
254 segment_size = LUKS2_segment_size(hdr, sg, 0);
255 if (segment_size > rh->length) {
256 jobj_seg_shrunk = NULL;
257 if (json_object_copy(LUKS2_get_segment_jobj(hdr, sg), &jobj_seg_shrunk))
259 json_object_object_add(jobj_seg_shrunk, "size", crypt_jobj_new_uint64(segment_size - rh->length));
260 json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_seg_shrunk);
263 json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_enc_seg);
264 jobj_enc_seg = NULL; /* see err: label */
266 /* first crypt segment after encryption? */
267 if (crypt_seg >= 0) {
268 jobj_seg_new = LUKS2_get_segment_jobj(hdr, crypt_seg);
271 json_object_object_add_by_uint(jobj_segs_hot, sg, json_object_get(jobj_seg_new));
274 return jobj_segs_hot;
276 json_object_put(jobj_enc_seg);
277 json_object_put(jobj_segs_hot);
282 static json_object *reencrypt_make_segment_new(struct crypt_device *cd,
283 struct luks2_hdr *hdr,
284 const struct luks2_reenc_context *rh,
285 uint64_t data_offset,
286 uint64_t segment_offset,
288 const uint64_t *segment_length)
291 case CRYPT_REENCRYPT_REENCRYPT:
292 case CRYPT_REENCRYPT_ENCRYPT:
293 return json_segment_create_crypt(data_offset + segment_offset,
294 crypt_get_iv_offset(cd) + (iv_offset >> SECTOR_SHIFT),
296 reencrypt_segment_cipher_new(hdr),
297 reencrypt_get_sector_size_new(hdr), 0);
298 case CRYPT_REENCRYPT_DECRYPT:
299 return json_segment_create_linear(data_offset + segment_offset, segment_length, 0);
305 static json_object *reencrypt_make_post_segments_forward(struct crypt_device *cd,
306 struct luks2_hdr *hdr,
307 struct luks2_reenc_context *rh,
308 uint64_t data_offset)
311 json_object *jobj_new_seg_after, *jobj_old_seg, *jobj_old_seg_copy = NULL,
312 *jobj_segs_post = json_object_new_object();
313 uint64_t fixed_length = rh->offset + rh->length;
315 if (!rh->jobj_segs_hot || !jobj_segs_post)
318 reenc_seg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot);
322 jobj_old_seg = json_segments_get_segment(rh->jobj_segs_hot, reenc_seg + 1);
325 * if there's no old segment after reencryption, we're done.
326 * Set size to 'dynamic' again.
328 jobj_new_seg_after = reencrypt_make_segment_new(cd, hdr, rh, data_offset, 0, 0, jobj_old_seg ? &fixed_length : NULL);
329 if (!jobj_new_seg_after)
331 json_object_object_add_by_uint(jobj_segs_post, 0, jobj_new_seg_after);
334 if (rh->fixed_length) {
335 if (json_object_copy(jobj_old_seg, &jobj_old_seg_copy))
337 jobj_old_seg = jobj_old_seg_copy;
338 fixed_length = rh->device_size - fixed_length;
339 json_object_object_add(jobj_old_seg, "size", crypt_jobj_new_uint64(fixed_length));
341 json_object_get(jobj_old_seg);
342 json_object_object_add_by_uint(jobj_segs_post, 1, jobj_old_seg);
345 return jobj_segs_post;
347 json_object_put(jobj_segs_post);
351 static json_object *reencrypt_make_post_segments_backward(struct crypt_device *cd,
352 struct luks2_hdr *hdr,
353 struct luks2_reenc_context *rh,
354 uint64_t data_offset)
357 uint64_t fixed_length;
359 json_object *jobj_new_seg_after, *jobj_old_seg,
360 *jobj_segs_post = json_object_new_object();
362 if (!rh->jobj_segs_hot || !jobj_segs_post)
365 reenc_seg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot);
369 jobj_old_seg = json_segments_get_segment(rh->jobj_segs_hot, reenc_seg - 1);
371 json_object_object_add_by_uint(jobj_segs_post, reenc_seg - 1, json_object_get(jobj_old_seg));
372 if (rh->fixed_length && rh->offset) {
373 fixed_length = rh->device_size - rh->offset;
374 jobj_new_seg_after = reencrypt_make_segment_new(cd, hdr, rh, data_offset, rh->offset, rh->offset, &fixed_length);
376 jobj_new_seg_after = reencrypt_make_segment_new(cd, hdr, rh, data_offset, rh->offset, rh->offset, NULL);
377 if (!jobj_new_seg_after)
379 json_object_object_add_by_uint(jobj_segs_post, reenc_seg, jobj_new_seg_after);
381 return jobj_segs_post;
383 json_object_put(jobj_segs_post);
387 static json_object *reencrypt_make_segment_reencrypt(struct crypt_device *cd,
388 struct luks2_hdr *hdr,
389 const struct luks2_reenc_context *rh,
390 uint64_t data_offset,
391 uint64_t segment_offset,
393 const uint64_t *segment_length)
396 case CRYPT_REENCRYPT_REENCRYPT:
397 case CRYPT_REENCRYPT_ENCRYPT:
398 return json_segment_create_crypt(data_offset + segment_offset,
399 crypt_get_iv_offset(cd) + (iv_offset >> SECTOR_SHIFT),
401 reencrypt_segment_cipher_new(hdr),
402 reencrypt_get_sector_size_new(hdr), 1);
403 case CRYPT_REENCRYPT_DECRYPT:
404 return json_segment_create_linear(data_offset + segment_offset, segment_length, 1);
410 static json_object *reencrypt_make_segment_old(struct crypt_device *cd,
411 struct luks2_hdr *hdr,
412 const struct luks2_reenc_context *rh,
413 uint64_t data_offset,
414 uint64_t segment_offset,
415 const uint64_t *segment_length)
417 json_object *jobj_old_seg = NULL;
420 case CRYPT_REENCRYPT_REENCRYPT:
421 case CRYPT_REENCRYPT_DECRYPT:
422 jobj_old_seg = json_segment_create_crypt(data_offset + segment_offset,
423 crypt_get_iv_offset(cd) + (segment_offset >> SECTOR_SHIFT),
425 reencrypt_segment_cipher_old(hdr),
426 reencrypt_get_sector_size_old(hdr),
429 case CRYPT_REENCRYPT_ENCRYPT:
430 jobj_old_seg = json_segment_create_linear(data_offset + segment_offset, segment_length, 0);
436 static json_object *reencrypt_make_hot_segments_forward(struct crypt_device *cd,
437 struct luks2_hdr *hdr,
438 struct luks2_reenc_context *rh,
439 uint64_t device_size,
440 uint64_t data_offset)
442 json_object *jobj_segs_hot, *jobj_reenc_seg, *jobj_old_seg, *jobj_new_seg;
443 uint64_t fixed_length, tmp = rh->offset + rh->length;
446 jobj_segs_hot = json_object_new_object();
451 jobj_new_seg = reencrypt_make_segment_new(cd, hdr, rh, data_offset, 0, 0, &rh->offset);
454 json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_new_seg);
457 jobj_reenc_seg = reencrypt_make_segment_reencrypt(cd, hdr, rh, data_offset, rh->offset, rh->offset, &rh->length);
461 json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_reenc_seg);
463 if (tmp < device_size) {
464 fixed_length = device_size - tmp;
465 jobj_old_seg = reencrypt_make_segment_old(cd, hdr, rh, data_offset + rh->data_shift, rh->offset + rh->length, rh->fixed_length ? &fixed_length : NULL);
468 json_object_object_add_by_uint(jobj_segs_hot, sg, jobj_old_seg);
471 return jobj_segs_hot;
473 json_object_put(jobj_segs_hot);
477 static json_object *reencrypt_make_hot_segments_backward(struct crypt_device *cd,
478 struct luks2_hdr *hdr,
479 struct luks2_reenc_context *rh,
480 uint64_t device_size,
481 uint64_t data_offset)
483 json_object *jobj_reenc_seg, *jobj_new_seg, *jobj_old_seg = NULL,
484 *jobj_segs_hot = json_object_new_object();
486 uint64_t fixed_length, tmp = rh->offset + rh->length;
492 if (json_object_copy(LUKS2_get_segment_jobj(hdr, 0), &jobj_old_seg))
494 json_object_object_add(jobj_old_seg, "size", crypt_jobj_new_uint64(rh->offset));
496 json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_old_seg);
499 jobj_reenc_seg = reencrypt_make_segment_reencrypt(cd, hdr, rh, data_offset, rh->offset, rh->offset, &rh->length);
503 json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_reenc_seg);
505 if (tmp < device_size) {
506 fixed_length = device_size - tmp;
507 jobj_new_seg = reencrypt_make_segment_new(cd, hdr, rh, data_offset, rh->offset + rh->length, rh->offset + rh->length, rh->fixed_length ? &fixed_length : NULL);
510 json_object_object_add_by_uint(jobj_segs_hot, sg, jobj_new_seg);
513 return jobj_segs_hot;
515 json_object_put(jobj_segs_hot);
519 static int reencrypt_make_hot_segments(struct crypt_device *cd,
520 struct luks2_hdr *hdr,
521 struct luks2_reenc_context *rh,
522 uint64_t device_size,
523 uint64_t data_offset)
525 rh->jobj_segs_hot = NULL;
527 if (rh->mode == CRYPT_REENCRYPT_ENCRYPT && rh->direction == CRYPT_REENCRYPT_BACKWARD &&
528 rh->data_shift && rh->jobj_segment_moved) {
529 log_dbg(cd, "Calculating hot segments for encryption with data move.");
530 rh->jobj_segs_hot = reencrypt_make_hot_segments_encrypt_shift(cd, hdr, rh, data_offset);
531 } else if (rh->direction == CRYPT_REENCRYPT_FORWARD) {
532 log_dbg(cd, "Calculating hot segments (forward direction).");
533 rh->jobj_segs_hot = reencrypt_make_hot_segments_forward(cd, hdr, rh, device_size, data_offset);
534 } else if (rh->direction == CRYPT_REENCRYPT_BACKWARD) {
535 log_dbg(cd, "Calculating hot segments (backward direction).");
536 rh->jobj_segs_hot = reencrypt_make_hot_segments_backward(cd, hdr, rh, device_size, data_offset);
539 return rh->jobj_segs_hot ? 0 : -EINVAL;
542 static int reencrypt_make_post_segments(struct crypt_device *cd,
543 struct luks2_hdr *hdr,
544 struct luks2_reenc_context *rh,
545 uint64_t data_offset)
547 rh->jobj_segs_post = NULL;
549 if (rh->mode == CRYPT_REENCRYPT_ENCRYPT && rh->direction == CRYPT_REENCRYPT_BACKWARD &&
550 rh->data_shift && rh->jobj_segment_moved) {
551 log_dbg(cd, "Calculating post segments for encryption with data move.");
552 rh->jobj_segs_post = _enc_create_segments_shift_after(cd, hdr, rh, data_offset);
553 } else if (rh->direction == CRYPT_REENCRYPT_FORWARD) {
554 log_dbg(cd, "Calculating post segments (forward direction).");
555 rh->jobj_segs_post = reencrypt_make_post_segments_forward(cd, hdr, rh, data_offset);
556 } else if (rh->direction == CRYPT_REENCRYPT_BACKWARD) {
557 log_dbg(cd, "Calculating segments (backward direction).");
558 rh->jobj_segs_post = reencrypt_make_post_segments_backward(cd, hdr, rh, data_offset);
561 return rh->jobj_segs_post ? 0 : -EINVAL;
564 static uint64_t reencrypt_data_shift(struct luks2_hdr *hdr)
566 json_object *jobj_keyslot, *jobj_area, *jobj_data_shift;
567 int ks = LUKS2_find_keyslot(hdr, "reencrypt");
572 jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, ks);
574 json_object_object_get_ex(jobj_keyslot, "area", &jobj_area);
575 if (!json_object_object_get_ex(jobj_area, "shift_size", &jobj_data_shift))
578 return crypt_jobj_get_uint64(jobj_data_shift);
581 static crypt_reencrypt_mode_info reencrypt_mode(struct luks2_hdr *hdr)
584 crypt_reencrypt_mode_info mi = CRYPT_REENCRYPT_REENCRYPT;
585 json_object *jobj_keyslot, *jobj_mode;
587 jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, LUKS2_find_keyslot(hdr, "reencrypt"));
591 json_object_object_get_ex(jobj_keyslot, "mode", &jobj_mode);
592 mode = json_object_get_string(jobj_mode);
594 /* validation enforces allowed values */
595 if (!strcmp(mode, "encrypt"))
596 mi = CRYPT_REENCRYPT_ENCRYPT;
597 else if (!strcmp(mode, "decrypt"))
598 mi = CRYPT_REENCRYPT_DECRYPT;
603 static crypt_reencrypt_direction_info reencrypt_direction(struct luks2_hdr *hdr)
606 json_object *jobj_keyslot, *jobj_mode;
607 crypt_reencrypt_direction_info di = CRYPT_REENCRYPT_FORWARD;
609 jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, LUKS2_find_keyslot(hdr, "reencrypt"));
613 json_object_object_get_ex(jobj_keyslot, "direction", &jobj_mode);
614 value = json_object_get_string(jobj_mode);
616 /* validation enforces allowed values */
617 if (strcmp(value, "forward"))
618 di = CRYPT_REENCRYPT_BACKWARD;
623 typedef enum { REENC_OK = 0, REENC_ERR, REENC_ROLLBACK, REENC_FATAL } reenc_status_t;
625 void LUKS2_reenc_context_free(struct crypt_device *cd, struct luks2_reenc_context *rh)
630 if (rh->rp.type == REENC_PROTECTION_CHECKSUM) {
631 if (rh->rp.p.csum.ch) {
632 crypt_hash_destroy(rh->rp.p.csum.ch);
633 rh->rp.p.csum.ch = NULL;
635 if (rh->rp.p.csum.checksums) {
636 memset(rh->rp.p.csum.checksums, 0, rh->rp.p.csum.checksums_len);
637 free(rh->rp.p.csum.checksums);
638 rh->rp.p.csum.checksums = NULL;
642 json_object_put(rh->jobj_segs_hot);
643 rh->jobj_segs_hot = NULL;
644 json_object_put(rh->jobj_segs_post);
645 rh->jobj_segs_post = NULL;
646 json_object_put(rh->jobj_segment_old);
647 rh->jobj_segment_old = NULL;
648 json_object_put(rh->jobj_segment_new);
649 rh->jobj_segment_new = NULL;
650 json_object_put(rh->jobj_segment_moved);
651 rh->jobj_segment_moved = NULL;
653 free(rh->reenc_buffer);
654 rh->reenc_buffer = NULL;
655 crypt_storage_wrapper_destroy(rh->cw1);
657 crypt_storage_wrapper_destroy(rh->cw2);
660 free(rh->device_name);
661 free(rh->overlay_name);
662 free(rh->hotzone_name);
663 crypt_drop_keyring_key(cd, rh->vks);
664 crypt_free_volume_key(rh->vks);
665 device_release_excl(cd, crypt_data_device(cd));
666 crypt_unlock_internal(cd, rh->reenc_lock);
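/*
 * The hotzone alignment is the largest of the data device block size and the
 * old and new encryption sector sizes.
 */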
670 static size_t reencrypt_get_alignment(struct crypt_device *cd,
671 struct luks2_hdr *hdr)
674 size_t alignment = device_block_size(cd, crypt_data_device(cd));
676 ss = reencrypt_get_sector_size_old(hdr);
677 if (ss > 0 && (size_t)ss > alignment)
679 ss = reencrypt_get_sector_size_new(hdr);
680 if (ss > 0 && (size_t)ss > alignment)
681 alignment = (size_t)ss;
686 /* returns void because it must not fail on a valid LUKS2 header */
687 static void _load_backup_segments(struct luks2_hdr *hdr,
688 struct luks2_reenc_context *rh)
690 int segment = LUKS2_get_segment_id_by_flag(hdr, "backup-final");
693 rh->jobj_segment_new = json_object_get(LUKS2_get_segment_jobj(hdr, segment));
694 rh->digest_new = LUKS2_digest_by_segment(hdr, segment);
696 rh->jobj_segment_new = NULL;
697 rh->digest_new = -ENOENT;
700 segment = LUKS2_get_segment_id_by_flag(hdr, "backup-previous");
702 rh->jobj_segment_old = json_object_get(LUKS2_get_segment_jobj(hdr, segment));
703 rh->digest_old = LUKS2_digest_by_segment(hdr, segment);
705 rh->jobj_segment_old = NULL;
706 rh->digest_old = -ENOENT;
709 segment = LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment");
711 rh->jobj_segment_moved = json_object_get(LUKS2_get_segment_jobj(hdr, segment));
713 rh->jobj_segment_moved = NULL;
716 static int reencrypt_offset_backward_moved(struct luks2_hdr *hdr, json_object *jobj_segments, uint64_t *reencrypt_length, uint64_t data_shift, uint64_t *offset)
718 uint64_t tmp, linear_length = 0;
719 int sg, segs = json_segments_count(jobj_segments);
721 /* find reencrypt offset with data shift */
722 for (sg = 0; sg < segs; sg++)
723 if (LUKS2_segment_is_type(hdr, sg, "linear"))
724 linear_length += LUKS2_segment_size(hdr, sg, 0);
726 /* total length of all active linear segments */
728 if (linear_length < data_shift)
730 tmp = linear_length - data_shift;
731 if (tmp && tmp < data_shift) {
732 *offset = data_shift;
733 *reencrypt_length = tmp;
744 /* should be unreachable */
749 static int _offset_forward(struct luks2_hdr *hdr, json_object *jobj_segments, uint64_t *offset)
751 int segs = json_segments_count(jobj_segments);
755 else if (segs == 2) {
756 *offset = json_segment_get_size(json_segments_get_segment(jobj_segments, 0), 0);
765 static int _offset_backward(struct luks2_hdr *hdr, json_object *jobj_segments, uint64_t device_size, uint64_t *length, uint64_t *offset)
767 int segs = json_segments_count(jobj_segments);
771 if (device_size < *length)
772 *length = device_size;
773 *offset = device_size - *length;
774 } else if (segs == 2) {
775 tmp = json_segment_get_size(json_segments_get_segment(jobj_segments, 0), 0);
778 *offset = tmp - *length;
785 /* must always be relative to data offset */
786 /* the LUKS2 header MUST be valid */
787 static int reencrypt_offset(struct luks2_hdr *hdr,
788 crypt_reencrypt_direction_info di,
789 uint64_t device_size,
790 uint64_t *reencrypt_length,
794 json_object *jobj_segments;
795 uint64_t data_shift = reencrypt_data_shift(hdr);
800 /* if there's a segment in reencryption, return its offset directly */
801 json_object_object_get_ex(hdr->jobj, "segments", &jobj_segments);
802 sg = json_segments_segment_in_reencrypt(jobj_segments);
804 *offset = LUKS2_segment_offset(hdr, sg, 0) - (reencrypt_get_data_offset_new(hdr));
808 if (di == CRYPT_REENCRYPT_FORWARD)
809 return _offset_forward(hdr, jobj_segments, offset);
810 else if (di == CRYPT_REENCRYPT_BACKWARD) {
811 if (reencrypt_mode(hdr) == CRYPT_REENCRYPT_ENCRYPT &&
812 LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment") >= 0)
813 return reencrypt_offset_backward_moved(hdr, jobj_segments, reencrypt_length, data_shift, offset);
814 return _offset_backward(hdr, jobj_segments, device_size, reencrypt_length, offset);
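/*
 * Compute the hotzone length for a single reencryption step; it is limited by
 * the resilience type, the keyslot area size, a soft memory limit and the
 * calculated zone alignment.
 */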
820 static uint64_t reencrypt_length(struct crypt_device *cd,
821 struct luks2_hdr *hdr,
822 struct luks2_reenc_context *rh,
823 uint64_t keyslot_area_length,
826 unsigned long dummy, optimal_alignment;
827 uint64_t length, soft_mem_limit;
829 if (rh->rp.type == REENC_PROTECTION_NONE)
830 length = length_max ?: LUKS2_DEFAULT_NONE_REENCRYPTION_LENGTH;
831 else if (rh->rp.type == REENC_PROTECTION_CHECKSUM)
832 length = (keyslot_area_length / rh->rp.p.csum.hash_size) * rh->alignment;
833 else if (rh->rp.type == REENC_PROTECTION_DATASHIFT)
834 return reencrypt_data_shift(hdr);
836 length = keyslot_area_length;
839 if (length > LUKS2_REENCRYPT_MAX_HOTZONE_LENGTH)
840 length = LUKS2_REENCRYPT_MAX_HOTZONE_LENGTH;
842 /* soft limit is 1/4 of system memory */
843 soft_mem_limit = crypt_getphysmemory_kb() << 8; /* multiply by (1024/4) */
845 if (soft_mem_limit && length > soft_mem_limit)
846 length = soft_mem_limit;
848 if (length_max && length > length_max)
851 length -= (length % rh->alignment);
853 /* Emits error later */
857 device_topology_alignment(cd, crypt_data_device(cd), &optimal_alignment, &dummy, length);
859 /* we have to stick with encryption sector size alignment */
860 if (optimal_alignment % rh->alignment)
863 /* align to opt-io size only if remaining size allows it */
864 if (length > optimal_alignment)
865 length -= (length % optimal_alignment);
870 static int reencrypt_context_init(struct crypt_device *cd, struct luks2_hdr *hdr, struct luks2_reenc_context *rh, uint64_t device_size, const struct crypt_params_reencrypt *params)
873 uint64_t dummy, area_length;
875 rh->reenc_keyslot = LUKS2_find_keyslot(hdr, "reencrypt");
876 if (rh->reenc_keyslot < 0)
878 if (LUKS2_keyslot_area(hdr, rh->reenc_keyslot, &dummy, &area_length) < 0)
881 rh->mode = reencrypt_mode(hdr);
883 rh->alignment = reencrypt_get_alignment(cd, hdr);
887 log_dbg(cd, "Hotzone size: %" PRIu64 ", device size: %" PRIu64 ", alignment: %zu.",
888 params->max_hotzone_size << SECTOR_SHIFT,
889 params->device_size << SECTOR_SHIFT, rh->alignment);
891 if ((params->max_hotzone_size << SECTOR_SHIFT) % rh->alignment) {
892 log_err(cd, _("Hotzone size must be multiple of calculated zone alignment (%zu bytes)."), rh->alignment);
896 if ((params->device_size << SECTOR_SHIFT) % rh->alignment) {
897 log_err(cd, _("Device size must be multiple of calculated zone alignment (%zu bytes)."), rh->alignment);
901 rh->direction = reencrypt_direction(hdr);
903 if (!strcmp(params->resilience, "datashift")) {
904 log_dbg(cd, "Initializing reencryption context with data_shift resilience.");
905 rh->rp.type = REENC_PROTECTION_DATASHIFT;
906 rh->data_shift = reencrypt_data_shift(hdr);
907 } else if (!strcmp(params->resilience, "journal")) {
908 log_dbg(cd, "Initializing reencryption context with journal resilience.");
909 rh->rp.type = REENC_PROTECTION_JOURNAL;
910 } else if (!strcmp(params->resilience, "checksum")) {
911 log_dbg(cd, "Initializing reencryption context with checksum resilience.");
912 rh->rp.type = REENC_PROTECTION_CHECKSUM;
914 r = snprintf(rh->rp.p.csum.hash,
915 sizeof(rh->rp.p.csum.hash), "%s", params->hash);
916 if (r < 0 || (size_t)r >= sizeof(rh->rp.p.csum.hash)) {
917 log_dbg(cd, "Invalid hash parameter");
921 if (crypt_hash_init(&rh->rp.p.csum.ch, params->hash)) {
922 log_dbg(cd, "Failed to initialize checksum resilience hash %s", params->hash);
926 r = crypt_hash_size(params->hash);
928 log_dbg(cd, "Invalid hash size");
931 rh->rp.p.csum.hash_size = r;
933 rh->rp.p.csum.checksums_len = area_length;
934 if (posix_memalign(&rh->rp.p.csum.checksums, device_alignment(crypt_metadata_device(cd)),
935 rh->rp.p.csum.checksums_len))
937 } else if (!strcmp(params->resilience, "none")) {
938 log_dbg(cd, "Initializing reencryption context with none resilience.");
939 rh->rp.type = REENC_PROTECTION_NONE;
941 log_err(cd, _("Unsupported resilience mode %s"), params->resilience);
945 if (params->device_size) {
946 log_dbg(cd, "Switching reencryption to fixed size mode.");
947 device_size = params->device_size << SECTOR_SHIFT;
948 rh->fixed_length = true;
950 rh->fixed_length = false;
952 rh->length = reencrypt_length(cd, hdr, rh, area_length, params->max_hotzone_size << SECTOR_SHIFT);
954 log_dbg(cd, "Invalid reencryption length.");
958 if (reencrypt_offset(hdr, rh->direction, device_size, &rh->length, &rh->offset)) {
959 log_dbg(cd, "Failed to get reencryption offset.");
963 if (rh->offset > device_size)
965 if (rh->length > device_size - rh->offset)
966 rh->length = device_size - rh->offset;
968 log_dbg(cd, "reencrypt-direction: %s", rh->direction == CRYPT_REENCRYPT_FORWARD ? "forward" : "backward");
970 _load_backup_segments(hdr, rh);
972 if (rh->direction == CRYPT_REENCRYPT_BACKWARD)
973 rh->progress = device_size - rh->offset - rh->length;
975 rh->progress = rh->offset;
977 log_dbg(cd, "backup-previous digest id: %d", rh->digest_old);
978 log_dbg(cd, "backup-final digest id: %d", rh->digest_new);
979 log_dbg(cd, "reencrypt length: %" PRIu64, rh->length);
980 log_dbg(cd, "reencrypt offset: %" PRIu64, rh->offset);
981 log_dbg(cd, "reencrypt shift: %s%" PRIu64, (rh->data_shift && rh->direction == CRYPT_REENCRYPT_BACKWARD ? "-" : ""), rh->data_shift);
982 log_dbg(cd, "reencrypt alignment: %zu", rh->alignment);
983 log_dbg(cd, "reencrypt progress: %" PRIu64, rh->progress);
985 rh->device_size = device_size;
987 return rh->length < 512 ? -EINVAL : 0;
990 static size_t reencrypt_buffer_length(struct luks2_reenc_context *rh)
993 return rh->data_shift;
997 static int reencrypt_load_clean(struct crypt_device *cd,
998 struct luks2_hdr *hdr,
999 uint64_t device_size,
1000 struct luks2_reenc_context **rh,
1001 const struct crypt_params_reencrypt *params)
1004 const struct crypt_params_reencrypt hdr_reenc_params = {
1005 .resilience = reencrypt_resilience_type(hdr),
1006 .hash = reencrypt_resilience_hash(hdr),
1007 .device_size = params ? params->device_size : 0
1009 struct luks2_reenc_context *tmp = crypt_zalloc(sizeof (*tmp));
1015 if (!hdr_reenc_params.resilience)
1018 /* skip context update if data shift is detected in header */
1019 if (!strcmp(hdr_reenc_params.resilience, "datashift"))
1022 log_dbg(cd, "Initializing reencryption context (%s).", params ? "update" : "load");
1024 if (!params || !params->resilience)
1025 params = &hdr_reenc_params;
1027 r = reencrypt_context_init(cd, hdr, tmp, device_size, params);
1031 if (posix_memalign(&tmp->reenc_buffer, device_alignment(crypt_data_device(cd)),
1032 reencrypt_buffer_length(tmp))) {
1041 LUKS2_reenc_context_free(cd, tmp);
1046 static int reencrypt_make_segments(struct crypt_device *cd,
1047 struct luks2_hdr *hdr,
1048 struct luks2_reenc_context *rh,
1049 uint64_t device_size)
1052 uint64_t data_offset = reencrypt_get_data_offset_new(hdr);
1054 log_dbg(cd, "Calculating segments.");
1056 r = reencrypt_make_hot_segments(cd, hdr, rh, device_size, data_offset);
1058 r = reencrypt_make_post_segments(cd, hdr, rh, data_offset);
1060 json_object_put(rh->jobj_segs_hot);
1064 log_dbg(cd, "Failed to make reencryption segments.");
1069 static int reencrypt_make_segments_crashed(struct crypt_device *cd,
1070 struct luks2_hdr *hdr,
1071 struct luks2_reenc_context *rh)
1074 uint64_t data_offset = crypt_get_data_offset(cd) << SECTOR_SHIFT;
1079 rh->jobj_segs_hot = json_object_new_object();
1080 if (!rh->jobj_segs_hot)
1083 json_object_object_foreach(LUKS2_get_segments_jobj(hdr), key, val) {
1084 if (json_segment_is_backup(val))
1086 json_object_object_add(rh->jobj_segs_hot, key, json_object_get(val));
1089 r = reencrypt_make_post_segments(cd, hdr, rh, data_offset);
1091 json_object_put(rh->jobj_segs_hot);
1092 rh->jobj_segs_hot = NULL;
1098 static int reencrypt_load_crashed(struct crypt_device *cd,
1099 struct luks2_hdr *hdr, uint64_t device_size, struct luks2_reenc_context **rh)
1102 uint64_t minimal_size;
1104 struct crypt_params_reencrypt params = {};
1106 if (LUKS2_get_data_size(hdr, &minimal_size, &dynamic))
1110 params.device_size = minimal_size >> SECTOR_SHIFT;
1112 r = reencrypt_load_clean(cd, hdr, device_size, rh, &params);
1115 reenc_seg = json_segments_segment_in_reencrypt(LUKS2_get_segments_jobj(hdr));
1119 (*rh)->length = LUKS2_segment_size(hdr, reenc_seg, 0);
1122 if (!r && ((*rh)->rp.type == REENC_PROTECTION_CHECKSUM)) {
1123 /* we have to override calculated alignment with value stored in mda */
1124 (*rh)->alignment = reencrypt_alignment(hdr);
1125 if (!(*rh)->alignment) {
1126 log_dbg(cd, "Failed to get read resilience sector_size from metadata.");
1132 r = reencrypt_make_segments_crashed(cd, hdr, *rh);
1135 LUKS2_reenc_context_free(cd, *rh);
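/*
 * Storage wrappers: cw1 reads data through the old segment parameters
 * (read-only), cw2 writes it through the new ones.
 */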
1141 static int reencrypt_init_storage_wrappers(struct crypt_device *cd,
1142 struct luks2_hdr *hdr,
1143 struct luks2_reenc_context *rh,
1144 struct volume_key *vks)
1147 struct volume_key *vk;
1148 uint32_t wrapper_flags = (getuid() || geteuid()) ? 0 : DISABLE_KCAPI;
1150 vk = crypt_volume_key_by_id(vks, rh->digest_old);
1151 r = crypt_storage_wrapper_init(cd, &rh->cw1, crypt_data_device(cd),
1152 reencrypt_get_data_offset_old(hdr),
1153 crypt_get_iv_offset(cd),
1154 reencrypt_get_sector_size_old(hdr),
1155 reencrypt_segment_cipher_old(hdr),
1156 vk, wrapper_flags | OPEN_READONLY);
1158 log_err(cd, _("Failed to initialize old segment storage wrapper."));
1161 rh->wflags1 = wrapper_flags | OPEN_READONLY;
1162 log_dbg(cd, "Old cipher storage wrapper type: %d.", crypt_storage_wrapper_get_type(rh->cw1));
1164 vk = crypt_volume_key_by_id(vks, rh->digest_new);
1165 r = crypt_storage_wrapper_init(cd, &rh->cw2, crypt_data_device(cd),
1166 reencrypt_get_data_offset_new(hdr),
1167 crypt_get_iv_offset(cd),
1168 reencrypt_get_sector_size_new(hdr),
1169 reencrypt_segment_cipher_new(hdr),
1172 log_err(cd, _("Failed to initialize new segment storage wrapper."));
1175 rh->wflags2 = wrapper_flags;
1176 log_dbg(cd, "New cipher storage wrapper type: %d", crypt_storage_wrapper_get_type(rh->cw2));
1181 static int reencrypt_context_set_names(struct luks2_reenc_context *rh, const char *name)
1187 if (!(rh->device_name = dm_device_name(name)))
1189 } else if (!(rh->device_name = strdup(name)))
1192 if (asprintf(&rh->hotzone_name, "%s-hotzone-%s", rh->device_name,
1193 rh->direction == CRYPT_REENCRYPT_FORWARD ? "forward" : "backward") < 0) {
1194 rh->hotzone_name = NULL;
1197 if (asprintf(&rh->overlay_name, "%s-overlay", rh->device_name) < 0) {
1198 rh->overlay_name = NULL;
1206 static int modify_offset(uint64_t *offset, uint64_t data_shift, crypt_reencrypt_direction_info di)
1213 if (di == CRYPT_REENCRYPT_FORWARD) {
1214 if (*offset >= data_shift) {
1215 *offset -= data_shift;
1218 } else if (di == CRYPT_REENCRYPT_BACKWARD) {
1219 *offset += data_shift;
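/* Set or clear the online reencryption requirement flag in the LUKS2 header. */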
1226 static int reencrypt_update_flag(struct crypt_device *cd, int enable, bool commit)
1229 struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
1231 if (LUKS2_config_get_requirements(cd, hdr, &reqs))
1235 if (enable && (reqs & CRYPT_REQUIREMENT_ONLINE_REENCRYPT))
1239 if (!enable && !(reqs & CRYPT_REQUIREMENT_ONLINE_REENCRYPT))
1243 reqs |= CRYPT_REQUIREMENT_ONLINE_REENCRYPT;
1245 reqs &= ~CRYPT_REQUIREMENT_ONLINE_REENCRYPT;
1247 log_dbg(cd, "Going to %s reencryption requirement flag.", enable ? "store" : "wipe");
1249 return LUKS2_config_set_requirements(cd, hdr, reqs, commit);
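/*
 * Recover a hotzone interrupted by a crash, using the resilience data
 * (checksums, journal or data shift) associated with the reencrypt keyslot.
 */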
1252 static int reencrypt_recover_segment(struct crypt_device *cd,
1253 struct luks2_hdr *hdr,
1254 struct luks2_reenc_context *rh,
1255 struct volume_key *vks)
1257 struct volume_key *vk_old, *vk_new;
1260 unsigned resilience;
1261 uint64_t area_offset, area_length, area_length_read, crash_iv_offset,
1262 data_offset = crypt_get_data_offset(cd) << SECTOR_SHIFT;
1263 int devfd, r, new_sector_size, old_sector_size, rseg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot);
1264 char *checksum_tmp = NULL, *data_buffer = NULL;
1265 struct crypt_storage_wrapper *cw1 = NULL, *cw2 = NULL;
1267 resilience = rh->rp.type;
1269 if (rseg < 0 || rh->length < 512)
1272 vk_new = crypt_volume_key_by_id(vks, rh->digest_new);
1273 if (!vk_new && rh->mode != CRYPT_REENCRYPT_DECRYPT)
1275 vk_old = crypt_volume_key_by_id(vks, rh->digest_old);
1276 if (!vk_old && rh->mode != CRYPT_REENCRYPT_ENCRYPT)
1278 old_sector_size = json_segment_get_sector_size(reencrypt_segment_old(hdr));
1279 new_sector_size = json_segment_get_sector_size(reencrypt_segment_new(hdr));
1280 if (rh->mode == CRYPT_REENCRYPT_DECRYPT)
1281 crash_iv_offset = rh->offset >> SECTOR_SHIFT; /* TODO: + old iv_tweak */
1283 crash_iv_offset = json_segment_get_iv_offset(json_segments_get_segment(rh->jobj_segs_hot, rseg));
1285 log_dbg(cd, "crash_offset: %" PRIu64 ", crash_length: %" PRIu64 ", crash_iv_offset: %" PRIu64, data_offset + rh->offset, rh->length, crash_iv_offset);
1287 r = crypt_storage_wrapper_init(cd, &cw2, crypt_data_device(cd),
1288 data_offset + rh->offset, crash_iv_offset, new_sector_size,
1289 reencrypt_segment_cipher_new(hdr), vk_new, 0);
1291 log_err(cd, _("Failed to initialize new segment storage wrapper."));
1295 if (LUKS2_keyslot_area(hdr, rh->reenc_keyslot, &area_offset, &area_length)) {
1300 if (posix_memalign((void**)&data_buffer, device_alignment(crypt_data_device(cd)), rh->length)) {
1305 switch (resilience) {
1306 case REENC_PROTECTION_CHECKSUM:
1307 log_dbg(cd, "Checksums based recovery.");
1309 r = crypt_storage_wrapper_init(cd, &cw1, crypt_data_device(cd),
1310 data_offset + rh->offset, crash_iv_offset, old_sector_size,
1311 reencrypt_segment_cipher_old(hdr), vk_old, 0);
1313 log_err(cd, _("Failed to initialize old segment storage wrapper."));
1317 count = rh->length / rh->alignment;
1318 area_length_read = count * rh->rp.p.csum.hash_size;
1319 if (area_length_read > area_length) {
1320 log_dbg(cd, "Internal error in calculated area_length.");
1325 checksum_tmp = malloc(rh->rp.p.csum.hash_size);
1326 if (!checksum_tmp) {
1331 /* TODO: lock for read */
1332 devfd = device_open(cd, crypt_metadata_device(cd), O_RDONLY);
1336 /* read old data checksums */
1337 read = read_lseek_blockwise(devfd, device_block_size(cd, crypt_metadata_device(cd)),
1338 device_alignment(crypt_metadata_device(cd)), rh->rp.p.csum.checksums, area_length_read, area_offset);
1339 if (read < 0 || (size_t)read != area_length_read) {
1340 log_err(cd, _("Failed to read checksums for current hotzone."));
1345 read = crypt_storage_wrapper_read(cw2, 0, data_buffer, rh->length);
1346 if (read < 0 || (size_t)read != rh->length) {
1347 log_err(cd, _("Failed to read hotzone area starting at %" PRIu64 "."), rh->offset + data_offset);
1352 for (s = 0; s < count; s++) {
1353 if (crypt_hash_write(rh->rp.p.csum.ch, data_buffer + (s * rh->alignment), rh->alignment)) {
1354 log_dbg(cd, "Failed to write hash.");
1358 if (crypt_hash_final(rh->rp.p.csum.ch, checksum_tmp, rh->rp.p.csum.hash_size)) {
1359 log_dbg(cd, "Failed to finalize hash.");
1363 if (!memcmp(checksum_tmp, (char *)rh->rp.p.csum.checksums + (s * rh->rp.p.csum.hash_size), rh->rp.p.csum.hash_size)) {
1364 log_dbg(cd, "Sector %zu (size %zu, offset %zu) needs recovery", s, rh->alignment, s * rh->alignment);
1365 if (crypt_storage_wrapper_decrypt(cw1, s * rh->alignment, data_buffer + (s * rh->alignment), rh->alignment)) {
1366 log_err(cd, _("Failed to decrypt sector %zu."), s);
1370 w = crypt_storage_wrapper_encrypt_write(cw2, s * rh->alignment, data_buffer + (s * rh->alignment), rh->alignment);
1371 if (w < 0 || (size_t)w != rh->alignment) {
1372 log_err(cd, _("Failed to recover sector %zu."), s);
1381 case REENC_PROTECTION_JOURNAL:
1382 log_dbg(cd, "Journal based recovery.");
1384 /* FIXME: validation candidate */
1385 if (rh->length > area_length) {
1387 log_dbg(cd, "Invalid journal size.");
1392 r = crypt_storage_wrapper_init(cd, &cw1, crypt_metadata_device(cd),
1393 area_offset, crash_iv_offset, old_sector_size,
1394 reencrypt_segment_cipher_old(hdr), vk_old, 0);
1396 log_err(cd, _("Failed to initialize old segment storage wrapper."));
1399 read = crypt_storage_wrapper_read_decrypt(cw1, 0, data_buffer, rh->length);
1400 if (read < 0 || (size_t)read != rh->length) {
1401 log_dbg(cd, "Failed to read journaled data.");
1403 /* may contain plaintext */
1404 crypt_safe_memzero(data_buffer, rh->length);
1407 read = crypt_storage_wrapper_encrypt_write(cw2, 0, data_buffer, rh->length);
1408 /* may contain plaintext */
1409 crypt_safe_memzero(data_buffer, rh->length);
1410 if (read < 0 || (size_t)read != rh->length) {
1411 log_dbg(cd, "recovery write failed.");
1418 case REENC_PROTECTION_DATASHIFT:
1419 log_dbg(cd, "Data shift based recovery.");
1422 r = crypt_storage_wrapper_init(cd, &cw1, crypt_data_device(cd),
1423 json_segment_get_offset(rh->jobj_segment_moved, 0), 0, 0,
1424 reencrypt_segment_cipher_old(hdr), NULL, 0);
1426 r = crypt_storage_wrapper_init(cd, &cw1, crypt_data_device(cd),
1427 data_offset + rh->offset - rh->data_shift, 0, 0,
1428 reencrypt_segment_cipher_old(hdr), NULL, 0);
1430 log_err(cd, _("Failed to initialize old segment storage wrapper."));
1434 read = crypt_storage_wrapper_read_decrypt(cw1, 0, data_buffer, rh->length);
1435 if (read < 0 || (size_t)read != rh->length) {
1436 log_dbg(cd, "Failed to read data.");
1438 /* may contain plaintext */
1439 crypt_safe_memzero(data_buffer, rh->length);
1443 read = crypt_storage_wrapper_encrypt_write(cw2, 0, data_buffer, rh->length);
1444 /* may contain plaintext */
1445 crypt_safe_memzero(data_buffer, rh->length);
1446 if (read < 0 || (size_t)read != rh->length) {
1447 log_dbg(cd, "recovery write failed.");
1458 rh->read = rh->length;
1462 crypt_storage_wrapper_destroy(cw1);
1463 crypt_storage_wrapper_destroy(cw2);
1468 static int reencrypt_add_moved_segment(struct crypt_device *cd,
1469 struct luks2_hdr *hdr,
1470 struct luks2_reenc_context *rh)
1472 int s = LUKS2_segment_first_unused_id(hdr);
1474 if (!rh->jobj_segment_moved)
1480 if (json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), s, json_object_get(rh->jobj_segment_moved))) {
1481 json_object_put(rh->jobj_segment_moved);
1488 static int reencrypt_add_backup_segment(struct crypt_device *cd,
1489 struct luks2_hdr *hdr,
1490 struct luks2_reenc_context *rh,
1493 int digest, s = LUKS2_segment_first_unused_id(hdr);
1499 digest = final ? rh->digest_new : rh->digest_old;
1500 jobj = final ? rh->jobj_segment_new : rh->jobj_segment_old;
1502 if (json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), s, json_object_get(jobj))) {
1503 json_object_put(jobj);
1507 if (strcmp(json_segment_type(jobj), "crypt"))
1510 return LUKS2_digest_segment_assign(cd, hdr, s, digest, 1, 0);
1513 static int reencrypt_assign_segments_simple(struct crypt_device *cd,
1514 struct luks2_hdr *hdr,
1515 struct luks2_reenc_context *rh,
1521 if (hot && json_segments_count(rh->jobj_segs_hot) > 0) {
1522 log_dbg(cd, "Setting 'hot' segments.");
1524 r = LUKS2_segments_set(cd, hdr, rh->jobj_segs_hot, 0);
1526 rh->jobj_segs_hot = NULL;
1527 } else if (!hot && json_segments_count(rh->jobj_segs_post) > 0) {
1528 log_dbg(cd, "Setting 'post' segments.");
1529 r = LUKS2_segments_set(cd, hdr, rh->jobj_segs_post, 0);
1531 rh->jobj_segs_post = NULL;
1533 log_dbg(cd, "No segments to set.");
1538 log_dbg(cd, "Failed to assign new enc segments.");
1542 r = reencrypt_add_backup_segment(cd, hdr, rh, 0);
1544 log_dbg(cd, "Failed to assign reencryption previous backup segment.");
1548 r = reencrypt_add_backup_segment(cd, hdr, rh, 1);
1550 log_dbg(cd, "Failed to assign reencryption final backup segment.");
1554 r = reencrypt_add_moved_segment(cd, hdr, rh);
1556 log_dbg(cd, "Failed to assign reencryption moved backup segment.");
1560 for (sg = 0; sg < LUKS2_segments_count(hdr); sg++) {
1561 if (LUKS2_segment_is_type(hdr, sg, "crypt") &&
1562 LUKS2_digest_segment_assign(cd, hdr, sg, rh->mode == CRYPT_REENCRYPT_ENCRYPT ? rh->digest_new : rh->digest_old, 1, 0)) {
1563 log_dbg(cd, "Failed to assign digest %u to segment %u.", rh->digest_new, sg);
1568 return commit ? LUKS2_hdr_write(cd, hdr) : 0;
1571 static int reencrypt_assign_segments(struct crypt_device *cd,
1572 struct luks2_hdr *hdr,
1573 struct luks2_reenc_context *rh,
1578 int rseg, scount, r = -EINVAL;
1580 /* FIXME: validate in reencrypt context load */
1581 if (rh->digest_new < 0 && rh->mode != CRYPT_REENCRYPT_DECRYPT)
1584 if (LUKS2_digest_segment_assign(cd, hdr, CRYPT_ANY_SEGMENT, CRYPT_ANY_DIGEST, 0, 0))
1587 if (rh->mode == CRYPT_REENCRYPT_ENCRYPT || rh->mode == CRYPT_REENCRYPT_DECRYPT)
1588 return reencrypt_assign_segments_simple(cd, hdr, rh, hot, commit);
1590 if (hot && rh->jobj_segs_hot) {
1591 log_dbg(cd, "Setting 'hot' segments.");
1593 r = LUKS2_segments_set(cd, hdr, rh->jobj_segs_hot, 0);
1595 rh->jobj_segs_hot = NULL;
1596 } else if (!hot && rh->jobj_segs_post) {
1597 log_dbg(cd, "Setting 'post' segments.");
1598 r = LUKS2_segments_set(cd, hdr, rh->jobj_segs_post, 0);
1600 rh->jobj_segs_post = NULL;
1606 scount = LUKS2_segments_count(hdr);
1608 /* the segment in reencryption has to hold a reference to both digests */
1609 rseg = json_segments_segment_in_reencrypt(LUKS2_get_segments_jobj(hdr));
1610 if (rseg < 0 && hot)
1614 LUKS2_digest_segment_assign(cd, hdr, rseg, rh->digest_new, 1, 0);
1615 LUKS2_digest_segment_assign(cd, hdr, rseg, rh->digest_old, 1, 0);
1618 forward = (rh->direction == CRYPT_REENCRYPT_FORWARD);
1621 LUKS2_digest_segment_assign(cd, hdr, 0, forward ? rh->digest_new : rh->digest_old, 1, 0);
1622 if (scount > rseg + 1)
1623 LUKS2_digest_segment_assign(cd, hdr, rseg + 1, forward ? rh->digest_old : rh->digest_new, 1, 0);
1625 LUKS2_digest_segment_assign(cd, hdr, 0, forward || scount == 1 ? rh->digest_new : rh->digest_old, 1, 0);
1627 LUKS2_digest_segment_assign(cd, hdr, 1, forward ? rh->digest_old : rh->digest_new, 1, 0);
1630 r = reencrypt_add_backup_segment(cd, hdr, rh, 0);
1632 log_dbg(cd, "Failed to assign hot reencryption backup segment.");
1635 r = reencrypt_add_backup_segment(cd, hdr, rh, 1);
1637 log_dbg(cd, "Failed to assign post reencryption backup segment.");
1641 return commit ? LUKS2_hdr_write(cd, hdr) : 0;
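/*
 * Build the initial linear segment layout for encryption of a plain device,
 * optionally moving the first data segment to make room for the LUKS2 header.
 */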
1644 static int reencrypt_set_encrypt_segments(struct crypt_device *cd, struct luks2_hdr *hdr, uint64_t dev_size, uint64_t data_shift, bool move_first_segment, crypt_reencrypt_direction_info di)
1647 uint64_t first_segment_offset, first_segment_length,
1648 second_segment_offset, second_segment_length,
1649 data_offset = LUKS2_get_data_offset(hdr) << SECTOR_SHIFT;
1650 json_object *jobj_segment_first = NULL, *jobj_segment_second = NULL, *jobj_segments;
1652 if (dev_size < data_shift)
1655 if (data_shift && (di == CRYPT_REENCRYPT_FORWARD))
1658 if (move_first_segment) {
1660 * future data_device layout:
1661 * [future LUKS2 header (data shift size)][second data segment][gap (data shift size)][first data segment (data shift size)]
1663 first_segment_offset = dev_size;
1664 first_segment_length = data_shift;
1665 second_segment_offset = data_shift;
1666 second_segment_length = dev_size - 2 * data_shift;
1667 } else if (data_shift) {
1668 first_segment_offset = data_offset;
1669 first_segment_length = dev_size;
1671 /* future data_device layout with detached header: [first data segment] */
1672 first_segment_offset = data_offset;
1673 first_segment_length = 0; /* dynamic */
1676 jobj_segments = json_object_new_object();
1681 if (move_first_segment) {
1682 jobj_segment_first = json_segment_create_linear(first_segment_offset, &first_segment_length, 0);
1683 if (second_segment_length &&
1684 !(jobj_segment_second = json_segment_create_linear(second_segment_offset, &second_segment_length, 0))) {
1685 log_dbg(cd, "Failed generate 2nd segment.");
1689 jobj_segment_first = json_segment_create_linear(first_segment_offset, first_segment_length ? &first_segment_length : NULL, 0);
1691 if (!jobj_segment_first) {
1692 log_dbg(cd, "Failed generate 1st segment.");
1696 json_object_object_add(jobj_segments, "0", jobj_segment_first);
1697 if (jobj_segment_second)
1698 json_object_object_add(jobj_segments, "1", jobj_segment_second);
1700 r = LUKS2_digest_segment_assign(cd, hdr, CRYPT_ANY_SEGMENT, CRYPT_ANY_DIGEST, 0, 0);
1703 r = LUKS2_segments_set(cd, hdr, jobj_segments, 0);
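/*
 * Translate header segments into dm targets; the segment currently in
 * reencryption maps to the hotzone device, all others to the data device.
 */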
1708 static int reencrypt_make_targets(struct crypt_device *cd,
1709 struct luks2_hdr *hdr,
1710 struct device *hz_device,
1711 struct volume_key *vks,
1712 struct dm_target *result,
1716 struct volume_key *vk;
1717 uint64_t segment_size, segment_offset, segment_start = 0;
1720 json_object *jobj, *jobj_segments = LUKS2_get_segments_jobj(hdr);
1723 jobj = json_segments_get_segment(jobj_segments, s);
1725 log_dbg(cd, "Internal error. Segment %u is null.", s);
1730 reenc_seg = (s == json_segments_segment_in_reencrypt(jobj_segments));
1732 segment_offset = json_segment_get_offset(jobj, 1);
1733 segment_size = json_segment_get_size(jobj, 1);
1734 /* 'dynamic' length allowed in last segment only */
1735 if (!segment_size && !result->next)
1736 segment_size = (size >> SECTOR_SHIFT) - segment_start;
1737 if (!segment_size) {
1738 log_dbg(cd, "Internal error. Wrong segment size %u", s);
1743 if (!strcmp(json_segment_type(jobj), "crypt")) {
1744 vk = crypt_volume_key_by_id(vks, reenc_seg ? LUKS2_reencrypt_digest_new(hdr) : LUKS2_digest_by_segment(hdr, s));
1746 log_err(cd, _("Missing key for dm-crypt segment %u"), s);
1752 segment_offset -= crypt_get_data_offset(cd);
1754 r = dm_crypt_target_set(result, segment_start, segment_size,
1755 reenc_seg ? hz_device : crypt_data_device(cd),
1757 json_segment_get_cipher(jobj),
1758 json_segment_get_iv_offset(jobj),
1762 json_segment_get_sector_size(jobj));
1764 log_err(cd, _("Failed to set dm-crypt segment."));
1767 } else if (!strcmp(json_segment_type(jobj), "linear")) {
1768 r = dm_linear_target_set(result, segment_start, segment_size, reenc_seg ? hz_device : crypt_data_device(cd), segment_offset);
1770 log_err(cd, _("Failed to set dm-linear segment."));
1778 segment_start += segment_size;
1780 result = result->next;
1788 /* GLOBAL FIXME: audit function names and parameter names */
1791 * 1) audit log routines
1792 * 2) can't we derive hotzone device name from crypt context? (unlocked name, device uuid, etc?)
1794 static int reencrypt_load_overlay_device(struct crypt_device *cd, struct luks2_hdr *hdr,
1795 const char *overlay, const char *hotzone, struct volume_key *vks, uint64_t size,
1798 char hz_path[PATH_MAX];
1801 struct device *hz_dev = NULL;
1802 struct crypt_dm_active_device dmd = {
1806 log_dbg(cd, "Loading new table for overlay device %s.", overlay);
1808 r = snprintf(hz_path, PATH_MAX, "%s/%s", dm_get_dir(), hotzone);
1809 if (r < 0 || r >= PATH_MAX) {
1814 r = device_alloc(cd, &hz_dev, hz_path);
1818 r = dm_targets_allocate(&dmd.segment, LUKS2_segments_count(hdr));
1822 r = reencrypt_make_targets(cd, hdr, hz_dev, vks, &dmd.segment, size);
1826 r = dm_reload_device(cd, overlay, &dmd, 0, 0);
1828 /* what else on error here? */
1830 dm_targets_free(cd, &dmd);
1831 device_free(cd, hz_dev);
1836 static int reencrypt_replace_device(struct crypt_device *cd, const char *target, const char *source, uint32_t flags)
1839 struct crypt_dm_active_device dmd_source, dmd_target = {};
1840 uint32_t dmflags = DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH;
1842 log_dbg(cd, "Replacing table in device %s with table from device %s.", target, source);
1844 /* check only whether target device exists */
1845 r = dm_status_device(cd, target);
1853 r = dm_query_device(cd, source, DM_ACTIVE_DEVICE | DM_ACTIVE_CRYPT_CIPHER |
1854 DM_ACTIVE_CRYPT_KEYSIZE | DM_ACTIVE_CRYPT_KEY, &dmd_source);
1859 if (exists && ((r = dm_query_device(cd, target, 0, &dmd_target)) < 0))
1862 dmd_source.flags |= flags;
1863 dmd_source.uuid = crypt_get_uuid(cd);
1866 if (dmd_target.size != dmd_source.size) {
1867 log_err(cd, _("Source and target device sizes don't match. Source %" PRIu64 ", target: %" PRIu64 "."),
1868 dmd_source.size, dmd_target.size);
1872 r = dm_reload_device(cd, target, &dmd_source, 0, 0);
1874 log_dbg(cd, "Resuming device %s", target);
1875 r = dm_resume_device(cd, target, dmflags | act2dmflags(dmd_source.flags));
1878 r = dm_create_device(cd, target, CRYPT_SUBDEV, &dmd_source);
1880 dm_targets_free(cd, &dmd_source);
1881 dm_targets_free(cd, &dmd_target);
1886 static int reencrypt_swap_backing_device(struct crypt_device *cd, const char *name,
1887 const char *new_backend_name)
1890 struct device *overlay_dev = NULL;
1891 char overlay_path[PATH_MAX] = { 0 };
1892 struct crypt_dm_active_device dmd = {};
1894 log_dbg(cd, "Redirecting %s mapping to new backing device: %s.", name, new_backend_name);
1896 r = snprintf(overlay_path, PATH_MAX, "%s/%s", dm_get_dir(), new_backend_name);
1897 if (r < 0 || r >= PATH_MAX) {
1902 r = device_alloc(cd, &overlay_dev, overlay_path);
1906 r = device_block_adjust(cd, overlay_dev, DEV_OK,
1907 0, &dmd.size, &dmd.flags);
1911 r = dm_linear_target_set(&dmd.segment, 0, dmd.size, overlay_dev, 0);
1915 r = dm_reload_device(cd, name, &dmd, 0, 0);
1917 log_dbg(cd, "Resuming device %s", name);
1918 r = dm_resume_device(cd, name, DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH);
1922 dm_targets_free(cd, &dmd);
1923 device_free(cd, overlay_dev);
1928 static int reencrypt_activate_hotzone_device(struct crypt_device *cd, const char *name, uint64_t device_size, uint32_t flags)
1931 uint64_t new_offset = reencrypt_get_data_offset_new(crypt_get_hdr(cd, CRYPT_LUKS2)) >> SECTOR_SHIFT;
1933 struct crypt_dm_active_device dmd = {
1935 .uuid = crypt_get_uuid(cd),
1936 .size = device_size >> SECTOR_SHIFT
1939 log_dbg(cd, "Activating hotzone device %s.", name);
1941 r = device_block_adjust(cd, crypt_data_device(cd), DEV_OK,
1942 new_offset, &dmd.size, &dmd.flags);
1946 r = dm_linear_target_set(&dmd.segment, 0, dmd.size, crypt_data_device(cd), new_offset);
1950 r = dm_create_device(cd, name, CRYPT_SUBDEV, &dmd);
1952 dm_targets_free(cd, &dmd);
1957 static int reencrypt_init_device_stack(struct crypt_device *cd,
1958 const struct luks2_reenc_context *rh)
1962 /* Activate hotzone device 1:1 linear mapping to data_device */
1963 r = reencrypt_activate_hotzone_device(cd, rh->hotzone_name, rh->device_size, CRYPT_ACTIVATE_PRIVATE);
1965 log_err(cd, _("Failed to activate hotzone device %s."), rh->hotzone_name);
1970 * Activate the overlay device with exactly the same table as the original 'name' mapping.
1971 * Note that within this step the 'name' device may already include a table
1972 * constructed from more than a single dm-crypt segment. Therefore transfer
1975 * If we're about to resume reencryption, the original mapping has to be already validated for
1976 * abrupt shutdown and rchunk_offset has to point to the next chunk to reencrypt!
1978 * TODO: in crypt_activate_by*
1980 r = reencrypt_replace_device(cd, rh->overlay_name, rh->device_name, CRYPT_ACTIVATE_PRIVATE);
1982 log_err(cd, _("Failed to activate overlay device %s with actual origin table."), rh->overlay_name);
1986 /* swap origin mapping to overlay device */
1987 r = reencrypt_swap_backing_device(cd, rh->device_name, rh->overlay_name);
1989 log_err(cd, _("Failed to load new mapping for device %s."), rh->device_name);
1994 * Now the 'name' (unlocked LUKS) device is mapped via dm-linear to an overlay device.
1995 * The overlay device has the original live table of the 'name' device from before the swap.
2000 /* TODO: force error helper devices on error path */
2001 dm_remove_device(cd, rh->overlay_name, 0);
2002 dm_remove_device(cd, rh->hotzone_name, 0);
2008 * 1) audit error path. Any error in this routine is fatal and should be unlikely;
2009 * usually it would hint at a collision with another userspace process touching
2010 * dm devices directly.
2012 static int reenc_refresh_helper_devices(struct crypt_device *cd, const char *overlay, const char *hotzone)
2017 * we have to explicitly suspend the overlay device before suspending
2018 * the hotzone one. Resuming overlay device (aka switching tables) only
2019 * after suspending the hotzone may lead to deadlock.
2021 * In other words: always suspend the stack from top to bottom!
2023 r = dm_suspend_device(cd, overlay, DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH);
2025 log_err(cd, _("Failed to suspend device %s."), overlay);
2029 /* suspend HZ device */
2030 r = dm_suspend_device(cd, hotzone, DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH);
2032 log_err(cd, _("Failed to suspend device %s."), hotzone);
2036 /* resume overlay device: inactive table (with hotzone) -> live */
2037 r = dm_resume_device(cd, overlay, DM_RESUME_PRIVATE);
2039 log_err(cd, _("Failed to resume device %s."), overlay);
2044 static int reencrypt_refresh_overlay_devices(struct crypt_device *cd,
2045 struct luks2_hdr *hdr,
2046 const char *overlay,
2047 const char *hotzone,
2048 struct volume_key *vks,
2049 uint64_t device_size,
2052 int r = reencrypt_load_overlay_device(cd, hdr, overlay, hotzone, vks, device_size, flags);
2054 log_err(cd, _("Failed to reload device %s."), overlay);
2058 r = reenc_refresh_helper_devices(cd, overlay, hotzone);
2060 log_err(cd, _("Failed to refresh reencryption devices stack."));
2061 return REENC_ROLLBACK;
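/*
 * Move the first data_shift bytes from the head of the data device to the
 * offset reserved for the moved first segment.
 */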
2067 static int reencrypt_move_data(struct crypt_device *cd, int devfd, uint64_t data_shift)
2072 uint64_t buffer_len, offset;
2073 struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
2075 log_dbg(cd, "Going to move data from head of data device.");
2077 buffer_len = data_shift;
2081 offset = json_segment_get_offset(LUKS2_get_segment_jobj(hdr, 0), 0);
2083 /* this is nonsense anyway */
2084 if (buffer_len != json_segment_get_size(LUKS2_get_segment_jobj(hdr, 0), 0)) {
2085 log_dbg(cd, "buffer_len %" PRIu64", segment size %" PRIu64, buffer_len, json_segment_get_size(LUKS2_get_segment_jobj(hdr, 0), 0));
2089 if (posix_memalign(&buffer, device_alignment(crypt_data_device(cd)), buffer_len))
2092 ret = read_lseek_blockwise(devfd,
2093 device_block_size(cd, crypt_data_device(cd)),
2094 device_alignment(crypt_data_device(cd)),
2095 buffer, buffer_len, 0);
2096 if (ret < 0 || (uint64_t)ret != buffer_len) {
2101 log_dbg(cd, "Going to write %" PRIu64 " bytes at offset %" PRIu64, buffer_len, offset);
2102 ret = write_lseek_blockwise(devfd,
2103 device_block_size(cd, crypt_data_device(cd)),
2104 device_alignment(crypt_data_device(cd)),
2105 buffer, buffer_len, offset);
2106 if (ret < 0 || (uint64_t)ret != buffer_len) {
2113 memset(buffer, 0, buffer_len);
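/*
 * Create the backup segments describing both sides of the reencryption in
 * LUKS2 metadata: "backup-previous" (current mapping, or a linear segment
 * for plain encryption), "backup-final" (target crypt segment, or linear
 * for decryption) and, when the first segment is moved, a
 * "backup-moved-segment" copy. Digests are assigned so keys can later be
 * matched to either side.
 */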
2118 static int reencrypt_make_backup_segments(struct crypt_device *cd,
2119 struct luks2_hdr *hdr,
2122 uint64_t data_offset,
2123 const struct crypt_params_reencrypt *params)
2125 int r, segment, moved_segment = -1, digest_old = -1, digest_new = -1;
2126 json_object *jobj_segment_new = NULL, *jobj_segment_old = NULL, *jobj_segment_bcp = NULL;
2127 uint32_t sector_size = params->luks2 ? params->luks2->sector_size : SECTOR_SIZE;
2128 uint64_t segment_offset, tmp, data_shift = params->data_shift << SECTOR_SHIFT;
2130 if (params->mode != CRYPT_REENCRYPT_DECRYPT) {
2131 digest_new = LUKS2_digest_by_keyslot(hdr, keyslot_new);
2136 if (params->mode != CRYPT_REENCRYPT_ENCRYPT) {
2137 digest_old = LUKS2_digest_by_segment(hdr, CRYPT_DEFAULT_SEGMENT);
2142 segment = LUKS2_segment_first_unused_id(hdr);
2146 if (params->mode == CRYPT_REENCRYPT_ENCRYPT &&
2147 (params->flags & CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT)) {
2148 json_object_copy(LUKS2_get_segment_jobj(hdr, 0), &jobj_segment_bcp);
2149 r = LUKS2_segment_set_flag(jobj_segment_bcp, "backup-moved-segment");
2152 moved_segment = segment++;
2153 json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), moved_segment, jobj_segment_bcp);
2156 /* FIXME: Add detection for case (digest old == digest new && old segment == new segment) */
2157 if (digest_old >= 0)
2158 json_object_copy(LUKS2_get_segment_jobj(hdr, CRYPT_DEFAULT_SEGMENT), &jobj_segment_old);
2159 else if (params->mode == CRYPT_REENCRYPT_ENCRYPT) {
2160 r = LUKS2_get_data_size(hdr, &tmp, NULL);
2163 jobj_segment_old = json_segment_create_linear(0, tmp ? &tmp : NULL, 0);
2166 if (!jobj_segment_old) {
2171 r = LUKS2_segment_set_flag(jobj_segment_old, "backup-previous");
2174 json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), segment, jobj_segment_old);
2175 jobj_segment_old = NULL;
2176 if (digest_old >= 0)
2177 LUKS2_digest_segment_assign(cd, hdr, segment, digest_old, 1, 0);
2180 if (digest_new >= 0) {
2181 segment_offset = data_offset;
2182 if (params->mode != CRYPT_REENCRYPT_ENCRYPT &&
2183 modify_offset(&segment_offset, data_shift, params->direction)) {
2187 jobj_segment_new = json_segment_create_crypt(segment_offset,
2188 crypt_get_iv_offset(cd),
2189 NULL, cipher, sector_size, 0);
2190 } else if (params->mode == CRYPT_REENCRYPT_DECRYPT) {
2191 segment_offset = data_offset;
2192 if (modify_offset(&segment_offset, data_shift, params->direction)) {
2196 jobj_segment_new = json_segment_create_linear(segment_offset, NULL, 0);
2199 if (!jobj_segment_new) {
2204 r = LUKS2_segment_set_flag(jobj_segment_new, "backup-final");
2207 json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), segment, jobj_segment_new);
2208 jobj_segment_new = NULL;
2209 if (digest_new >= 0)
2210 LUKS2_digest_segment_assign(cd, hdr, segment, digest_new, 1, 0);
2212 /* FIXME: also check the space occupied by keyslots in the shrunk area */
2213 if (params->direction == CRYPT_REENCRYPT_FORWARD && data_shift &&
2214 crypt_metadata_device(cd) == crypt_data_device(cd) &&
2215 LUKS2_set_keyslots_size(cd, hdr, json_segment_get_offset(reencrypt_segment_new(hdr), 0))) {
2216 log_err(cd, _("Failed to set new keyslots area size."));
2223 json_object_put(jobj_segment_new);
2224 json_object_put(jobj_segment_old);
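/*
 * Verify that the unlocked volume keys match the old/new reencryption
 * digests and, when the kernel keyring is in use, upload them there;
 * keyring keys are dropped again on failure.
 */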
2228 static int reencrypt_verify_and_upload_keys(struct crypt_device *cd, struct luks2_hdr *hdr, int digest_old, int digest_new, struct volume_key *vks)
2231 struct volume_key *vk;
2233 if (digest_new >= 0) {
2234 vk = crypt_volume_key_by_id(vks, digest_new);
2238 if (LUKS2_digest_verify_by_digest(cd, hdr, digest_new, vk) != digest_new)
2241 if (crypt_use_keyring_for_vk(cd) &&
2242 (r = LUKS2_volume_key_load_in_keyring_by_digest(cd, hdr, vk, crypt_volume_key_get_id(vk))))
2247 if (digest_old >= 0 && digest_old != digest_new) {
2248 vk = crypt_volume_key_by_id(vks, digest_old);
2253 if (LUKS2_digest_verify_by_digest(cd, hdr, digest_old, vk) != digest_old) {
2257 if (crypt_use_keyring_for_vk(cd) &&
2258 (r = LUKS2_volume_key_load_in_keyring_by_digest(cd, hdr, vk, crypt_volume_key_get_id(vk))))
2265 crypt_drop_keyring_key(cd, vks);
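/*
 * Reencryption initialization, in short: validate parameters and sector
 * size, find a free slot for the reencrypt keyslot, optionally open the
 * data device exclusively and move the first segment, create the reencrypt
 * keyslot and backup segments in memory, unlock the volume keys and only
 * then write metadata with the online-reencrypt requirement flag set.
 */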
2269 /* This function must be called with metadata lock held */
2270 static int reencrypt_init(struct crypt_device *cd,
2272 struct luks2_hdr *hdr,
2273 const char *passphrase,
2274 size_t passphrase_size,
2278 const char *cipher_mode,
2279 const struct crypt_params_reencrypt *params,
2280 struct volume_key **vks)
2282 bool move_first_segment;
2284 uint32_t sector_size;
2285 int r, reencrypt_keyslot, devfd = -1;
2286 uint64_t data_offset, dev_size = 0;
2287 struct crypt_dm_active_device dmd_target, dmd_source = {
2288 .uuid = crypt_get_uuid(cd),
2289 .flags = CRYPT_ACTIVATE_SHARED /* turn off exclusive open checks */
2292 if (!params || params->mode > CRYPT_REENCRYPT_DECRYPT)
2295 if (params->mode != CRYPT_REENCRYPT_DECRYPT &&
2296 (!params->luks2 || !(cipher && cipher_mode) || keyslot_new < 0))
2299 log_dbg(cd, "Initializing reencryption (mode: %s) in LUKS2 metadata.",
2300 crypt_reencrypt_mode_to_str(params->mode));
2302 move_first_segment = (params->flags & CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT);
2304 /* implicit sector size 512 for decryption */
2305 sector_size = params->luks2 ? params->luks2->sector_size : SECTOR_SIZE;
2306 if (sector_size < SECTOR_SIZE || sector_size > MAX_SECTOR_SIZE ||
2307 NOTPOW2(sector_size)) {
2308 log_err(cd, _("Unsupported encryption sector size."));
2312 if (!cipher_mode || *cipher_mode == '\0')
2313 snprintf(_cipher, sizeof(_cipher), "%s", cipher);
2315 snprintf(_cipher, sizeof(_cipher), "%s-%s", cipher, cipher_mode);
2317 if (MISALIGNED(params->data_shift, sector_size >> SECTOR_SHIFT)) {
2318 log_err(cd, _("Data shift is not aligned to requested encryption sector size (%" PRIu32 " bytes)."), sector_size);
2322 data_offset = LUKS2_get_data_offset(hdr) << SECTOR_SHIFT;
2324 r = device_check_access(cd, crypt_data_device(cd), DEV_OK);
2328 r = device_check_size(cd, crypt_data_device(cd), data_offset, 1);
2332 r = device_size(crypt_data_device(cd), &dev_size);
2336 dev_size -= data_offset;
2338 if (MISALIGNED(dev_size, sector_size)) {
2339 log_err(cd, _("Data device is not aligned to requested encryption sector size (%" PRIu32 " bytes)."), sector_size);
2343 reencrypt_keyslot = LUKS2_keyslot_find_empty(hdr);
2344 if (reencrypt_keyslot < 0) {
2345 log_err(cd, _("All key slots full."));
2350 * We must perform the data move with the data device opened exclusively,
2351 * to prevent another cryptsetup process (or a mount) from colliding with
2352 * encryption initialization.
2354 if (move_first_segment) {
2355 if (dev_size < 2 * (params->data_shift << SECTOR_SHIFT)) {
2356 log_err(cd, _("Device %s is too small."), device_path(crypt_data_device(cd)));
2359 if (params->data_shift < LUKS2_get_data_offset(hdr)) {
2360 log_err(cd, _("Data shift (%" PRIu64 " sectors) is less than future data offset (%" PRIu64 " sectors)."), params->data_shift, LUKS2_get_data_offset(hdr));
2363 devfd = device_open_excl(cd, crypt_data_device(cd), O_RDWR);
2365 if (devfd == -EBUSY)
2366 log_err(cd,_("Failed to open %s in exclusive mode (already mapped or mounted)."), device_path(crypt_data_device(cd)));
2371 if (params->mode == CRYPT_REENCRYPT_ENCRYPT) {
2372 /* in-memory only */
2373 r = reencrypt_set_encrypt_segments(cd, hdr, dev_size, params->data_shift << SECTOR_SHIFT, move_first_segment, params->direction);
2378 r = LUKS2_keyslot_reencrypt_create(cd, hdr, reencrypt_keyslot,
2383 r = reencrypt_make_backup_segments(cd, hdr, keyslot_new, _cipher, data_offset, params);
2385 log_dbg(cd, "Failed to create reencryption backup device segments.");
2389 r = LUKS2_keyslot_open_all_segments(cd, keyslot_old, keyslot_new, passphrase, passphrase_size, vks);
2393 if (name && params->mode != CRYPT_REENCRYPT_ENCRYPT) {
2394 r = reencrypt_verify_and_upload_keys(cd, hdr, LUKS2_reencrypt_digest_old(hdr), LUKS2_reencrypt_digest_new(hdr), *vks);
2398 r = dm_query_device(cd, name, DM_ACTIVE_UUID | DM_ACTIVE_DEVICE |
2399 DM_ACTIVE_CRYPT_KEYSIZE | DM_ACTIVE_CRYPT_KEY |
2400 DM_ACTIVE_CRYPT_CIPHER, &dmd_target);
2404 r = LUKS2_assembly_multisegment_dmd(cd, hdr, *vks, LUKS2_get_segments_jobj(hdr), &dmd_source);
2406 r = crypt_compare_dm_devices(cd, &dmd_source, &dmd_target);
2408 log_err(cd, _("Mismatching parameters on device %s."), name);
2411 dm_targets_free(cd, &dmd_source);
2412 dm_targets_free(cd, &dmd_target);
2413 free(CONST_CAST(void*)dmd_target.uuid);
2419 if (move_first_segment && reencrypt_move_data(cd, devfd, params->data_shift << SECTOR_SHIFT)) {
2424 /* This must be the first and only write to LUKS2 metadata during _reencrypt_init */
2425 r = reencrypt_update_flag(cd, 1, true);
2427 log_dbg(cd, "Failed to set online-reencryption requirement.");
2430 r = reencrypt_keyslot;
2432 device_release_excl(cd, crypt_data_device(cd));
2434 crypt_load(cd, CRYPT_LUKS2, NULL);
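/*
 * Before a hotzone is overwritten, persist its resilience data in the
 * reencrypt keyslot area: per-block checksums, the journaled data itself,
 * or (for data shift) just an updated header.
 */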
2439 static int reencrypt_hotzone_protect_final(struct crypt_device *cd,
2440 struct luks2_hdr *hdr, struct luks2_reenc_context *rh,
2441 const void *buffer, size_t buffer_len)
2443 const void *pbuffer;
2444 size_t data_offset, len;
2447 if (rh->rp.type == REENC_PROTECTION_NONE)
2450 if (rh->rp.type == REENC_PROTECTION_CHECKSUM) {
2451 log_dbg(cd, "Checksums hotzone resilience.");
2453 for (data_offset = 0, len = 0; data_offset < buffer_len; data_offset += rh->alignment, len += rh->rp.p.csum.hash_size) {
2454 if (crypt_hash_write(rh->rp.p.csum.ch, (const char *)buffer + data_offset, rh->alignment)) {
2455 log_dbg(cd, "Failed to hash sector at offset %zu.", data_offset);
2458 if (crypt_hash_final(rh->rp.p.csum.ch, (char *)rh->rp.p.csum.checksums + len, rh->rp.p.csum.hash_size)) {
2459 log_dbg(cd, "Failed to finalize hash.");
2463 pbuffer = rh->rp.p.csum.checksums;
2464 } else if (rh->rp.type == REENC_PROTECTION_JOURNAL) {
2465 log_dbg(cd, "Journal hotzone resilience.");
2468 } else if (rh->rp.type == REENC_PROTECTION_DATASHIFT) {
2469 log_dbg(cd, "Data shift hotzone resilience.");
2470 return LUKS2_hdr_write(cd, hdr);
2474 log_dbg(cd, "Going to store %zu bytes in reencrypt keyslot.", len);
2476 r = LUKS2_keyslot_reencrypt_store(cd, hdr, rh->reenc_keyslot, pbuffer, len);
2478 return r > 0 ? 0 : r;
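/*
 * Advance the context after a finished hotzone: move offset and length
 * according to direction and data shift, and account the processed bytes
 * in the overall progress.
 */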
2481 static int reencrypt_context_update(struct crypt_device *cd,
2482 struct luks2_reenc_context *rh)
2487 if (rh->direction == CRYPT_REENCRYPT_BACKWARD) {
2488 if (rh->data_shift && rh->mode == CRYPT_REENCRYPT_ENCRYPT) {
2490 rh->offset -= rh->data_shift;
2491 if (rh->offset && (rh->offset < rh->data_shift)) {
2492 rh->length = rh->offset;
2493 rh->offset = rh->data_shift;
2496 rh->length = rh->data_shift;
2498 if (rh->offset < rh->length)
2499 rh->length = rh->offset;
2500 rh->offset -= rh->length;
2502 } else if (rh->direction == CRYPT_REENCRYPT_FORWARD) {
2503 rh->offset += (uint64_t)rh->read;
2504 /* this fails later in case device_size < rh->offset */
2505 if (rh->device_size - rh->offset < rh->length)
2506 rh->length = rh->device_size - rh->offset;
2510 if (rh->device_size < rh->offset) {
2511 log_dbg(cd, "Calculated reencryption offset %" PRIu64 " is beyond device size %" PRIu64 ".", rh->offset, rh->device_size);
2515 rh->progress += (uint64_t)rh->read;
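/* Build the runtime reencryption context from metadata, either from a clean state or after a crash. */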
2520 static int reencrypt_load(struct crypt_device *cd, struct luks2_hdr *hdr,
2521 uint64_t device_size,
2522 const struct crypt_params_reencrypt *params,
2523 struct luks2_reenc_context **rh)
2526 struct luks2_reenc_context *tmp = NULL;
2527 crypt_reencrypt_info ri = LUKS2_reenc_status(hdr);
2529 if (ri == CRYPT_REENCRYPT_CLEAN)
2530 r = reencrypt_load_clean(cd, hdr, device_size, &tmp, params);
2531 else if (ri == CRYPT_REENCRYPT_CRASH)
2532 r = reencrypt_load_crashed(cd, hdr, device_size, &tmp);
2533 else if (ri == CRYPT_REENCRYPT_NONE) {
2534 log_err(cd, _("Device not marked for LUKS2 reencryption."));
2539 if (r < 0 || !tmp) {
2540 log_err(cd, _("Failed to load LUKS2 reencryption context."));
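/*
 * Reencryption is serialized per device: the lock resource is derived from
 * the header UUID ("LUKS2-reencryption-<uuid>") and taken as a write lock,
 * unless metadata locking is disabled entirely.
 */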
2549 static int reencrypt_lock_internal(struct crypt_device *cd, const char *uuid, struct crypt_lock_handle **reencrypt_lock)
2552 char *lock_resource;
2554 if (!crypt_metadata_locking_enabled()) {
2555 *reencrypt_lock = NULL;
2559 r = asprintf(&lock_resource, "LUKS2-reencryption-%s", uuid);
2567 r = crypt_write_lock(cd, lock_resource, false, reencrypt_lock);
2569 free(lock_resource);
2575 int crypt_reencrypt_lock_by_dm_uuid(struct crypt_device *cd, const char *dm_uuid, struct crypt_lock_handle **reencrypt_lock)
2579 const char *uuid = crypt_get_uuid(cd);
2585 r = snprintf(hdr_uuid, sizeof(hdr_uuid), "%.8s-%.4s-%.4s-%.4s-%.12s",
2586 dm_uuid + 6, dm_uuid + 14, dm_uuid + 18, dm_uuid + 22, dm_uuid + 26);
2587 if (r < 0 || (size_t)r != (sizeof(hdr_uuid) - 1))
2589 } else if (crypt_uuid_cmp(dm_uuid, uuid))
2592 return reencrypt_lock_internal(cd, uuid, reencrypt_lock);
2596 int crypt_reencrypt_lock(struct crypt_device *cd, struct crypt_lock_handle **reencrypt_lock)
2598 if (!cd || !crypt_get_type(cd) || strcmp(crypt_get_type(cd), CRYPT_LUKS2))
2601 return reencrypt_lock_internal(cd, crypt_get_uuid(cd), reencrypt_lock);
2605 void crypt_reencrypt_unlock(struct crypt_device *cd, struct crypt_lock_handle *reencrypt_lock)
2607 crypt_unlock_internal(cd, reencrypt_lock);
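/*
 * Take the reencryption lock, then re-read metadata and verify the device
 * is still in a clean reencryption state before proceeding.
 */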
2610 static int reencrypt_lock_and_verify(struct crypt_device *cd, struct luks2_hdr *hdr,
2611 struct crypt_lock_handle **reencrypt_lock)
2614 crypt_reencrypt_info ri;
2615 struct crypt_lock_handle *h;
2617 ri = LUKS2_reenc_status(hdr);
2618 if (ri == CRYPT_REENCRYPT_INVALID) {
2619 log_err(cd, _("Failed to get reencryption state."));
2622 if (ri < CRYPT_REENCRYPT_CLEAN) {
2623 log_err(cd, _("Device is not in reencryption."));
2627 r = crypt_reencrypt_lock(cd, &h);
2630 log_err(cd, _("Reencryption process is already running."));
2632 log_err(cd, _("Failed to acquire reencryption lock."));
2636 /* With reencryption lock held, reload device context and verify metadata state */
2637 r = crypt_load(cd, CRYPT_LUKS2, NULL);
2639 crypt_reencrypt_unlock(cd, h);
2643 ri = LUKS2_reenc_status(hdr);
2644 if (ri == CRYPT_REENCRYPT_CLEAN) {
2645 *reencrypt_lock = h;
2649 crypt_reencrypt_unlock(cd, h);
2650 log_err(cd, _("Cannot proceed with reencryption. Run reencryption recovery first."));
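/*
 * Load everything needed to resume reencryption: take and verify the lock,
 * check device size constraints, verify or unlock the volume keys, compare
 * against an already active dm mapping (online case), build the runtime
 * context and attach it to the crypt_device handle.
 */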
2654 static int reencrypt_load_by_passphrase(struct crypt_device *cd,
2656 const char *passphrase,
2657 size_t passphrase_size,
2660 struct volume_key **vks,
2661 const struct crypt_params_reencrypt *params)
2663 int r, old_ss, new_ss;
2664 struct luks2_hdr *hdr;
2665 struct crypt_lock_handle *reencrypt_lock;
2666 struct luks2_reenc_context *rh;
2667 struct crypt_dm_active_device dmd_target, dmd_source = {
2668 .uuid = crypt_get_uuid(cd),
2669 .flags = CRYPT_ACTIVATE_SHARED /* turn off exclusive open checks */
2671 uint64_t minimal_size, device_size, mapping_size = 0, required_size = 0;
2673 struct crypt_params_reencrypt rparams = {};
2678 required_size = params->device_size;
2681 log_dbg(cd, "Loading LUKS2 reencryption context.");
2683 rh = crypt_get_reenc_context(cd);
2685 LUKS2_reenc_context_free(cd, rh);
2686 crypt_set_reenc_context(cd, NULL);
2690 hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
2692 r = reencrypt_lock_and_verify(cd, hdr, &reencrypt_lock);
2696 /* From now on we hold the reencryption lock */
2698 if (LUKS2_get_data_size(hdr, &minimal_size, &dynamic))
2701 /* some configurations provide a fixed device size */
2702 r = luks2_check_device_size(cd, hdr, minimal_size, &device_size, false, dynamic);
2708 minimal_size >>= SECTOR_SHIFT;
2710 old_ss = reencrypt_get_sector_size_old(hdr);
2711 new_ss = reencrypt_get_sector_size_new(hdr);
2713 r = reencrypt_verify_and_upload_keys(cd, hdr, LUKS2_reencrypt_digest_old(hdr), LUKS2_reencrypt_digest_new(hdr), *vks);
2715 log_dbg(cd, "Keys are not ready. Unlocking all volume keys.");
2716 r = LUKS2_keyslot_open_all_segments(cd, keyslot_old, keyslot_new, passphrase, passphrase_size, vks);
2719 r = reencrypt_verify_and_upload_keys(cd, hdr, LUKS2_reencrypt_digest_old(hdr), LUKS2_reencrypt_digest_new(hdr), *vks);
2726 r = dm_query_device(cd, name, DM_ACTIVE_UUID | DM_ACTIVE_DEVICE |
2727 DM_ACTIVE_CRYPT_KEYSIZE | DM_ACTIVE_CRYPT_KEY |
2728 DM_ACTIVE_CRYPT_CIPHER, &dmd_target);
2731 flags = dmd_target.flags;
2733 r = LUKS2_assembly_multisegment_dmd(cd, hdr, *vks, LUKS2_get_segments_jobj(hdr), &dmd_source);
2735 r = crypt_compare_dm_devices(cd, &dmd_source, &dmd_target);
2737 log_err(cd, _("Mismatching parameters on device %s."), name);
2740 dm_targets_free(cd, &dmd_source);
2741 dm_targets_free(cd, &dmd_target);
2742 free(CONST_CAST(void*)dmd_target.uuid);
2745 mapping_size = dmd_target.size;
2749 if (required_size && mapping_size && (required_size != mapping_size)) {
2750 log_err(cd, _("Active device size and requested reencryption size don't match."));
2755 required_size = mapping_size;
2757 if (required_size) {
2758 /* TODO: Add support for changing fixed minimal size in reencryption mda where possible */
2759 if ((minimal_size && (required_size < minimal_size)) ||
2760 (required_size > (device_size >> SECTOR_SHIFT)) ||
2761 (!dynamic && (required_size != minimal_size)) ||
2762 (old_ss > 0 && MISALIGNED(required_size, old_ss >> SECTOR_SHIFT)) ||
2763 (new_ss > 0 && MISALIGNED(required_size, new_ss >> SECTOR_SHIFT))) {
2764 log_err(cd, _("Illegal device size requested in reencryption parameters."));
2767 rparams.device_size = required_size;
2770 r = reencrypt_load(cd, hdr, device_size, &rparams, &rh);
2774 if (name && (r = reencrypt_context_set_names(rh, name)))
2777 /* Ensure the device is not mounted and there is no active dm mapping */
2778 if (!name && (device_open_excl(cd, crypt_data_device(cd), O_RDONLY) < 0)) {
2779 log_err(cd,_("Failed to open %s in exclusive mode (already mapped or mounted)."), device_path(crypt_data_device(cd)));
2783 device_release_excl(cd, crypt_data_device(cd));
2785 /* FIXME: There's a race for dm device activation not managed by cryptsetup.
2788 * 2) rogue dm device activation
2789 * 3) one or more dm-crypt based wrapper activations
2790 * 4) the next exclusive open gets skipped due to 3); the device from 2) remains undetected.
2792 r = reencrypt_init_storage_wrappers(cd, hdr, rh, *vks);
2796 /* If one of the wrappers is based on the dm-crypt fallback, it has already blocked mounts */
2797 if (!name && crypt_storage_wrapper_get_type(rh->cw1) != DMCRYPT &&
2798 crypt_storage_wrapper_get_type(rh->cw2) != DMCRYPT) {
2799 if (device_open_excl(cd, crypt_data_device(cd), O_RDONLY) < 0) {
2800 log_err(cd,_("Failed to open %s in exclusive mode (already mapped or mounted)."), device_path(crypt_data_device(cd)));
2808 MOVE_REF(rh->vks, *vks);
2809 MOVE_REF(rh->reenc_lock, reencrypt_lock);
2811 crypt_set_reenc_context(cd, rh);
2815 crypt_reencrypt_unlock(cd, reencrypt_lock);
2816 LUKS2_reenc_context_free(cd, rh);
2820 static int reencrypt_recovery_by_passphrase(struct crypt_device *cd,
2821 struct luks2_hdr *hdr,
2824 const char *passphrase,
2825 size_t passphrase_size)
2828 crypt_reencrypt_info ri;
2829 struct crypt_lock_handle *reencrypt_lock;
2831 r = crypt_reencrypt_lock(cd, &reencrypt_lock);
2834 log_err(cd, _("Reencryption in-progress. Cannot perform recovery."));
2836 log_err(cd, _("Failed to get reencryption lock."));
2840 if ((r = crypt_load(cd, CRYPT_LUKS2, NULL))) {
2841 crypt_reencrypt_unlock(cd, reencrypt_lock);
2845 ri = LUKS2_reenc_status(hdr);
2846 if (ri == CRYPT_REENCRYPT_INVALID) {
2847 crypt_reencrypt_unlock(cd, reencrypt_lock);
2851 if (ri == CRYPT_REENCRYPT_CRASH) {
2852 r = LUKS2_reencrypt_locked_recovery_by_passphrase(cd, keyslot_old, keyslot_new,
2853 passphrase, passphrase_size, 0, NULL);
2855 log_err(cd, _("LUKS2 reencryption recovery failed."));
2857 log_dbg(cd, "No LUKS2 reencryption recovery needed.");
2861 crypt_reencrypt_unlock(cd, reencrypt_lock);
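/*
 * Dispatcher for the crypt_reencrypt_init_by_* calls: run crash recovery,
 * initialize new reencryption metadata, or just load an already
 * initialized one, depending on flags and the current metadata status.
 */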
2865 static int reencrypt_init_by_passphrase(struct crypt_device *cd,
2867 const char *passphrase,
2868 size_t passphrase_size,
2872 const char *cipher_mode,
2873 const struct crypt_params_reencrypt *params)
2876 crypt_reencrypt_info ri;
2877 struct volume_key *vks = NULL;
2878 uint32_t flags = params ? params->flags : 0;
2879 struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
2881 /* short-circuit in recovery and finish immediately. */
2882 if (flags & CRYPT_REENCRYPT_RECOVERY)
2883 return reencrypt_recovery_by_passphrase(cd, hdr, keyslot_old, keyslot_new, passphrase, passphrase_size);
2886 r = crypt_keyslot_get_key_size(cd, keyslot_new);
2889 r = LUKS2_check_cipher(cd, r, cipher, cipher_mode);
2894 r = LUKS2_device_write_lock(cd, hdr, crypt_metadata_device(cd));
2898 ri = LUKS2_reenc_status(hdr);
2899 if (ri == CRYPT_REENCRYPT_INVALID) {
2900 device_write_unlock(cd, crypt_metadata_device(cd));
2904 if ((ri > CRYPT_REENCRYPT_NONE) && (flags & CRYPT_REENCRYPT_INITIALIZE_ONLY)) {
2905 device_write_unlock(cd, crypt_metadata_device(cd));
2906 log_err(cd, _("LUKS2 reencryption already initialized in metadata."));
2910 if (ri == CRYPT_REENCRYPT_NONE && !(flags & CRYPT_REENCRYPT_RESUME_ONLY)) {
2911 r = reencrypt_init(cd, name, hdr, passphrase, passphrase_size, keyslot_old, keyslot_new, cipher, cipher_mode, params, &vks);
2913 log_err(cd, _("Failed to initialize LUKS2 reencryption in metadata."));
2914 } else if (ri > CRYPT_REENCRYPT_NONE) {
2915 log_dbg(cd, "LUKS2 reencryption already initialized.");
2919 device_write_unlock(cd, crypt_metadata_device(cd));
2921 if (r < 0 || (flags & CRYPT_REENCRYPT_INITIALIZE_ONLY))
2924 r = reencrypt_load_by_passphrase(cd, name, passphrase, passphrase_size, keyslot_old, keyslot_new, &vks, params);
2927 crypt_drop_keyring_key(cd, vks);
2928 crypt_free_volume_key(vks);
2929 return r < 0 ? r : LUKS2_find_keyslot(hdr, "reencrypt");
2932 int crypt_reencrypt_init_by_keyring(struct crypt_device *cd,
2934 const char *passphrase_description,
2938 const char *cipher_mode,
2939 const struct crypt_params_reencrypt *params)
2943 size_t passphrase_size;
2945 if (onlyLUKS2mask(cd, CRYPT_REQUIREMENT_ONLINE_REENCRYPT) || !passphrase_description)
2947 if (params && (params->flags & CRYPT_REENCRYPT_INITIALIZE_ONLY) && (params->flags & CRYPT_REENCRYPT_RESUME_ONLY))
2950 r = keyring_get_passphrase(passphrase_description, &passphrase, &passphrase_size);
2952 log_err(cd, _("Failed to read passphrase from keyring (error %d)."), r);
2956 r = reencrypt_init_by_passphrase(cd, name, passphrase, passphrase_size, keyslot_old, keyslot_new, cipher, cipher_mode, params);
2958 crypt_safe_memzero(passphrase, passphrase_size);
2964 int crypt_reencrypt_init_by_passphrase(struct crypt_device *cd,
2966 const char *passphrase,
2967 size_t passphrase_size,
2971 const char *cipher_mode,
2972 const struct crypt_params_reencrypt *params)
2974 if (onlyLUKS2mask(cd, CRYPT_REQUIREMENT_ONLINE_REENCRYPT) || !passphrase)
2976 if (params && (params->flags & CRYPT_REENCRYPT_INITIALIZE_ONLY) && (params->flags & CRYPT_REENCRYPT_RESUME_ONLY))
2979 return reencrypt_init_by_passphrase(cd, name, passphrase, passphrase_size, keyslot_old, keyslot_new, cipher, cipher_mode, params);
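/*
 * One reencryption step over a single hotzone: update keyslot protection
 * parameters, build and assign hotzone segments, refresh the device stack
 * (online mode), read the hotzone, store resilience data, decrypt with the
 * old key, encrypt and write with the new one, sync, commit the segment
 * assignment and resume the hotzone device.
 */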
2982 static reenc_status_t reencrypt_step(struct crypt_device *cd,
2983 struct luks2_hdr *hdr,
2984 struct luks2_reenc_context *rh,
2985 uint64_t device_size,
2990 /* update reencrypt keyslot protection parameters in memory only */
2991 r = reenc_keyslot_update(cd, rh);
2993 log_dbg(cd, "Keyslot update failed.");
2997 /* in memory only */
2998 r = reencrypt_make_segments(cd, hdr, rh, device_size);
3002 r = reencrypt_assign_segments(cd, hdr, rh, 1, 0);
3004 log_err(cd, _("Failed to set device segments for next reencryption hotzone."));
3009 r = reencrypt_refresh_overlay_devices(cd, hdr, rh->overlay_name, rh->hotzone_name, rh->vks, rh->device_size, rh->flags);
3010 /* Tear down overlay devices with dm-error. No bio shall pass! */
3015 log_dbg(cd, "Reencrypting chunk starting at offset: %" PRIu64 ", size: %" PRIu64 ".", rh->offset, rh->length);
3016 log_dbg(cd, "data_offset: %" PRIu64, crypt_get_data_offset(cd) << SECTOR_SHIFT);
3018 if (!rh->offset && rh->mode == CRYPT_REENCRYPT_ENCRYPT && rh->data_shift &&
3019 rh->jobj_segment_moved) {
3020 crypt_storage_wrapper_destroy(rh->cw1);
3021 log_dbg(cd, "Reinitializing old segment storage wrapper for moved segment.");
3022 r = crypt_storage_wrapper_init(cd, &rh->cw1, crypt_data_device(cd),
3023 LUKS2_reencrypt_get_data_offset_moved(hdr),
3024 crypt_get_iv_offset(cd),
3025 reencrypt_get_sector_size_old(hdr),
3026 reencrypt_segment_cipher_old(hdr),
3027 crypt_volume_key_by_id(rh->vks, rh->digest_old),
3030 log_err(cd, _("Failed to initialize old segment storage wrapper."));
3031 return REENC_ROLLBACK;
3035 rh->read = crypt_storage_wrapper_read(rh->cw1, rh->offset, rh->reenc_buffer, rh->length);
3037 /* severity normal */
3038 log_err(cd, _("Failed to read hotzone area starting at %" PRIu64 "."), rh->offset);
3039 return REENC_ROLLBACK;
3042 /* metadata commit point */
3043 r = reencrypt_hotzone_protect_final(cd, hdr, rh, rh->reenc_buffer, rh->read);
3045 /* severity normal */
3046 log_err(cd, _("Failed to write reencryption resilience metadata."));
3047 return REENC_ROLLBACK;
3050 r = crypt_storage_wrapper_decrypt(rh->cw1, rh->offset, rh->reenc_buffer, rh->read);
3052 /* severity normal */
3053 log_err(cd, _("Decryption failed."));
3054 return REENC_ROLLBACK;
3056 if (rh->read != crypt_storage_wrapper_encrypt_write(rh->cw2, rh->offset, rh->reenc_buffer, rh->read)) {
3057 /* severity fatal */
3058 log_err(cd, _("Failed to write hotzone area starting at %" PRIu64 "."), rh->offset);
3062 if (rh->rp.type != REENC_PROTECTION_NONE && crypt_storage_wrapper_datasync(rh->cw2)) {
3063 log_err(cd, _("Failed to sync data."));
3067 /* metadata commit safe point */
3068 r = reencrypt_assign_segments(cd, hdr, rh, 0, rh->rp.type != REENC_PROTECTION_NONE);
3070 /* severity fatal */
3071 log_err(cd, _("Failed to update metadata after current reencryption hotzone completed."));
3076 /* severity normal */
3077 log_dbg(cd, "Resuming device %s", rh->hotzone_name);
3078 r = dm_resume_device(cd, rh->hotzone_name, DM_RESUME_PRIVATE);
3080 log_err(cd, _("Failed to resume device %s."), rh->hotzone_name);
3088 static int reencrypt_erase_backup_segments(struct crypt_device *cd,
3089 struct luks2_hdr *hdr)
3091 int segment = LUKS2_get_segment_id_by_flag(hdr, "backup-previous");
3093 if (LUKS2_digest_segment_assign(cd, hdr, segment, CRYPT_ANY_DIGEST, 0, 0))
3095 json_object_object_del_by_uint(LUKS2_get_segments_jobj(hdr), segment);
3097 segment = LUKS2_get_segment_id_by_flag(hdr, "backup-final");
3099 if (LUKS2_digest_segment_assign(cd, hdr, segment, CRYPT_ANY_DIGEST, 0, 0))
3101 json_object_object_del_by_uint(LUKS2_get_segments_jobj(hdr), segment);
3103 segment = LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment");
3105 if (LUKS2_digest_segment_assign(cd, hdr, segment, CRYPT_ANY_DIGEST, 0, 0))
3107 json_object_object_del_by_uint(LUKS2_get_segments_jobj(hdr), segment);
3113 static int reencrypt_wipe_moved_segment(struct crypt_device *cd, struct luks2_hdr *hdr, struct luks2_reenc_context *rh)
3116 uint64_t offset, length;
3118 if (rh->jobj_segment_moved) {
3119 offset = json_segment_get_offset(rh->jobj_segment_moved, 0);
3120 length = json_segment_get_size(rh->jobj_segment_moved, 0);
3121 log_dbg(cd, "Wiping %" PRIu64 " bytes of backup segment data at offset %" PRIu64,
3123 r = crypt_wipe_device(cd, crypt_data_device(cd), CRYPT_WIPE_RANDOM,
3124 offset, length, 1024 * 1024, NULL, NULL);
3130 static int reencrypt_teardown_ok(struct crypt_device *cd, struct luks2_hdr *hdr, struct luks2_reenc_context *rh)
3134 bool finished = !(rh->device_size > rh->progress);
3136 if (rh->rp.type == REENC_PROTECTION_NONE &&
3137 LUKS2_hdr_write(cd, hdr)) {
3138 log_err(cd, _("Failed to write LUKS2 metadata."));
3143 r = LUKS2_reload(cd, rh->device_name, rh->vks, rh->device_size, rh->flags);
3145 log_err(cd, _("Failed to reload device %s."), rh->device_name);
3147 r = dm_resume_device(cd, rh->device_name, DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH);
3149 log_err(cd, _("Failed to resume device %s."), rh->device_name);
3151 dm_remove_device(cd, rh->overlay_name, 0);
3152 dm_remove_device(cd, rh->hotzone_name, 0);
3154 if (!r && finished && rh->mode == CRYPT_REENCRYPT_DECRYPT &&
3155 !dm_flags(cd, DM_LINEAR, &dmt_flags) && (dmt_flags & DM_DEFERRED_SUPPORTED))
3156 dm_remove_device(cd, rh->device_name, CRYPT_DEACTIVATE_DEFERRED);
3160 if (reencrypt_wipe_moved_segment(cd, hdr, rh))
3161 log_err(cd, _("Failed to wipe backup segment data."));
3162 if (reencrypt_get_data_offset_new(hdr) && LUKS2_set_keyslots_size(cd, hdr, reencrypt_get_data_offset_new(hdr)))
3163 log_dbg(cd, "Failed to set new keyslots area size.");
3164 if (rh->digest_old >= 0 && rh->digest_new != rh->digest_old)
3165 for (i = 0; i < LUKS2_KEYSLOTS_MAX; i++)
3166 if (LUKS2_digest_by_keyslot(hdr, i) == rh->digest_old)
3167 crypt_keyslot_destroy(cd, i);
3168 crypt_keyslot_destroy(cd, rh->reenc_keyslot);
3169 if (reencrypt_erase_backup_segments(cd, hdr))
3170 log_dbg(cd, "Failed to erase backup segments");
3172 /* do we need atomic erase? */
3173 if (reencrypt_update_flag(cd, 0, true))
3174 log_err(cd, _("Failed to disable reencryption requirement flag."));
3180 static void reencrypt_teardown_fatal(struct crypt_device *cd, struct luks2_hdr *hdr, struct luks2_reenc_context *rh)
3182 log_err(cd, _("Fatal error while reencrypting chunk starting at %" PRIu64 ", %" PRIu64 " sectors long."),
3183 (rh->offset >> SECTOR_SHIFT) + crypt_get_data_offset(cd), rh->length >> SECTOR_SHIFT);
3186 log_err(cd, "Reencryption was run in online mode.");
3187 if (dm_status_suspended(cd, rh->hotzone_name) > 0) {
3188 log_dbg(cd, "Hotzone device %s suspended, replacing with dm-error.", rh->hotzone_name);
3189 if (dm_error_device(cd, rh->hotzone_name)) {
3190 log_err(cd, _("Failed to replace suspended device %s with dm-error target."), rh->hotzone_name);
3191 log_err(cd, _("Do not resume the device unless replaced with error target manually."));
3197 static int reencrypt_teardown(struct crypt_device *cd, struct luks2_hdr *hdr,
3198 struct luks2_reenc_context *rh, reenc_status_t rs, bool interrupted,
3199 int (*progress)(uint64_t size, uint64_t offset, void *usrptr))
3205 if (progress && !interrupted)
3206 progress(rh->device_size, rh->progress, NULL);
3207 r = reencrypt_teardown_ok(cd, hdr, rh);
3210 reencrypt_teardown_fatal(cd, hdr, rh);
3216 /* this frees reencryption lock */
3217 LUKS2_reenc_context_free(cd, rh);
3218 crypt_set_reenc_context(cd, NULL);
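/*
 * crypt_reencrypt() below resumes reencryption previously initialized by
 * crypt_reencrypt_init_by_passphrase() (or the keyring variant) and loops
 * over hotzones until finished or interrupted by the progress callback.
 *
 * Minimal offline usage sketch (illustrative only; error handling is
 * omitted and the device path, passphrase, cipher and keyslot numbers are
 * placeholders):
 *
 *   struct crypt_device *cd;
 *   struct crypt_params_luks2 luks2 = { .sector_size = 512 };
 *   struct crypt_params_reencrypt rparams = {
 *           .mode       = CRYPT_REENCRYPT_REENCRYPT,
 *           .direction  = CRYPT_REENCRYPT_FORWARD,
 *           .resilience = "checksum",
 *           .hash       = "sha256",
 *           .luks2      = &luks2,
 *   };
 *
 *   crypt_init(&cd, "/dev/sdb");
 *   crypt_load(cd, CRYPT_LUKS2, NULL);
 *   // the new volume key is expected to sit in an unbound keyslot already,
 *   // e.g. added with crypt_keyslot_add_by_key(..., CRYPT_VOLUME_KEY_NO_SEGMENT)
 *   crypt_reencrypt_init_by_passphrase(cd, NULL, "secret", 6,
 *                                      CRYPT_ANY_SLOT, 1,
 *                                      "aes", "xts-plain64", &rparams);
 *   crypt_reencrypt(cd, NULL);
 *   crypt_free(cd);
 */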
3223 int crypt_reencrypt(struct crypt_device *cd,
3224 int (*progress)(uint64_t size, uint64_t offset, void *usrptr))
3227 crypt_reencrypt_info ri;
3228 struct luks2_hdr *hdr;
3229 struct luks2_reenc_context *rh;
3233 if (onlyLUKS2mask(cd, CRYPT_REQUIREMENT_ONLINE_REENCRYPT))
3236 hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
3238 ri = LUKS2_reenc_status(hdr);
3239 if (ri > CRYPT_REENCRYPT_CLEAN) {
3240 log_err(cd, _("Cannot proceed with reencryption. Unexpected reencryption status."));
3244 rh = crypt_get_reenc_context(cd);
3245 if (!rh || (!rh->reenc_lock && crypt_metadata_locking_enabled())) {
3246 log_err(cd, _("Missing or invalid reencrypt context."));
3250 log_dbg(cd, "Resuming LUKS2 reencryption.");
3252 if (rh->online && reencrypt_init_device_stack(cd, rh)) {
3253 log_err(cd, _("Failed to initialize reencryption device stack."));
3257 log_dbg(cd, "Progress %" PRIu64 ", device_size %" PRIu64, rh->progress, rh->device_size);
3261 while (!quit && (rh->device_size > rh->progress)) {
3262 rs = reencrypt_step(cd, hdr, rh, rh->device_size, rh->online);
3266 log_dbg(cd, "Progress %" PRIu64 ", device_size %" PRIu64, rh->progress, rh->device_size);
3267 if (progress && progress(rh->device_size, rh->progress, NULL))
3270 r = reencrypt_context_update(cd, rh);
3272 log_err(cd, _("Failed to update reencryption context."));
3277 log_dbg(cd, "Next reencryption offset will be %" PRIu64 " sectors.", rh->offset);
3278 log_dbg(cd, "Next reencryption chunk size will be %" PRIu64 " sectors.", rh->length);
3281 r = reencrypt_teardown(cd, hdr, rh, rs, quit, progress);
3285 static int reencrypt_recovery(struct crypt_device *cd,
3286 struct luks2_hdr *hdr,
3287 uint64_t device_size,
3288 struct volume_key *vks)
3291 struct luks2_reenc_context *rh = NULL;
3293 r = reencrypt_load(cd, hdr, device_size, NULL, &rh);
3295 log_err(cd, _("Failed to load LUKS2 reencryption context."));
3299 r = reencrypt_recover_segment(cd, hdr, rh, vks);
3303 if ((r = reencrypt_assign_segments(cd, hdr, rh, 0, 0)))
3306 r = reencrypt_context_update(cd, rh);
3308 log_err(cd, _("Failed to update reencryption context."));
3312 r = reencrypt_teardown_ok(cd, hdr, rh);
3314 r = LUKS2_hdr_write(cd, hdr);
3316 LUKS2_reenc_context_free(cd, rh);
3322 * Use only for calculation of the minimal data device size.
3323 * The real data offset is taken directly from segments!
3325 int LUKS2_reencrypt_data_offset(struct luks2_hdr *hdr, bool blockwise)
3327 crypt_reencrypt_info ri = LUKS2_reenc_status(hdr);
3328 uint64_t data_offset = LUKS2_get_data_offset(hdr);
3330 if (ri == CRYPT_REENCRYPT_CLEAN && reencrypt_direction(hdr) == CRYPT_REENCRYPT_FORWARD)
3331 data_offset += reencrypt_data_shift(hdr) >> SECTOR_SHIFT;
3333 return blockwise ? data_offset : data_offset << SECTOR_SHIFT;
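/*
 * Check that the data device is large enough for the requested (or
 * detected) size, taking a possible backward data shift into account.
 * All sizes here are in bytes; the usable data area size is returned
 * in *dev_size.
 */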
3337 int luks2_check_device_size(struct crypt_device *cd, struct luks2_hdr *hdr, uint64_t check_size, uint64_t *dev_size, bool activation, bool dynamic)
3340 uint64_t data_offset, real_size = 0;
3342 if (reencrypt_direction(hdr) == CRYPT_REENCRYPT_BACKWARD &&
3343 (LUKS2_get_segment_by_flag(hdr, "backup-moved-segment") || dynamic))
3344 check_size += reencrypt_data_shift(hdr);
3346 r = device_check_access(cd, crypt_data_device(cd), activation ? DEV_EXCL : DEV_OK);
3350 data_offset = LUKS2_reencrypt_data_offset(hdr, false);
3352 r = device_check_size(cd, crypt_data_device(cd), data_offset, 1);
3356 r = device_size(crypt_data_device(cd), &real_size);
3360 log_dbg(cd, "Required minimal device size: %" PRIu64 " (%" PRIu64 " sectors)"
3361 ", real device size: %" PRIu64 " (%" PRIu64 " sectors)\n"
3362 "calculated device size: %" PRIu64 " (%" PRIu64 " sectors)",
3363 check_size, check_size >> SECTOR_SHIFT, real_size, real_size >> SECTOR_SHIFT,
3364 real_size - data_offset, (real_size - data_offset) >> SECTOR_SHIFT);
3366 if (real_size < data_offset || (check_size && (real_size - data_offset) < check_size)) {
3367 log_err(cd, _("Device %s is too small."), device_path(crypt_data_device(cd)));
3371 *dev_size = real_size - data_offset;
3376 /* returns keyslot number on success (>= 0) or negative errno otherwise */
3377 int LUKS2_reencrypt_locked_recovery_by_passphrase(struct crypt_device *cd,
3380 const char *passphrase,
3381 size_t passphrase_size,
3383 struct volume_key **vks)
3385 uint64_t minimal_size, device_size;
3386 int keyslot, r = -EINVAL;
3387 struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
3388 struct volume_key *vk = NULL, *_vks = NULL;
3390 log_dbg(cd, "Entering reencryption crash recovery.");
3392 if (LUKS2_get_data_size(hdr, &minimal_size, NULL))
3395 r = LUKS2_keyslot_open_all_segments(cd, keyslot_old, keyslot_new,
3396 passphrase, passphrase_size, &_vks);
3401 if (crypt_use_keyring_for_vk(cd))
3405 r = LUKS2_volume_key_load_in_keyring_by_digest(cd, hdr, vk, crypt_volume_key_get_id(vk));
3408 vk = crypt_volume_key_next(vk);
3411 if (luks2_check_device_size(cd, hdr, minimal_size, &device_size, true, false))
3414 r = reencrypt_recovery(cd, hdr, device_size, _vks);
3417 MOVE_REF(*vks, _vks);
3420 crypt_drop_keyring_key(cd, _vks);
3421 crypt_free_volume_key(_vks);
3423 return r < 0 ? r : keyslot;
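/*
 * Report the reencryption status and, when params is provided, fill it
 * with values recovered from metadata (mode, direction, resilience, hash,
 * data shift and flags).
 */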
3426 crypt_reencrypt_info LUKS2_reencrypt_status(struct crypt_device *cd, struct crypt_params_reencrypt *params)
3428 crypt_reencrypt_info ri;
3429 struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
3431 ri = LUKS2_reenc_status(hdr);
3432 if (ri == CRYPT_REENCRYPT_NONE || ri == CRYPT_REENCRYPT_INVALID || !params)
3435 params->mode = reencrypt_mode(hdr);
3436 params->direction = reencrypt_direction(hdr);
3437 params->resilience = reencrypt_resilience_type(hdr);
3438 params->hash = reencrypt_resilience_hash(hdr);
3439 params->data_shift = reencrypt_data_shift(hdr) >> SECTOR_SHIFT;
3440 params->max_hotzone_size = 0;
3441 if (LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment") >= 0)
3442 params->flags |= CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT;