68d3194222c4976ba1bcc11935e433d503c0b9ac
[platform/upstream/cryptsetup.git] / lib / luks2 / luks2_reencrypt.c
1 /*
2  * LUKS - Linux Unified Key Setup v2, reencryption helpers
3  *
4  * Copyright (C) 2015-2020, Red Hat, Inc. All rights reserved.
5  * Copyright (C) 2015-2020, Ondrej Kozina
6  *
7  * This program is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU General Public License
9  * as published by the Free Software Foundation; either version 2
10  * of the License, or (at your option) any later version.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20  */
21
22 #include "luks2_internal.h"
23 #include "utils_device_locking.h"
24
25 static json_object *reencrypt_segment(struct luks2_hdr *hdr, unsigned new)
26 {
27         return LUKS2_get_segment_by_flag(hdr, new ? "backup-final" : "backup-previous");
28 }
29
30 static json_object *reencrypt_segment_new(struct luks2_hdr *hdr)
31 {
32         return reencrypt_segment(hdr, 1);
33 }
34
35 static json_object *reencrypt_segment_old(struct luks2_hdr *hdr)
36 {
37         return reencrypt_segment(hdr, 0);
38 }
39
40 static const char *reencrypt_segment_cipher_new(struct luks2_hdr *hdr)
41 {
42         return json_segment_get_cipher(reencrypt_segment(hdr, 1));
43 }
44
45 static const char *reencrypt_segment_cipher_old(struct luks2_hdr *hdr)
46 {
47         return json_segment_get_cipher(reencrypt_segment(hdr, 0));
48 }
49
50 static int reencrypt_get_sector_size_new(struct luks2_hdr *hdr)
51 {
52         return json_segment_get_sector_size(reencrypt_segment(hdr, 1));
53 }
54
55 static int reencrypt_get_sector_size_old(struct luks2_hdr *hdr)
56 {
57         return json_segment_get_sector_size(reencrypt_segment(hdr, 0));
58 }
59
60 static uint64_t reencrypt_data_offset(struct luks2_hdr *hdr, unsigned new)
61 {
62         json_object *jobj = reencrypt_segment(hdr, new);
63         if (jobj)
64                 return json_segment_get_offset(jobj, 0);
65
66         return LUKS2_get_data_offset(hdr) << SECTOR_SHIFT;
67 }
68
69 static uint64_t LUKS2_reencrypt_get_data_offset_moved(struct luks2_hdr *hdr)
70 {
71         json_object *jobj_segment = LUKS2_get_segment_by_flag(hdr, "backup-moved-segment");
72
73         if (!jobj_segment)
74                 return 0;
75
76         return json_segment_get_offset(jobj_segment, 0);
77 }
78
/* Byte offset where the resulting segment's data starts. */
static uint64_t reencrypt_get_data_offset_new(struct luks2_hdr *hdr)
{
	return reencrypt_data_offset(hdr, 1);
}

/* Byte offset where the original segment's data starts. */
static uint64_t reencrypt_get_data_offset_old(struct luks2_hdr *hdr)
{
	return reencrypt_data_offset(hdr, 0);
}
88
/* Digest id bound to the new/old backup segment; negative error when the
 * segment cannot be found. */
static int reencrypt_digest(struct luks2_hdr *hdr, unsigned new)
{
	const char *flag = new ? "backup-final" : "backup-previous";
	int segment = LUKS2_get_segment_id_by_flag(hdr, flag);

	return segment < 0 ? segment : LUKS2_digest_by_segment(hdr, segment);
}
98
/* Digest id of the resulting (backup-final) segment. */
int LUKS2_reencrypt_digest_new(struct luks2_hdr *hdr)
{
	return reencrypt_digest(hdr, 1);
}

/* Digest id of the original (backup-previous) segment. */
int LUKS2_reencrypt_digest_old(struct luks2_hdr *hdr)
{
	return reencrypt_digest(hdr, 0);
}
108
109 /* none, checksums, journal or shift */
110 static const char *reencrypt_resilience_type(struct luks2_hdr *hdr)
111 {
112         json_object *jobj_keyslot, *jobj_area, *jobj_type;
113         int ks = LUKS2_find_keyslot(hdr, "reencrypt");
114
115         if (ks < 0)
116                 return NULL;
117
118         jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, ks);
119
120         json_object_object_get_ex(jobj_keyslot, "area", &jobj_area);
121         if (!json_object_object_get_ex(jobj_area, "type", &jobj_type))
122                 return NULL;
123
124         return json_object_get_string(jobj_type);
125 }
126
127 static const char *reencrypt_resilience_hash(struct luks2_hdr *hdr)
128 {
129         json_object *jobj_keyslot, *jobj_area, *jobj_type, *jobj_hash;
130         int ks = LUKS2_find_keyslot(hdr, "reencrypt");
131
132         if (ks < 0)
133                 return NULL;
134
135         jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, ks);
136
137         json_object_object_get_ex(jobj_keyslot, "area", &jobj_area);
138         if (!json_object_object_get_ex(jobj_area, "type", &jobj_type))
139                 return NULL;
140         if (strcmp(json_object_get_string(jobj_type), "checksum"))
141                 return NULL;
142         if (!json_object_object_get_ex(jobj_area, "hash", &jobj_hash))
143                 return NULL;
144
145         return json_object_get_string(jobj_hash);
146 }
147
148 static uint32_t reencrypt_alignment(struct luks2_hdr *hdr)
149 {
150         json_object *jobj_keyslot, *jobj_area, *jobj_type, *jobj_hash, *jobj_sector_size;
151         int ks = LUKS2_find_keyslot(hdr, "reencrypt");
152
153         if (ks < 0)
154                 return 0;
155
156         jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, ks);
157
158         json_object_object_get_ex(jobj_keyslot, "area", &jobj_area);
159         if (!json_object_object_get_ex(jobj_area, "type", &jobj_type))
160                 return 0;
161         if (strcmp(json_object_get_string(jobj_type), "checksum"))
162                 return 0;
163         if (!json_object_object_get_ex(jobj_area, "hash", &jobj_hash))
164                 return 0;
165         if (!json_object_object_get_ex(jobj_area, "sector_size", &jobj_sector_size))
166                 return 0;
167
168         return crypt_jobj_get_uint32(jobj_sector_size);
169 }
170
/*
 * Build the post-step segment map for encryption with data shift.
 * Segments before the one in reencryption are copied verbatim; the
 * reencrypted zone is merged into the segment that follows it (or, when
 * none follows, the hot segment itself is reused with its in-reencryption
 * flag dropped).
 *
 * Returns a new json object (caller owns the reference) or NULL on error.
 */
static json_object *_enc_create_segments_shift_after(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	struct luks2_reenc_context *rh,
	uint64_t data_offset)
{
	int reenc_seg, i = 0;
	json_object *jobj_copy, *jobj_seg_new = NULL, *jobj_segs_post = json_object_new_object();
	uint64_t tmp;

	if (!rh->jobj_segs_hot || !jobj_segs_post)
		goto err;

	/* no hot segments at all: return the empty map */
	if (json_segments_count(rh->jobj_segs_hot) == 0)
		return jobj_segs_post;

	reenc_seg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot);
	if (reenc_seg < 0)
		goto err;

	/* keep all segments preceding the one in reencryption unchanged */
	while (i < reenc_seg) {
		jobj_copy = json_segments_get_segment(rh->jobj_segs_hot, i);
		if (!jobj_copy)
			goto err;
		json_object_object_add_by_uint(jobj_segs_post, i++, json_object_get(jobj_copy));
	}

	if (json_object_copy(json_segments_get_segment(rh->jobj_segs_hot, reenc_seg + 1), &jobj_seg_new)) {
		/* nothing follows the hot zone: reuse the hot segment itself */
		if (json_object_copy(json_segments_get_segment(rh->jobj_segs_hot, reenc_seg), &jobj_seg_new))
			goto err;
		json_segment_remove_flag(jobj_seg_new, "in-reencryption");
		tmp = rh->length;
	} else {
		/* merge: move the following segment back over the finished hot zone */
		json_object_object_add(jobj_seg_new, "offset", crypt_jobj_new_uint64(rh->offset + data_offset));
		json_object_object_add(jobj_seg_new, "iv_tweak", crypt_jobj_new_uint64(rh->offset >> SECTOR_SHIFT));
		tmp = json_segment_get_size(jobj_seg_new, 0) + rh->length;
	}

	/* alter size of new segment, reenc_seg == 0 we're finished */
	json_object_object_add(jobj_seg_new, "size", reenc_seg > 0 ? crypt_jobj_new_uint64(tmp) : json_object_new_string("dynamic"));
	json_object_object_add_by_uint(jobj_segs_post, reenc_seg, jobj_seg_new);

	return jobj_segs_post;
err:
	json_object_put(jobj_segs_post);
	return NULL;
}
217
/*
 * Build the hot (in-progress) segment map for encryption with data move:
 * linear segments preceding the hot zone are kept, the last linear segment
 * is shrunk by the hot-zone length, the hot crypt segment is inserted, and
 * any already-encrypted tail segment is appended after it.
 *
 * Returns a new json object (caller owns the reference) or NULL on error.
 */
static json_object *reencrypt_make_hot_segments_encrypt_shift(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	struct luks2_reenc_context *rh,
	uint64_t data_offset)
{
	int sg, crypt_seg, i = 0;
	uint64_t segment_size;
	json_object *jobj_seg_shrunk, *jobj_seg_new, *jobj_copy, *jobj_enc_seg = NULL,
		     *jobj_segs_hot = json_object_new_object();

	if (!jobj_segs_hot)
		return NULL;

	crypt_seg = LUKS2_segment_by_type(hdr, "crypt");

	/* FIXME: This is hack. Find proper way to fix it. */
	sg = LUKS2_last_segment_by_type(hdr, "linear");
	if (rh->offset && sg < 0)
		goto err;
	if (sg < 0)
		return jobj_segs_hot;

	/* hot zone: new cipher, marked in-reencryption */
	jobj_enc_seg = json_segment_create_crypt(data_offset + rh->offset,
						      rh->offset >> SECTOR_SHIFT,
						      &rh->length,
						      reencrypt_segment_cipher_new(hdr),
						      reencrypt_get_sector_size_new(hdr),
						      1);

	/* copy all segments preceding the last linear one unchanged */
	while (i < sg) {
		jobj_copy = LUKS2_get_segment_jobj(hdr, i);
		if (!jobj_copy)
			goto err;
		json_object_object_add_by_uint(jobj_segs_hot, i++, json_object_get(jobj_copy));
	}

	/* shrink the last linear segment by the hot-zone length (if anything remains) */
	segment_size = LUKS2_segment_size(hdr, sg, 0);
	if (segment_size > rh->length) {
		jobj_seg_shrunk = NULL;
		if (json_object_copy(LUKS2_get_segment_jobj(hdr, sg), &jobj_seg_shrunk))
			goto err;
		json_object_object_add(jobj_seg_shrunk, "size", crypt_jobj_new_uint64(segment_size - rh->length));
		json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_seg_shrunk);
	}

	json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_enc_seg);
	jobj_enc_seg = NULL; /* see err: label */

	/* first crypt segment after encryption ? */
	if (crypt_seg >= 0) {
		jobj_seg_new = LUKS2_get_segment_jobj(hdr, crypt_seg);
		if (!jobj_seg_new)
			goto err;
		json_object_object_add_by_uint(jobj_segs_hot, sg, json_object_get(jobj_seg_new));
	}

	return jobj_segs_hot;
err:
	json_object_put(jobj_enc_seg);
	json_object_put(jobj_segs_hot);

	return NULL;
}
281
282 static json_object *reencrypt_make_segment_new(struct crypt_device *cd,
283                 struct luks2_hdr *hdr,
284                 const struct luks2_reenc_context *rh,
285                 uint64_t data_offset,
286                 uint64_t segment_offset,
287                 uint64_t iv_offset,
288                 const uint64_t *segment_length)
289 {
290         switch (rh->mode) {
291         case CRYPT_REENCRYPT_REENCRYPT:
292         case CRYPT_REENCRYPT_ENCRYPT:
293                 return json_segment_create_crypt(data_offset + segment_offset,
294                                                   crypt_get_iv_offset(cd) + (iv_offset >> SECTOR_SHIFT),
295                                                   segment_length,
296                                                   reencrypt_segment_cipher_new(hdr),
297                                                   reencrypt_get_sector_size_new(hdr), 0);
298         case CRYPT_REENCRYPT_DECRYPT:
299                 return json_segment_create_linear(data_offset + segment_offset, segment_length, 0);
300         }
301
302         return NULL;
303 }
304
305 static json_object *reencrypt_make_post_segments_forward(struct crypt_device *cd,
306         struct luks2_hdr *hdr,
307         struct luks2_reenc_context *rh,
308         uint64_t data_offset)
309 {
310         int reenc_seg;
311         json_object *jobj_new_seg_after, *jobj_old_seg, *jobj_old_seg_copy = NULL,
312                     *jobj_segs_post = json_object_new_object();
313         uint64_t fixed_length = rh->offset + rh->length;
314
315         if (!rh->jobj_segs_hot || !jobj_segs_post)
316                 goto err;
317
318         reenc_seg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot);
319         if (reenc_seg < 0)
320                 return NULL;
321
322         jobj_old_seg = json_segments_get_segment(rh->jobj_segs_hot, reenc_seg + 1);
323
324         /*
325          * if there's no old segment after reencryption, we're done.
326          * Set size to 'dynamic' again.
327          */
328         jobj_new_seg_after = reencrypt_make_segment_new(cd, hdr, rh, data_offset, 0, 0, jobj_old_seg ? &fixed_length : NULL);
329         if (!jobj_new_seg_after)
330                 goto err;
331         json_object_object_add_by_uint(jobj_segs_post, 0, jobj_new_seg_after);
332
333         if (jobj_old_seg) {
334                 if (rh->fixed_length) {
335                         if (json_object_copy(jobj_old_seg, &jobj_old_seg_copy))
336                                 goto err;
337                         jobj_old_seg = jobj_old_seg_copy;
338                         fixed_length = rh->device_size - fixed_length;
339                         json_object_object_add(jobj_old_seg, "size", crypt_jobj_new_uint64(fixed_length));
340                 } else
341                         json_object_get(jobj_old_seg);
342                 json_object_object_add_by_uint(jobj_segs_post, 1, jobj_old_seg);
343         }
344
345         return jobj_segs_post;
346 err:
347         json_object_put(jobj_segs_post);
348         return NULL;
349 }
350
351 static json_object *reencrypt_make_post_segments_backward(struct crypt_device *cd,
352         struct luks2_hdr *hdr,
353         struct luks2_reenc_context *rh,
354         uint64_t data_offset)
355 {
356         int reenc_seg;
357         uint64_t fixed_length;
358
359         json_object *jobj_new_seg_after, *jobj_old_seg,
360                     *jobj_segs_post = json_object_new_object();
361
362         if (!rh->jobj_segs_hot || !jobj_segs_post)
363                 goto err;
364
365         reenc_seg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot);
366         if (reenc_seg < 0)
367                 return NULL;
368
369         jobj_old_seg = json_segments_get_segment(rh->jobj_segs_hot, reenc_seg - 1);
370         if (jobj_old_seg)
371                 json_object_object_add_by_uint(jobj_segs_post, reenc_seg - 1, json_object_get(jobj_old_seg));
372         if (rh->fixed_length && rh->offset) {
373                 fixed_length = rh->device_size - rh->offset;
374                 jobj_new_seg_after = reencrypt_make_segment_new(cd, hdr, rh, data_offset, rh->offset, rh->offset, &fixed_length);
375         } else
376                 jobj_new_seg_after = reencrypt_make_segment_new(cd, hdr, rh, data_offset, rh->offset, rh->offset, NULL);
377         if (!jobj_new_seg_after)
378                 goto err;
379         json_object_object_add_by_uint(jobj_segs_post, reenc_seg, jobj_new_seg_after);
380
381         return jobj_segs_post;
382 err:
383         json_object_put(jobj_segs_post);
384         return NULL;
385 }
386
387 static json_object *reencrypt_make_segment_reencrypt(struct crypt_device *cd,
388                 struct luks2_hdr *hdr,
389                 const struct luks2_reenc_context *rh,
390                 uint64_t data_offset,
391                 uint64_t segment_offset,
392                 uint64_t iv_offset,
393                 const uint64_t *segment_length)
394 {
395         switch (rh->mode) {
396         case CRYPT_REENCRYPT_REENCRYPT:
397         case CRYPT_REENCRYPT_ENCRYPT:
398                 return json_segment_create_crypt(data_offset + segment_offset,
399                                 crypt_get_iv_offset(cd) + (iv_offset >> SECTOR_SHIFT),
400                                 segment_length,
401                                 reencrypt_segment_cipher_new(hdr),
402                                 reencrypt_get_sector_size_new(hdr), 1);
403         case CRYPT_REENCRYPT_DECRYPT:
404                 return json_segment_create_linear(data_offset + segment_offset, segment_length, 1);
405         }
406
407         return NULL;
408 }
409
410 static json_object *reencrypt_make_segment_old(struct crypt_device *cd,
411                 struct luks2_hdr *hdr,
412                 const struct luks2_reenc_context *rh,
413                 uint64_t data_offset,
414                 uint64_t segment_offset,
415                 const uint64_t *segment_length)
416 {
417         json_object *jobj_old_seg = NULL;
418
419         switch (rh->mode) {
420         case CRYPT_REENCRYPT_REENCRYPT:
421         case CRYPT_REENCRYPT_DECRYPT:
422                 jobj_old_seg = json_segment_create_crypt(data_offset + segment_offset,
423                                                     crypt_get_iv_offset(cd) + (segment_offset >> SECTOR_SHIFT),
424                                                     segment_length,
425                                                     reencrypt_segment_cipher_old(hdr),
426                                                     reencrypt_get_sector_size_old(hdr),
427                                                     0);
428                 break;
429         case CRYPT_REENCRYPT_ENCRYPT:
430                 jobj_old_seg = json_segment_create_linear(data_offset + segment_offset, segment_length, 0);
431         }
432
433         return jobj_old_seg;
434 }
435
/*
 * Build the hot segment map for the forward direction:
 * [ new-format data ][ hot zone in-reencryption ][ old-format remainder ].
 * The leading new segment exists only once some data was converted
 * (rh->offset != 0); the trailing old segment only if the hot zone does
 * not reach the device end.
 *
 * Returns a new json object (caller owns the reference) or NULL on error.
 */
static json_object *reencrypt_make_hot_segments_forward(struct crypt_device *cd,
		struct luks2_hdr *hdr,
		struct luks2_reenc_context *rh,
		uint64_t device_size,
		uint64_t data_offset)
{
	json_object *jobj_segs_hot, *jobj_reenc_seg, *jobj_old_seg, *jobj_new_seg;
	uint64_t fixed_length, tmp = rh->offset + rh->length;
	unsigned int sg = 0;

	jobj_segs_hot = json_object_new_object();
	if (!jobj_segs_hot)
		return NULL;

	/* already-converted head, new format */
	if (rh->offset) {
		jobj_new_seg = reencrypt_make_segment_new(cd, hdr, rh, data_offset, 0, 0, &rh->offset);
		if (!jobj_new_seg)
			goto err;
		json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_new_seg);
	}

	/* the zone currently being reencrypted */
	jobj_reenc_seg = reencrypt_make_segment_reencrypt(cd, hdr, rh, data_offset, rh->offset, rh->offset, &rh->length);
	if (!jobj_reenc_seg)
		goto err;

	json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_reenc_seg);

	/* untouched tail, old format (shifted by data_shift when applicable) */
	if (tmp < device_size) {
		fixed_length = device_size - tmp;
		jobj_old_seg = reencrypt_make_segment_old(cd, hdr, rh, data_offset + rh->data_shift, rh->offset + rh->length, rh->fixed_length ? &fixed_length : NULL);
		if (!jobj_old_seg)
			goto err;
		json_object_object_add_by_uint(jobj_segs_hot, sg, jobj_old_seg);
	}

	return jobj_segs_hot;
err:
	json_object_put(jobj_segs_hot);
	return NULL;
}
476
/*
 * Build the hot segment map for the backward direction:
 * [ old-format head ][ hot zone in-reencryption ][ new-format tail ].
 * The head reuses segment 0 from the header shrunk to rh->offset; the
 * tail exists only if the hot zone does not reach the device end.
 *
 * Returns a new json object (caller owns the reference) or NULL on error.
 */
static json_object *reencrypt_make_hot_segments_backward(struct crypt_device *cd,
		struct luks2_hdr *hdr,
		struct luks2_reenc_context *rh,
		uint64_t device_size,
		uint64_t data_offset)
{
	json_object *jobj_reenc_seg, *jobj_new_seg, *jobj_old_seg = NULL,
		    *jobj_segs_hot = json_object_new_object();
	int sg = 0;
	uint64_t fixed_length, tmp = rh->offset + rh->length;

	if (!jobj_segs_hot)
		return NULL;

	/* untouched head, old format (copy of header segment 0, resized) */
	if (rh->offset) {
		if (json_object_copy(LUKS2_get_segment_jobj(hdr, 0), &jobj_old_seg))
			goto err;
		json_object_object_add(jobj_old_seg, "size", crypt_jobj_new_uint64(rh->offset));

		json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_old_seg);
	}

	/* the zone currently being reencrypted */
	jobj_reenc_seg = reencrypt_make_segment_reencrypt(cd, hdr, rh, data_offset, rh->offset, rh->offset, &rh->length);
	if (!jobj_reenc_seg)
		goto err;

	json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_reenc_seg);

	/* already-converted tail, new format */
	if (tmp < device_size) {
		fixed_length = device_size - tmp;
		jobj_new_seg = reencrypt_make_segment_new(cd, hdr, rh, data_offset, rh->offset + rh->length, rh->offset + rh->length, rh->fixed_length ? &fixed_length : NULL);
		if (!jobj_new_seg)
			goto err;
		json_object_object_add_by_uint(jobj_segs_hot, sg, jobj_new_seg);
	}

	return jobj_segs_hot;
err:
	json_object_put(jobj_segs_hot);
	return NULL;
}
518
519 static int reencrypt_make_hot_segments(struct crypt_device *cd,
520                 struct luks2_hdr *hdr,
521                 struct luks2_reenc_context *rh,
522                 uint64_t device_size,
523                 uint64_t data_offset)
524 {
525         rh->jobj_segs_hot = NULL;
526
527         if (rh->mode == CRYPT_REENCRYPT_ENCRYPT && rh->direction == CRYPT_REENCRYPT_BACKWARD &&
528             rh->data_shift && rh->jobj_segment_moved) {
529                 log_dbg(cd, "Calculating hot segments for encryption with data move.");
530                 rh->jobj_segs_hot = reencrypt_make_hot_segments_encrypt_shift(cd, hdr, rh, data_offset);
531         } else if (rh->direction == CRYPT_REENCRYPT_FORWARD) {
532                 log_dbg(cd, "Calculating hot segments (forward direction).");
533                 rh->jobj_segs_hot = reencrypt_make_hot_segments_forward(cd, hdr, rh, device_size, data_offset);
534         } else if (rh->direction == CRYPT_REENCRYPT_BACKWARD) {
535                 log_dbg(cd, "Calculating hot segments (backward direction).");
536                 rh->jobj_segs_hot = reencrypt_make_hot_segments_backward(cd, hdr, rh, device_size, data_offset);
537         }
538
539         return rh->jobj_segs_hot ? 0 : -EINVAL;
540 }
541
542 static int reencrypt_make_post_segments(struct crypt_device *cd,
543                 struct luks2_hdr *hdr,
544                 struct luks2_reenc_context *rh,
545                 uint64_t data_offset)
546 {
547         rh->jobj_segs_post = NULL;
548
549         if (rh->mode == CRYPT_REENCRYPT_ENCRYPT && rh->direction == CRYPT_REENCRYPT_BACKWARD &&
550             rh->data_shift && rh->jobj_segment_moved) {
551                 log_dbg(cd, "Calculating post segments for encryption with data move.");
552                 rh->jobj_segs_post = _enc_create_segments_shift_after(cd, hdr, rh, data_offset);
553         } else if (rh->direction == CRYPT_REENCRYPT_FORWARD) {
554                 log_dbg(cd, "Calculating post segments (forward direction).");
555                 rh->jobj_segs_post = reencrypt_make_post_segments_forward(cd, hdr, rh, data_offset);
556         } else if (rh->direction == CRYPT_REENCRYPT_BACKWARD) {
557                 log_dbg(cd, "Calculating segments (backward direction).");
558                 rh->jobj_segs_post = reencrypt_make_post_segments_backward(cd, hdr, rh, data_offset);
559         }
560
561         return rh->jobj_segs_post ? 0 : -EINVAL;
562 }
563
564 static uint64_t reencrypt_data_shift(struct luks2_hdr *hdr)
565 {
566         json_object *jobj_keyslot, *jobj_area, *jobj_data_shift;
567         int ks = LUKS2_find_keyslot(hdr, "reencrypt");
568
569         if (ks < 0)
570                 return 0;
571
572         jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, ks);
573
574         json_object_object_get_ex(jobj_keyslot, "area", &jobj_area);
575         if (!json_object_object_get_ex(jobj_area, "shift_size", &jobj_data_shift))
576                 return 0;
577
578         return crypt_jobj_get_uint64(jobj_data_shift);
579 }
580
581 static crypt_reencrypt_mode_info reencrypt_mode(struct luks2_hdr *hdr)
582 {
583         const char *mode;
584         crypt_reencrypt_mode_info mi = CRYPT_REENCRYPT_REENCRYPT;
585         json_object *jobj_keyslot, *jobj_mode;
586
587         jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, LUKS2_find_keyslot(hdr, "reencrypt"));
588         if (!jobj_keyslot)
589                 return mi;
590
591         json_object_object_get_ex(jobj_keyslot, "mode", &jobj_mode);
592         mode = json_object_get_string(jobj_mode);
593
594         /* validation enforces allowed values */
595         if (!strcmp(mode, "encrypt"))
596                 mi = CRYPT_REENCRYPT_ENCRYPT;
597         else if (!strcmp(mode, "decrypt"))
598                 mi = CRYPT_REENCRYPT_DECRYPT;
599
600         return mi;
601 }
602
603 static crypt_reencrypt_direction_info reencrypt_direction(struct luks2_hdr *hdr)
604 {
605         const char *value;
606         json_object *jobj_keyslot, *jobj_mode;
607         crypt_reencrypt_direction_info di = CRYPT_REENCRYPT_FORWARD;
608
609         jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, LUKS2_find_keyslot(hdr, "reencrypt"));
610         if (!jobj_keyslot)
611                 return di;
612
613         json_object_object_get_ex(jobj_keyslot, "direction", &jobj_mode);
614         value = json_object_get_string(jobj_mode);
615
616         /* validation enforces allowed values */
617         if (strcmp(value, "forward"))
618                 di = CRYPT_REENCRYPT_BACKWARD;
619
620         return di;
621 }
622
/* Internal result codes for a reencryption step (not part of the public API). */
typedef enum { REENC_OK = 0, REENC_ERR, REENC_ROLLBACK, REENC_FATAL } reenc_status_t;
624
/*
 * Release all resources held by a reencryption context: checksum-resilience
 * state, cached segment json objects, data buffer, storage wrappers, device
 * names, volume keys, the exclusive data-device reference and the reencrypt
 * lock, then the context itself. Safe to call with rh == NULL.
 */
void LUKS2_reenc_context_free(struct crypt_device *cd, struct luks2_reenc_context *rh)
{
	if (!rh)
		return;

	if (rh->rp.type == REENC_PROTECTION_CHECKSUM) {
		if (rh->rp.p.csum.ch) {
			crypt_hash_destroy(rh->rp.p.csum.ch);
			rh->rp.p.csum.ch = NULL;
		}
		if (rh->rp.p.csum.checksums) {
			/* NOTE(review): plain memset before free may be elided by the
			 * compiler; if checksums are sensitive, an explicit-zero helper
			 * would be safer — confirm intent */
			memset(rh->rp.p.csum.checksums, 0, rh->rp.p.csum.checksums_len);
			free(rh->rp.p.csum.checksums);
			rh->rp.p.csum.checksums = NULL;
		}
	}

	/* drop owned references to cached segment maps */
	json_object_put(rh->jobj_segs_hot);
	rh->jobj_segs_hot = NULL;
	json_object_put(rh->jobj_segs_post);
	rh->jobj_segs_post = NULL;
	json_object_put(rh->jobj_segment_old);
	rh->jobj_segment_old = NULL;
	json_object_put(rh->jobj_segment_new);
	rh->jobj_segment_new = NULL;
	json_object_put(rh->jobj_segment_moved);
	rh->jobj_segment_moved = NULL;

	free(rh->reenc_buffer);
	rh->reenc_buffer = NULL;
	crypt_storage_wrapper_destroy(rh->cw1);
	rh->cw1 = NULL;
	crypt_storage_wrapper_destroy(rh->cw2);
	rh->cw2 = NULL;

	free(rh->device_name);
	free(rh->overlay_name);
	free(rh->hotzone_name);
	crypt_drop_keyring_key(cd, rh->vks);
	crypt_free_volume_key(rh->vks);
	/* release device/lock last, after all users of them are gone */
	device_release_excl(cd, crypt_data_device(cd));
	crypt_unlock_internal(cd, rh->reenc_lock);
	free(rh);
}
669
/* Required hotzone alignment: the maximum of the data device block size
 * and both (old and new) encryption sector sizes. */
static size_t reencrypt_get_alignment(struct crypt_device *cd,
		struct luks2_hdr *hdr)
{
	size_t alignment = device_block_size(cd, crypt_data_device(cd));
	int ss_old = reencrypt_get_sector_size_old(hdr);
	int ss_new = reencrypt_get_sector_size_new(hdr);

	if (ss_old > 0 && (size_t)ss_old > alignment)
		alignment = (size_t)ss_old;
	if (ss_new > 0 && (size_t)ss_new > alignment)
		alignment = (size_t)ss_new;

	return alignment;
}
685
686 /* returns void because it must not fail on valid LUKS2 header */
687 static void _load_backup_segments(struct luks2_hdr *hdr,
688                 struct luks2_reenc_context *rh)
689 {
690         int segment = LUKS2_get_segment_id_by_flag(hdr, "backup-final");
691
692         if (segment >= 0) {
693                 rh->jobj_segment_new = json_object_get(LUKS2_get_segment_jobj(hdr, segment));
694                 rh->digest_new = LUKS2_digest_by_segment(hdr, segment);
695         } else {
696                 rh->jobj_segment_new = NULL;
697                 rh->digest_new = -ENOENT;
698         }
699
700         segment = LUKS2_get_segment_id_by_flag(hdr, "backup-previous");
701         if (segment >= 0) {
702                 rh->jobj_segment_old = json_object_get(LUKS2_get_segment_jobj(hdr, segment));
703                 rh->digest_old = LUKS2_digest_by_segment(hdr, segment);
704         } else {
705                 rh->jobj_segment_old = NULL;
706                 rh->digest_old = -ENOENT;
707         }
708
709         segment = LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment");
710         if (segment >= 0)
711                 rh->jobj_segment_moved = json_object_get(LUKS2_get_segment_jobj(hdr, segment));
712         else
713                 rh->jobj_segment_moved = NULL;
714 }
715
/*
 * Compute the reencryption offset (and possibly a clamped length) for
 * backward encryption with data shift, based on the total length of all
 * still-linear (unencrypted) segments.
 *
 * Returns 0 on success, -EINVAL on inconsistent layout.
 */
static int reencrypt_offset_backward_moved(struct luks2_hdr *hdr, json_object *jobj_segments, uint64_t *reencrypt_length, uint64_t data_shift, uint64_t *offset)
{
	uint64_t tmp, linear_length = 0;
	int sg, segs = json_segments_count(jobj_segments);

	/* find reencrypt offset with data shift */
	for (sg = 0; sg < segs; sg++)
		if (LUKS2_segment_is_type(hdr, sg, "linear"))
			linear_length += LUKS2_segment_size(hdr, sg, 0);

	/* all active linear segments length */
	if (linear_length) {
		if (linear_length < data_shift)
			return -EINVAL;
		tmp = linear_length - data_shift;
		/* final (partial) step: shorter than data_shift remains */
		if (tmp && tmp < data_shift) {
			*offset = data_shift;
			*reencrypt_length = tmp;
		} else
			*offset = tmp;
		return 0;
	}

	/* single (already encrypted) segment: start from the beginning */
	if (segs == 1) {
		*offset = 0;
		return 0;
	}

	/* should be unreachable */

	return -EINVAL;
}
748
749 static int _offset_forward(struct luks2_hdr *hdr, json_object *jobj_segments, uint64_t *offset)
750 {
751         int segs = json_segments_count(jobj_segments);
752
753         if (segs == 1)
754                 *offset = 0;
755         else if (segs == 2) {
756                 *offset = json_segment_get_size(json_segments_get_segment(jobj_segments, 0), 0);
757                 if (!*offset)
758                         return -EINVAL;
759         } else
760                 return -EINVAL;
761
762         return 0;
763 }
764
765 static int _offset_backward(struct luks2_hdr *hdr, json_object *jobj_segments, uint64_t device_size, uint64_t *length, uint64_t *offset)
766 {
767         int segs = json_segments_count(jobj_segments);
768         uint64_t tmp;
769
770         if (segs == 1) {
771                 if (device_size < *length)
772                         *length = device_size;
773                 *offset = device_size - *length;
774         } else if (segs == 2) {
775                 tmp = json_segment_get_size(json_segments_get_segment(jobj_segments, 0), 0);
776                 if (tmp < *length)
777                         *length = tmp;
778                 *offset =  tmp - *length;
779         } else
780                 return -EINVAL;
781
782         return 0;
783 }
784
785 /* must be always relative to data offset */
786 /* the LUKS2 header MUST be valid */
787 static int reencrypt_offset(struct luks2_hdr *hdr,
788                 crypt_reencrypt_direction_info di,
789                 uint64_t device_size,
790                 uint64_t *reencrypt_length,
791                 uint64_t *offset)
792 {
793         int sg;
794         json_object *jobj_segments;
795         uint64_t data_shift = reencrypt_data_shift(hdr);
796
797         if (!offset)
798                 return -EINVAL;
799
800         /* if there's segment in reencryption return directly offset of it */
801         json_object_object_get_ex(hdr->jobj, "segments", &jobj_segments);
802         sg = json_segments_segment_in_reencrypt(jobj_segments);
803         if (sg >= 0) {
804                 *offset = LUKS2_segment_offset(hdr, sg, 0) - (reencrypt_get_data_offset_new(hdr));
805                 return 0;
806         }
807
808         if (di == CRYPT_REENCRYPT_FORWARD)
809                 return _offset_forward(hdr, jobj_segments, offset);
810         else if (di == CRYPT_REENCRYPT_BACKWARD) {
811                 if (reencrypt_mode(hdr) == CRYPT_REENCRYPT_ENCRYPT &&
812                     LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment") >= 0)
813                         return reencrypt_offset_backward_moved(hdr, jobj_segments, reencrypt_length, data_shift, offset);
814                 return _offset_backward(hdr, jobj_segments, device_size, reencrypt_length, offset);
815         }
816
817         return -EINVAL;
818 }
819
820 static uint64_t reencrypt_length(struct crypt_device *cd,
821                 struct luks2_hdr *hdr,
822                 struct luks2_reenc_context *rh,
823                 uint64_t keyslot_area_length,
824                 uint64_t length_max)
825 {
826         unsigned long dummy, optimal_alignment;
827         uint64_t length, soft_mem_limit;
828
829         if (rh->rp.type == REENC_PROTECTION_NONE)
830                 length = length_max ?: LUKS2_DEFAULT_NONE_REENCRYPTION_LENGTH;
831         else if (rh->rp.type == REENC_PROTECTION_CHECKSUM)
832                 length = (keyslot_area_length / rh->rp.p.csum.hash_size) * rh->alignment;
833         else if (rh->rp.type == REENC_PROTECTION_DATASHIFT)
834                 return reencrypt_data_shift(hdr);
835         else
836                 length = keyslot_area_length;
837
838         /* hard limit */
839         if (length > LUKS2_REENCRYPT_MAX_HOTZONE_LENGTH)
840                 length = LUKS2_REENCRYPT_MAX_HOTZONE_LENGTH;
841
842         /* soft limit is 1/4 of system memory */
843         soft_mem_limit = crypt_getphysmemory_kb() << 8; /* multiply by (1024/4) */
844
845         if (soft_mem_limit && length > soft_mem_limit)
846                 length = soft_mem_limit;
847
848         if (length_max && length > length_max)
849                 length = length_max;
850
851         length -= (length % rh->alignment);
852
853         /* Emits error later */
854         if (!length)
855                 return length;
856
857         device_topology_alignment(cd, crypt_data_device(cd), &optimal_alignment, &dummy, length);
858
859         /* we have to stick with encryption sector size alignment */
860         if (optimal_alignment % rh->alignment)
861                 return length;
862
863         /* align to opt-io size only if remaining size allows it */
864         if (length > optimal_alignment)
865                 length -= (length % optimal_alignment);
866
867         return length;
868 }
869
/*
 * Initialize the in-memory reencryption context from LUKS2 header
 * metadata and (validated) caller-supplied parameters.
 *
 * Fills keyslot id, mode, direction, alignment, resilience protection
 * settings, hotzone length/offset and progress. Returns 0 on success,
 * -EINVAL on invalid metadata/parameters, -ENOMEM on allocation failure.
 */
static int reencrypt_context_init(struct crypt_device *cd, struct luks2_hdr *hdr, struct luks2_reenc_context *rh, uint64_t device_size, const struct crypt_params_reencrypt *params)
{
	int r;
	uint64_t dummy, area_length;

	/* the "reencrypt" keyslot area stores resilience data (journal/checksums) */
	rh->reenc_keyslot = LUKS2_find_keyslot(hdr, "reencrypt");
	if (rh->reenc_keyslot < 0)
		return -EINVAL;
	if (LUKS2_keyslot_area(hdr, rh->reenc_keyslot, &dummy, &area_length) < 0)
		return -EINVAL;

	rh->mode = reencrypt_mode(hdr);

	rh->alignment = reencrypt_get_alignment(cd, hdr);
	if (!rh->alignment)
		return -EINVAL;

	log_dbg(cd, "Hotzone size: %" PRIu64 ", device size: %" PRIu64 ", alignment: %zu.",
		params->max_hotzone_size << SECTOR_SHIFT,
		params->device_size << SECTOR_SHIFT, rh->alignment);

	/* both user-supplied sizes must honor the calculated zone alignment */
	if ((params->max_hotzone_size << SECTOR_SHIFT) % rh->alignment) {
		log_err(cd, _("Hotzone size must be multiple of calculated zone alignment (%zu bytes)."), rh->alignment);
		return -EINVAL;
	}

	if ((params->device_size << SECTOR_SHIFT) % rh->alignment) {
		log_err(cd, _("Device size must be multiple of calculated zone alignment (%zu bytes)."), rh->alignment);
		return -EINVAL;
	}

	rh->direction = reencrypt_direction(hdr);

	/* select and initialize the crash-resilience protection scheme */
	if (!strcmp(params->resilience, "datashift")) {
		log_dbg(cd, "Initializing reencryption context with data_shift resilience.");
		rh->rp.type = REENC_PROTECTION_DATASHIFT;
		rh->data_shift = reencrypt_data_shift(hdr);
	} else if (!strcmp(params->resilience, "journal")) {
		log_dbg(cd, "Initializing reencryption context with journal resilience.");
		rh->rp.type = REENC_PROTECTION_JOURNAL;
	} else if (!strcmp(params->resilience, "checksum")) {
		log_dbg(cd, "Initializing reencryption context with checksum resilience.");
		rh->rp.type = REENC_PROTECTION_CHECKSUM;

		/* copy hash name, rejecting truncation */
		r = snprintf(rh->rp.p.csum.hash,
			sizeof(rh->rp.p.csum.hash), "%s", params->hash);
		if (r < 0 || (size_t)r >= sizeof(rh->rp.p.csum.hash)) {
			log_dbg(cd, "Invalid hash parameter");
			return -EINVAL;
		}

		if (crypt_hash_init(&rh->rp.p.csum.ch, params->hash)) {
			log_dbg(cd, "Failed to initialize checksum resilience hash %s", params->hash);
			return -EINVAL;
		}

		r = crypt_hash_size(params->hash);
		if (r < 1) {
			log_dbg(cd, "Invalid hash size");
			return -EINVAL;
		}
		rh->rp.p.csum.hash_size = r;

		/* checksum buffer mirrors the whole keyslot area */
		rh->rp.p.csum.checksums_len = area_length;
		if (posix_memalign(&rh->rp.p.csum.checksums, device_alignment(crypt_metadata_device(cd)),
				   rh->rp.p.csum.checksums_len))
			return -ENOMEM;
	} else if (!strcmp(params->resilience, "none")) {
		log_dbg(cd, "Initializing reencryption context with none resilience.");
		rh->rp.type = REENC_PROTECTION_NONE;
	} else {
		log_err(cd, _("Unsupported resilience mode %s"), params->resilience);
		return -EINVAL;
	}

	/* explicit device size switches to fixed-size reencryption */
	if (params->device_size) {
		log_dbg(cd, "Switching reencryption to fixed size mode.");
		device_size = params->device_size << SECTOR_SHIFT;
		rh->fixed_length = true;
	} else
		rh->fixed_length = false;

	rh->length = reencrypt_length(cd, hdr, rh, area_length, params->max_hotzone_size << SECTOR_SHIFT);
	if (!rh->length) {
		log_dbg(cd, "Invalid reencryption length.");
		return -EINVAL;
	}

	if (reencrypt_offset(hdr, rh->direction, device_size, &rh->length, &rh->offset)) {
		log_dbg(cd, "Failed to get reencryption offset.");
		return -EINVAL;
	}

	/* clamp hotzone to the device end */
	if (rh->offset > device_size)
		return -EINVAL;
	if (rh->length > device_size - rh->offset)
		rh->length = device_size - rh->offset;

	log_dbg(cd, "reencrypt-direction: %s", rh->direction == CRYPT_REENCRYPT_FORWARD ? "forward" : "backward");

	_load_backup_segments(hdr, rh);

	/* progress counts bytes already done from the direction's start point */
	if (rh->direction == CRYPT_REENCRYPT_BACKWARD)
		rh->progress = device_size - rh->offset - rh->length;
	else
		rh->progress = rh->offset;

	log_dbg(cd, "backup-previous digest id: %d", rh->digest_old);
	log_dbg(cd, "backup-final digest id: %d", rh->digest_new);
	log_dbg(cd, "reencrypt length: %" PRIu64, rh->length);
	log_dbg(cd, "reencrypt offset: %" PRIu64, rh->offset);
	log_dbg(cd, "reencrypt shift: %s%" PRIu64, (rh->data_shift && rh->direction == CRYPT_REENCRYPT_BACKWARD ? "-" : ""), rh->data_shift);
	log_dbg(cd, "reencrypt alignment: %zu", rh->alignment);
	log_dbg(cd, "reencrypt progress: %" PRIu64, rh->progress);

	rh->device_size = device_size;

	/* refuse hotzones shorter than one 512-byte sector */
	return rh->length < 512 ? -EINVAL : 0;
}
989
990 static size_t reencrypt_buffer_length(struct luks2_reenc_context *rh)
991 {
992         if (rh->data_shift)
993                 return rh->data_shift;
994         return rh->length;
995 }
996
997 static int reencrypt_load_clean(struct crypt_device *cd,
998         struct luks2_hdr *hdr,
999         uint64_t device_size,
1000         struct luks2_reenc_context **rh,
1001         const struct crypt_params_reencrypt *params)
1002 {
1003         int r;
1004         const struct crypt_params_reencrypt hdr_reenc_params = {
1005                 .resilience = reencrypt_resilience_type(hdr),
1006                 .hash = reencrypt_resilience_hash(hdr),
1007                 .device_size = params ? params->device_size : 0
1008         };
1009         struct luks2_reenc_context *tmp = crypt_zalloc(sizeof (*tmp));
1010
1011         if (!tmp)
1012                 return -ENOMEM;
1013
1014         r = -EINVAL;
1015         if (!hdr_reenc_params.resilience)
1016                 goto err;
1017
1018         /* skip context update if data shift is detected in header */
1019         if (!strcmp(hdr_reenc_params.resilience, "datashift"))
1020                 params = NULL;
1021
1022         log_dbg(cd, "Initializing reencryption context (%s).", params ? "update" : "load");
1023
1024         if (!params || !params->resilience)
1025                 params = &hdr_reenc_params;
1026
1027         r = reencrypt_context_init(cd, hdr, tmp, device_size, params);
1028         if (r)
1029                 goto err;
1030
1031         if (posix_memalign(&tmp->reenc_buffer, device_alignment(crypt_data_device(cd)),
1032                            reencrypt_buffer_length(tmp))) {
1033                 r = -ENOMEM;
1034                 goto err;
1035         }
1036
1037         *rh = tmp;
1038
1039         return 0;
1040 err:
1041         LUKS2_reenc_context_free(cd, tmp);
1042
1043         return r;
1044 }
1045
1046 static int reencrypt_make_segments(struct crypt_device *cd,
1047         struct luks2_hdr *hdr,
1048         struct luks2_reenc_context *rh,
1049         uint64_t device_size)
1050 {
1051         int r;
1052         uint64_t data_offset = reencrypt_get_data_offset_new(hdr);
1053
1054         log_dbg(cd, "Calculating segments.");
1055
1056         r = reencrypt_make_hot_segments(cd, hdr, rh, device_size, data_offset);
1057         if (!r) {
1058                 r = reencrypt_make_post_segments(cd, hdr, rh, data_offset);
1059                 if (r)
1060                         json_object_put(rh->jobj_segs_hot);
1061         }
1062
1063         if (r)
1064                 log_dbg(cd, "Failed to make reencryption segments.");
1065
1066         return r;
1067 }
1068
1069 static int reencrypt_make_segments_crashed(struct crypt_device *cd,
1070                                 struct luks2_hdr *hdr,
1071                                 struct luks2_reenc_context *rh)
1072 {
1073         int r;
1074         uint64_t data_offset = crypt_get_data_offset(cd) << SECTOR_SHIFT;
1075
1076         if (!rh)
1077                 return -EINVAL;
1078
1079         rh->jobj_segs_hot = json_object_new_object();
1080         if (!rh->jobj_segs_hot)
1081                 return -ENOMEM;
1082
1083         json_object_object_foreach(LUKS2_get_segments_jobj(hdr), key, val) {
1084                 if (json_segment_is_backup(val))
1085                         continue;
1086                 json_object_object_add(rh->jobj_segs_hot, key, json_object_get(val));
1087         }
1088
1089         r = reencrypt_make_post_segments(cd, hdr, rh, data_offset);
1090         if (r) {
1091                 json_object_put(rh->jobj_segs_hot);
1092                 rh->jobj_segs_hot = NULL;
1093         }
1094
1095         return r;
1096 }
1097
/*
 * Load a reencryption context for recovery after an interrupted
 * (crashed) reencryption. Hotzone length and checksum alignment are
 * taken from metadata instead of being recalculated.
 */
static int reencrypt_load_crashed(struct crypt_device *cd,
	struct luks2_hdr *hdr, uint64_t device_size, struct luks2_reenc_context **rh)
{
	bool dynamic;
	uint64_t minimal_size;
	int r, reenc_seg;
	struct crypt_params_reencrypt params = {};

	if (LUKS2_get_data_size(hdr, &minimal_size, &dynamic))
		return -EINVAL;

	/* non-dynamic data size forces fixed-size reencryption mode */
	if (!dynamic)
		params.device_size = minimal_size >> SECTOR_SHIFT;

	r = reencrypt_load_clean(cd, hdr, device_size, rh, &params);

	if (!r) {
		/* hotzone length is pinned to the in-reencryption segment size */
		reenc_seg = json_segments_segment_in_reencrypt(LUKS2_get_segments_jobj(hdr));
		if (reenc_seg < 0)
			r = -EINVAL;
		else
			(*rh)->length = LUKS2_segment_size(hdr, reenc_seg, 0);
	}

	if (!r && ((*rh)->rp.type == REENC_PROTECTION_CHECKSUM)) {
		/* we have to override calculated alignment with value stored in mda */
		(*rh)->alignment = reencrypt_alignment(hdr);
		if (!(*rh)->alignment) {
			log_dbg(cd, "Failed to get read resilience sector_size from metadata.");
			r = -EINVAL;
		}
	}

	if (!r)
		r = reencrypt_make_segments_crashed(cd, hdr, *rh);

	/* NOTE(review): when reencrypt_load_clean() itself failed, *rh was never
	 * assigned here, so this free relies on the caller passing a
	 * NULL-initialized pointer — verify against callers. */
	if (r) {
		LUKS2_reenc_context_free(cd, *rh);
		*rh = NULL;
	}
	return r;
}
1140
1141 static int reencrypt_init_storage_wrappers(struct crypt_device *cd,
1142                 struct luks2_hdr *hdr,
1143                 struct luks2_reenc_context *rh,
1144                 struct volume_key *vks)
1145 {
1146         int r;
1147         struct volume_key *vk;
1148         uint32_t wrapper_flags = (getuid() || geteuid()) ? 0 : DISABLE_KCAPI;
1149
1150         vk = crypt_volume_key_by_id(vks, rh->digest_old);
1151         r = crypt_storage_wrapper_init(cd, &rh->cw1, crypt_data_device(cd),
1152                         reencrypt_get_data_offset_old(hdr),
1153                         crypt_get_iv_offset(cd),
1154                         reencrypt_get_sector_size_old(hdr),
1155                         reencrypt_segment_cipher_old(hdr),
1156                         vk, wrapper_flags | OPEN_READONLY);
1157         if (r) {
1158                 log_err(cd, _("Failed to initialize old segment storage wrapper."));
1159                 return r;
1160         }
1161         rh->wflags1 = wrapper_flags | OPEN_READONLY;
1162         log_dbg(cd, "Old cipher storage wrapper type: %d.", crypt_storage_wrapper_get_type(rh->cw1));
1163
1164         vk = crypt_volume_key_by_id(vks, rh->digest_new);
1165         r = crypt_storage_wrapper_init(cd, &rh->cw2, crypt_data_device(cd),
1166                         reencrypt_get_data_offset_new(hdr),
1167                         crypt_get_iv_offset(cd),
1168                         reencrypt_get_sector_size_new(hdr),
1169                         reencrypt_segment_cipher_new(hdr),
1170                         vk, wrapper_flags);
1171         if (r) {
1172                 log_err(cd, _("Failed to initialize new segment storage wrapper."));
1173                 return r;
1174         }
1175         rh->wflags2 = wrapper_flags;
1176         log_dbg(cd, "New cipher storage wrapper type: %d", crypt_storage_wrapper_get_type(rh->cw2));
1177
1178         return 0;
1179 }
1180
1181 static int reencrypt_context_set_names(struct luks2_reenc_context *rh, const char *name)
1182 {
1183         if (!rh | !name)
1184                 return -EINVAL;
1185
1186         if (*name == '/') {
1187                 if (!(rh->device_name = dm_device_name(name)))
1188                         return -EINVAL;
1189         } else if (!(rh->device_name = strdup(name)))
1190                 return -ENOMEM;
1191
1192         if (asprintf(&rh->hotzone_name, "%s-hotzone-%s", rh->device_name,
1193                      rh->direction == CRYPT_REENCRYPT_FORWARD ? "forward" : "backward") < 0) {
1194                 rh->hotzone_name = NULL;
1195                 return -ENOMEM;
1196         }
1197         if (asprintf(&rh->overlay_name, "%s-overlay", rh->device_name) < 0) {
1198                 rh->overlay_name = NULL;
1199                 return -ENOMEM;
1200         }
1201
1202         rh->online = true;
1203         return 0;
1204 }
1205
1206 static int modify_offset(uint64_t *offset, uint64_t data_shift, crypt_reencrypt_direction_info di)
1207 {
1208         int r = -EINVAL;
1209
1210         if (!offset)
1211                 return r;
1212
1213         if (di == CRYPT_REENCRYPT_FORWARD) {
1214                 if (*offset >= data_shift) {
1215                         *offset -= data_shift;
1216                         r = 0;
1217                 }
1218         } else if (di == CRYPT_REENCRYPT_BACKWARD) {
1219                 *offset += data_shift;
1220                 r = 0;
1221         }
1222
1223         return r;
1224 }
1225
1226 static int reencrypt_update_flag(struct crypt_device *cd, int enable, bool commit)
1227 {
1228         uint32_t reqs;
1229         struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
1230
1231         if (LUKS2_config_get_requirements(cd, hdr, &reqs))
1232                 return -EINVAL;
1233
1234         /* nothing to do */
1235         if (enable && (reqs & CRYPT_REQUIREMENT_ONLINE_REENCRYPT))
1236                 return -EINVAL;
1237
1238         /* nothing to do */
1239         if (!enable && !(reqs & CRYPT_REQUIREMENT_ONLINE_REENCRYPT))
1240                 return -EINVAL;
1241
1242         if (enable)
1243                 reqs |= CRYPT_REQUIREMENT_ONLINE_REENCRYPT;
1244         else
1245                 reqs &= ~CRYPT_REQUIREMENT_ONLINE_REENCRYPT;
1246
1247         log_dbg(cd, "Going to %s reencryption requirement flag.", enable ? "store" : "wipe");
1248
1249         return LUKS2_config_set_requirements(cd, hdr, reqs, commit);
1250 }
1251
1252 static int reencrypt_recover_segment(struct crypt_device *cd,
1253         struct luks2_hdr *hdr,
1254         struct luks2_reenc_context *rh,
1255         struct volume_key *vks)
1256 {
1257         struct volume_key *vk_old, *vk_new;
1258         size_t count, s;
1259         ssize_t read, w;
1260         unsigned resilience;
1261         uint64_t area_offset, area_length, area_length_read, crash_iv_offset,
1262                  data_offset = crypt_get_data_offset(cd) << SECTOR_SHIFT;
1263         int devfd, r, new_sector_size, old_sector_size, rseg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot);
1264         char *checksum_tmp = NULL, *data_buffer = NULL;
1265         struct crypt_storage_wrapper *cw1 = NULL, *cw2 = NULL;
1266
1267         resilience = rh->rp.type;
1268
1269         if (rseg < 0 || rh->length < 512)
1270                 return -EINVAL;
1271
1272         vk_new = crypt_volume_key_by_id(vks, rh->digest_new);
1273         if (!vk_new && rh->mode != CRYPT_REENCRYPT_DECRYPT)
1274                 return -EINVAL;
1275         vk_old = crypt_volume_key_by_id(vks, rh->digest_old);
1276         if (!vk_old && rh->mode != CRYPT_REENCRYPT_ENCRYPT)
1277                 return -EINVAL;
1278         old_sector_size = json_segment_get_sector_size(reencrypt_segment_old(hdr));
1279         new_sector_size = json_segment_get_sector_size(reencrypt_segment_new(hdr));
1280         if (rh->mode == CRYPT_REENCRYPT_DECRYPT)
1281                 crash_iv_offset = rh->offset >> SECTOR_SHIFT; /* TODO: + old iv_tweak */
1282         else
1283                 crash_iv_offset = json_segment_get_iv_offset(json_segments_get_segment(rh->jobj_segs_hot, rseg));
1284
1285         log_dbg(cd, "crash_offset: %" PRIu64 ", crash_length: %" PRIu64 ",  crash_iv_offset: %" PRIu64, data_offset + rh->offset, rh->length, crash_iv_offset);
1286
1287         r = crypt_storage_wrapper_init(cd, &cw2, crypt_data_device(cd),
1288                         data_offset + rh->offset, crash_iv_offset, new_sector_size,
1289                         reencrypt_segment_cipher_new(hdr), vk_new, 0);
1290         if (r) {
1291                 log_err(cd, _("Failed to initialize new segment storage wrapper."));
1292                 return r;
1293         }
1294
1295         if (LUKS2_keyslot_area(hdr, rh->reenc_keyslot, &area_offset, &area_length)) {
1296                 r = -EINVAL;
1297                 goto out;
1298         }
1299
1300         if (posix_memalign((void**)&data_buffer, device_alignment(crypt_data_device(cd)), rh->length)) {
1301                 r = -ENOMEM;
1302                 goto out;
1303         }
1304
1305         switch (resilience) {
1306         case  REENC_PROTECTION_CHECKSUM:
1307                 log_dbg(cd, "Checksums based recovery.");
1308
1309                 r = crypt_storage_wrapper_init(cd, &cw1, crypt_data_device(cd),
1310                                 data_offset + rh->offset, crash_iv_offset, old_sector_size,
1311                                 reencrypt_segment_cipher_old(hdr), vk_old, 0);
1312                 if (r) {
1313                         log_err(cd, _("Failed to initialize old segment storage wrapper."));
1314                         goto out;
1315                 }
1316
1317                 count = rh->length / rh->alignment;
1318                 area_length_read = count * rh->rp.p.csum.hash_size;
1319                 if (area_length_read > area_length) {
1320                         log_dbg(cd, "Internal error in calculated area_length.");
1321                         r = -EINVAL;
1322                         goto out;
1323                 }
1324
1325                 checksum_tmp = malloc(rh->rp.p.csum.hash_size);
1326                 if (!checksum_tmp) {
1327                         r = -ENOMEM;
1328                         goto out;
1329                 }
1330
1331                 /* TODO: lock for read */
1332                 devfd = device_open(cd, crypt_metadata_device(cd), O_RDONLY);
1333                 if (devfd < 0)
1334                         goto out;
1335
1336                 /* read old data checksums */
1337                 read = read_lseek_blockwise(devfd, device_block_size(cd, crypt_metadata_device(cd)),
1338                                         device_alignment(crypt_metadata_device(cd)), rh->rp.p.csum.checksums, area_length_read, area_offset);
1339                 if (read < 0 || (size_t)read != area_length_read) {
1340                         log_err(cd, _("Failed to read checksums for current hotzone."));
1341                         r = -EINVAL;
1342                         goto out;
1343                 }
1344
1345                 read = crypt_storage_wrapper_read(cw2, 0, data_buffer, rh->length);
1346                 if (read < 0 || (size_t)read != rh->length) {
1347                         log_err(cd, _("Failed to read hotzone area starting at %" PRIu64 "."), rh->offset + data_offset);
1348                         r = -EINVAL;
1349                         goto out;
1350                 }
1351
1352                 for (s = 0; s < count; s++) {
1353                         if (crypt_hash_write(rh->rp.p.csum.ch, data_buffer + (s * rh->alignment), rh->alignment)) {
1354                                 log_dbg(cd, "Failed to write hash.");
1355                                 r = EINVAL;
1356                                 goto out;
1357                         }
1358                         if (crypt_hash_final(rh->rp.p.csum.ch, checksum_tmp, rh->rp.p.csum.hash_size)) {
1359                                 log_dbg(cd, "Failed to finalize hash.");
1360                                 r = EINVAL;
1361                                 goto out;
1362                         }
1363                         if (!memcmp(checksum_tmp, (char *)rh->rp.p.csum.checksums + (s * rh->rp.p.csum.hash_size), rh->rp.p.csum.hash_size)) {
1364                                 log_dbg(cd, "Sector %zu (size %zu, offset %zu) needs recovery", s, rh->alignment, s * rh->alignment);
1365                                 if (crypt_storage_wrapper_decrypt(cw1, s * rh->alignment, data_buffer + (s * rh->alignment), rh->alignment)) {
1366                                         log_err(cd, _("Failed to decrypt sector %zu."), s);
1367                                         r = -EINVAL;
1368                                         goto out;
1369                                 }
1370                                 w = crypt_storage_wrapper_encrypt_write(cw2, s * rh->alignment, data_buffer + (s * rh->alignment), rh->alignment);
1371                                 if (w < 0 || (size_t)w != rh->alignment) {
1372                                         log_err(cd, _("Failed to recover sector %zu."), s);
1373                                         r = -EINVAL;
1374                                         goto out;
1375                                 }
1376                         }
1377                 }
1378
1379                 r = 0;
1380                 break;
1381         case  REENC_PROTECTION_JOURNAL:
1382                 log_dbg(cd, "Journal based recovery.");
1383
1384                 /* FIXME: validation candidate */
1385                 if (rh->length > area_length) {
1386                         r = -EINVAL;
1387                         log_dbg(cd, "Invalid journal size.");
1388                         goto out;
1389                 }
1390
1391                 /* TODO locking */
1392                 r = crypt_storage_wrapper_init(cd, &cw1, crypt_metadata_device(cd),
1393                                 area_offset, crash_iv_offset, old_sector_size,
1394                                 reencrypt_segment_cipher_old(hdr), vk_old, 0);
1395                 if (r) {
1396                         log_err(cd, _("Failed to initialize old segment storage wrapper."));
1397                         goto out;
1398                 }
1399                 read = crypt_storage_wrapper_read_decrypt(cw1, 0, data_buffer, rh->length);
1400                 if (read < 0 || (size_t)read != rh->length) {
1401                         log_dbg(cd, "Failed to read journaled data.");
1402                         r = -EIO;
1403                         /* may content plaintext */
1404                         crypt_safe_memzero(data_buffer, rh->length);
1405                         goto out;
1406                 }
1407                 read = crypt_storage_wrapper_encrypt_write(cw2, 0, data_buffer, rh->length);
1408                 /* may content plaintext */
1409                 crypt_safe_memzero(data_buffer, rh->length);
1410                 if (read < 0 || (size_t)read != rh->length) {
1411                         log_dbg(cd, "recovery write failed.");
1412                         r = -EINVAL;
1413                         goto out;
1414                 }
1415
1416                 r = 0;
1417                 break;
1418         case  REENC_PROTECTION_DATASHIFT:
1419                 log_dbg(cd, "Data shift based recovery.");
1420
1421                 if (rseg == 0) {
1422                         r = crypt_storage_wrapper_init(cd, &cw1, crypt_data_device(cd),
1423                                         json_segment_get_offset(rh->jobj_segment_moved, 0), 0, 0,
1424                                         reencrypt_segment_cipher_old(hdr), NULL, 0);
1425                 } else
1426                         r = crypt_storage_wrapper_init(cd, &cw1, crypt_data_device(cd),
1427                                         data_offset + rh->offset - rh->data_shift, 0, 0,
1428                                         reencrypt_segment_cipher_old(hdr), NULL, 0);
1429                 if (r) {
1430                         log_err(cd, _("Failed to initialize old segment storage wrapper."));
1431                         goto out;
1432                 }
1433
1434                 read = crypt_storage_wrapper_read_decrypt(cw1, 0, data_buffer, rh->length);
1435                 if (read < 0 || (size_t)read != rh->length) {
1436                         log_dbg(cd, "Failed to read data.");
1437                         r = -EIO;
1438                         /* may content plaintext */
1439                         crypt_safe_memzero(data_buffer, rh->length);
1440                         goto out;
1441                 }
1442
1443                 read = crypt_storage_wrapper_encrypt_write(cw2, 0, data_buffer, rh->length);
1444                 /* may content plaintext */
1445                 crypt_safe_memzero(data_buffer, rh->length);
1446                 if (read < 0 || (size_t)read != rh->length) {
1447                         log_dbg(cd, "recovery write failed.");
1448                         r = -EINVAL;
1449                         goto out;
1450                 }
1451                 r = 0;
1452                 break;
1453         default:
1454                 r = -EINVAL;
1455         }
1456
1457         if (!r)
1458                 rh->read = rh->length;
1459 out:
1460         free(data_buffer);
1461         free(checksum_tmp);
1462         crypt_storage_wrapper_destroy(cw1);
1463         crypt_storage_wrapper_destroy(cw2);
1464
1465         return r;
1466 }
1467
1468 static int reencrypt_add_moved_segment(struct crypt_device *cd,
1469                 struct luks2_hdr *hdr,
1470                 struct luks2_reenc_context *rh)
1471 {
1472         int s = LUKS2_segment_first_unused_id(hdr);
1473
1474         if (!rh->jobj_segment_moved)
1475                 return 0;
1476
1477         if (s < 0)
1478                 return s;
1479
1480         if (json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), s, json_object_get(rh->jobj_segment_moved))) {
1481                 json_object_put(rh->jobj_segment_moved);
1482                 return -EINVAL;
1483         }
1484
1485         return 0;
1486 }
1487
/*
 * Insert a backup segment ("backup-previous" for @final == 0,
 * "backup-final" for @final != 0) into the header's segments object and,
 * for "crypt" segments, bind it to the matching key digest.
 *
 * Returns 0 on success, negative errno otherwise.
 */
static int reencrypt_add_backup_segment(struct crypt_device *cd,
		struct luks2_hdr *hdr,
		struct luks2_reenc_context *rh,
		unsigned final)
{
	json_object *jobj_backup;
	int digest, sid;

	sid = LUKS2_segment_first_unused_id(hdr);
	if (sid < 0)
		return sid;

	if (final) {
		digest = rh->digest_new;
		jobj_backup = rh->jobj_segment_new;
	} else {
		digest = rh->digest_old;
		jobj_backup = rh->jobj_segment_old;
	}

	/* add_by_uint consumes one reference on success, hence the explicit get */
	if (json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), sid, json_object_get(jobj_backup))) {
		json_object_put(jobj_backup);
		return -EINVAL;
	}

	/* only "crypt" backup segments reference a key digest */
	if (strcmp(json_segment_type(jobj_backup), "crypt"))
		return 0;

	return LUKS2_digest_segment_assign(cd, hdr, sid, digest, 1, 0);
}
1512
/*
 * Assign segments and digests for the simple (plain encrypt or decrypt)
 * reencryption modes.
 *
 * @hot    non-zero: install the 'hot' segment layout, zero: the 'post' layout
 * @commit non-zero: also write the LUKS2 header to disk
 *
 * Returns 0 on success, negative errno otherwise.
 *
 * Fix: the failure log used to print rh->digest_new unconditionally even
 * when digest_old was the digest actually being assigned (decrypt mode),
 * and used %u for signed int values; log the selected digest with %d.
 */
static int reencrypt_assign_segments_simple(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	struct luks2_reenc_context *rh,
	unsigned hot,
	unsigned commit)
{
	int digest, r, sg;

	if (hot && json_segments_count(rh->jobj_segs_hot) > 0) {
		log_dbg(cd, "Setting 'hot' segments.");

		r = LUKS2_segments_set(cd, hdr, rh->jobj_segs_hot, 0);
		if (!r)
			rh->jobj_segs_hot = NULL; /* ownership moved to hdr */
	} else if (!hot && json_segments_count(rh->jobj_segs_post) > 0) {
		log_dbg(cd, "Setting 'post' segments.");
		r = LUKS2_segments_set(cd, hdr, rh->jobj_segs_post, 0);
		if (!r)
			rh->jobj_segs_post = NULL; /* ownership moved to hdr */
	} else {
		log_dbg(cd, "No segments to set.");
		return -EINVAL;
	}

	if (r) {
		log_dbg(cd, "Failed to assign new enc segments.");
		return r;
	}

	r = reencrypt_add_backup_segment(cd, hdr, rh, 0);
	if (r) {
		log_dbg(cd, "Failed to assign reencryption previous backup segment.");
		return r;
	}

	r = reencrypt_add_backup_segment(cd, hdr, rh, 1);
	if (r) {
		log_dbg(cd, "Failed to assign reencryption final backup segment.");
		return r;
	}

	r = reencrypt_add_moved_segment(cd, hdr, rh);
	if (r) {
		log_dbg(cd, "Failed to assign reencryption moved backup segment.");
		return r;
	}

	/* encryption binds segments to the new key digest, decryption keeps the old one */
	digest = rh->mode == CRYPT_REENCRYPT_ENCRYPT ? rh->digest_new : rh->digest_old;

	for (sg = 0; sg < LUKS2_segments_count(hdr); sg++) {
		if (LUKS2_segment_is_type(hdr, sg, "crypt") &&
		    LUKS2_digest_segment_assign(cd, hdr, sg, digest, 1, 0)) {
			log_dbg(cd, "Failed to assign digest %d to segment %d.", digest, sg);
			return -EINVAL;
		}
	}

	return commit ? LUKS2_hdr_write(cd, hdr) : 0;
}
1570
/*
 * Assign segment layout and key digests in the LUKS2 metadata according to
 * the current reencryption state.
 *
 * @hot    non-zero: install the 'hot' segments (layout valid while a hotzone
 *         is being reencrypted); zero: install the 'post' segments (layout
 *         after the hotzone was finished)
 * @commit non-zero: also write the LUKS2 header to disk
 *
 * Returns 0 on success, negative errno otherwise.
 */
static int reencrypt_assign_segments(struct crypt_device *cd,
		struct luks2_hdr *hdr,
		struct luks2_reenc_context *rh,
		unsigned hot,
		unsigned commit)
{
	bool forward;
	int rseg, scount, r = -EINVAL;

	/* FIXME: validate in reencrypt context load */
	if (rh->digest_new < 0 && rh->mode != CRYPT_REENCRYPT_DECRYPT)
		return -EINVAL;

	/* wipe all existing digest <-> segment assignments first */
	if (LUKS2_digest_segment_assign(cd, hdr, CRYPT_ANY_SEGMENT, CRYPT_ANY_DIGEST, 0, 0))
		return -EINVAL;

	/* plain encryption/decryption uses the simpler single-digest layout */
	if (rh->mode == CRYPT_REENCRYPT_ENCRYPT || rh->mode == CRYPT_REENCRYPT_DECRYPT)
		return reencrypt_assign_segments_simple(cd, hdr, rh, hot, commit);

	if (hot && rh->jobj_segs_hot) {
		log_dbg(cd, "Setting 'hot' segments.");

		r = LUKS2_segments_set(cd, hdr, rh->jobj_segs_hot, 0);
		if (!r)
			rh->jobj_segs_hot = NULL; /* ownership moved to hdr */
	} else if (!hot && rh->jobj_segs_post) {
		log_dbg(cd, "Setting 'post' segments.");
		r = LUKS2_segments_set(cd, hdr, rh->jobj_segs_post, 0);
		if (!r)
			rh->jobj_segs_post = NULL; /* ownership moved to hdr */
	}

	if (r)
		return r;

	scount = LUKS2_segments_count(hdr);

	/* segment in reencryption has to hold reference on both digests */
	rseg = json_segments_segment_in_reencrypt(LUKS2_get_segments_jobj(hdr));
	if (rseg < 0 && hot)
		return -EINVAL;

	if (rseg >= 0) {
		LUKS2_digest_segment_assign(cd, hdr, rseg, rh->digest_new, 1, 0);
		LUKS2_digest_segment_assign(cd, hdr, rseg, rh->digest_old, 1, 0);
	}

	forward = (rh->direction == CRYPT_REENCRYPT_FORWARD);
	if (hot) {
		/* forward: area before the hotzone is already new, after it still old
		 * (mirrored for backward direction) */
		if (rseg > 0)
			LUKS2_digest_segment_assign(cd, hdr, 0, forward ? rh->digest_new : rh->digest_old, 1, 0);
		if (scount > rseg + 1)
			LUKS2_digest_segment_assign(cd, hdr, rseg + 1, forward ? rh->digest_old : rh->digest_new, 1, 0);
	} else {
		/* post layout: a single remaining segment means fully reencrypted */
		LUKS2_digest_segment_assign(cd, hdr, 0, forward || scount == 1 ? rh->digest_new : rh->digest_old, 1, 0);
		if (scount > 1)
			LUKS2_digest_segment_assign(cd, hdr, 1, forward ? rh->digest_old : rh->digest_new, 1, 0);
	}

	r = reencrypt_add_backup_segment(cd, hdr, rh, 0);
	if (r) {
		log_dbg(cd, "Failed to assign hot reencryption backup segment.");
		return r;
	}
	r = reencrypt_add_backup_segment(cd, hdr, rh, 1);
	if (r) {
		log_dbg(cd, "Failed to assign post reencryption backup segment.");
		return r;
	}

	return commit ? LUKS2_hdr_write(cd, hdr) : 0;
}
1643
/*
 * Build and install the initial segment layout for encryption of a
 * previously plain device.
 *
 * @dev_size           usable data device size in bytes
 * @data_shift         data shift in bytes (non-zero implies shifted layout)
 * @move_first_segment true when the head of the device is moved to its tail
 *                     to make room for the new LUKS2 header
 * @di                 reencryption direction (data shift is only supported
 *                     in the backward direction)
 *
 * Returns 0 on success, negative errno otherwise.
 *
 * Fix: the original err: path returned without releasing jobj_segments
 * (and a possibly created second segment object), leaking json references
 * on every error; all locally owned objects are now dropped on failure.
 */
static int reencrypt_set_encrypt_segments(struct crypt_device *cd, struct luks2_hdr *hdr, uint64_t dev_size, uint64_t data_shift, bool move_first_segment, crypt_reencrypt_direction_info di)
{
	int r;
	uint64_t first_segment_offset, first_segment_length,
		 second_segment_offset, second_segment_length,
		 data_offset = LUKS2_get_data_offset(hdr) << SECTOR_SHIFT;
	json_object *jobj_segment_first = NULL, *jobj_segment_second = NULL, *jobj_segments = NULL;

	if (dev_size < data_shift)
		return -EINVAL;

	if (data_shift && (di == CRYPT_REENCRYPT_FORWARD))
		return -ENOTSUP;

	if (move_first_segment) {
		/*
		 * future data_device layout:
		 * [future LUKS2 header (data shift size)][second data segment][gap (data shift size)][first data segment (data shift size)]
		 */
		first_segment_offset = dev_size;
		first_segment_length = data_shift;
		second_segment_offset = data_shift;
		second_segment_length = dev_size - 2 * data_shift;
	} else if (data_shift) {
		first_segment_offset = data_offset;
		first_segment_length = dev_size;
	} else {
		/* future data_device layout with detached header: [first data segment] */
		first_segment_offset = data_offset;
		first_segment_length = 0; /* dynamic */
	}

	jobj_segments = json_object_new_object();
	if (!jobj_segments)
		return -ENOMEM;

	r = -EINVAL;
	if (move_first_segment) {
		jobj_segment_first = json_segment_create_linear(first_segment_offset, &first_segment_length, 0);
		if (second_segment_length &&
		    !(jobj_segment_second = json_segment_create_linear(second_segment_offset, &second_segment_length, 0))) {
			log_dbg(cd, "Failed generate 2nd segment.");
			goto err;
		}
	} else
		jobj_segment_first = json_segment_create_linear(first_segment_offset, first_segment_length ? &first_segment_length : NULL, 0);

	if (!jobj_segment_first) {
		log_dbg(cd, "Failed generate 1st segment.");
		goto err;
	}

	/* ownership of the segment objects moves to jobj_segments here */
	json_object_object_add(jobj_segments, "0", jobj_segment_first);
	jobj_segment_first = NULL;
	if (jobj_segment_second) {
		json_object_object_add(jobj_segments, "1", jobj_segment_second);
		jobj_segment_second = NULL;
	}

	r = LUKS2_digest_segment_assign(cd, hdr, CRYPT_ANY_SEGMENT, CRYPT_ANY_DIGEST, 0, 0);
	if (r)
		goto err;

	/* LUKS2_segments_set takes ownership of jobj_segments */
	return LUKS2_segments_set(cd, hdr, jobj_segments, 0);
err:
	/* json_object_put is NULL-safe (see usage elsewhere in this file) */
	json_object_put(jobj_segment_first);
	json_object_put(jobj_segment_second);
	json_object_put(jobj_segments);
	return r;
}
1707
/*
 * Build a device-mapper target stack matching the current LUKS2 segment
 * layout.
 *
 * For each JSON segment, one dm target from the preallocated @result list
 * is filled in: "crypt" segments become dm-crypt targets, "linear" segments
 * become dm-linear targets. The segment currently in reencryption is mapped
 * on top of @hz_device (the hotzone helper device) instead of the data
 * device.
 *
 * @size is the device size in bytes; it bounds the last segment when that
 * segment has 'dynamic' (zero) length.
 *
 * Returns the number of targets filled (>= 0) on success, negative errno
 * on failure.
 */
static int reencrypt_make_targets(struct crypt_device *cd,
				struct luks2_hdr *hdr,
				struct device *hz_device,
				struct volume_key *vks,
				struct dm_target *result,
				uint64_t size)
{
	bool reenc_seg;
	struct volume_key *vk;
	uint64_t segment_size, segment_offset, segment_start = 0;
	int r;
	int s = 0;
	json_object *jobj, *jobj_segments = LUKS2_get_segments_jobj(hdr);

	while (result) {
		jobj = json_segments_get_segment(jobj_segments, s);
		if (!jobj) {
			log_dbg(cd, "Internal error. Segment %u is null.", s);
			r = -EINVAL;
			goto out;
		}

		reenc_seg = (s == json_segments_segment_in_reencrypt(jobj_segments));

		/* second argument non-zero: values returned in sectors */
		segment_offset = json_segment_get_offset(jobj, 1);
		segment_size = json_segment_get_size(jobj, 1);
		/* 'dynamic' length allowed in last segment only */
		if (!segment_size && !result->next)
			segment_size = (size >> SECTOR_SHIFT) - segment_start;
		if (!segment_size) {
			log_dbg(cd, "Internal error. Wrong segment size %u", s);
			r = -EINVAL;
			goto out;
		}

		if (!strcmp(json_segment_type(jobj), "crypt")) {
			/* hotzone segment is keyed by the new digest; other segments by their own */
			vk = crypt_volume_key_by_id(vks, reenc_seg ? LUKS2_reencrypt_digest_new(hdr) : LUKS2_digest_by_segment(hdr, s));
			if (!vk) {
				log_err(cd, _("Missing key for dm-crypt segment %u"), s);
				r = -EINVAL;
				goto out;
			}

			/* hotzone device is already mapped at the data offset */
			if (reenc_seg)
				segment_offset -= crypt_get_data_offset(cd);

			r = dm_crypt_target_set(result, segment_start, segment_size,
						reenc_seg ? hz_device : crypt_data_device(cd),
						vk,
						json_segment_get_cipher(jobj),
						json_segment_get_iv_offset(jobj),
						segment_offset,
						"none",
						0,
						json_segment_get_sector_size(jobj));
			if (r) {
				log_err(cd, _("Failed to set dm-crypt segment."));
				goto out;
			}
		} else if (!strcmp(json_segment_type(jobj), "linear")) {
			r = dm_linear_target_set(result, segment_start, segment_size, reenc_seg ? hz_device : crypt_data_device(cd), segment_offset);
			if (r) {
				log_err(cd, _("Failed to set dm-linear segment."));
				goto out;
			}
		} else {
			/* unknown segment type */
			r = -EINVAL;
			goto out;
		}

		segment_start += segment_size;
		s++;
		result = result->next;
	}

	return s;
out:
	return r;
}
1787
1788 /* GLOBAL FIXME: audit function names and parameters names */
1789
1790 /* FIXME:
1791  *      1) audit log routines
1792  *      2) can't we derive hotzone device name from crypt context? (unlocked name, device uuid, etc?)
1793  */
1794 static int reencrypt_load_overlay_device(struct crypt_device *cd, struct luks2_hdr *hdr,
1795         const char *overlay, const char *hotzone, struct volume_key *vks, uint64_t size,
1796         uint32_t flags)
1797 {
1798         char hz_path[PATH_MAX];
1799         int r;
1800
1801         struct device *hz_dev = NULL;
1802         struct crypt_dm_active_device dmd = {
1803                 .flags = flags,
1804         };
1805
1806         log_dbg(cd, "Loading new table for overlay device %s.", overlay);
1807
1808         r = snprintf(hz_path, PATH_MAX, "%s/%s", dm_get_dir(), hotzone);
1809         if (r < 0 || r >= PATH_MAX) {
1810                 r = -EINVAL;
1811                 goto out;
1812         }
1813
1814         r = device_alloc(cd, &hz_dev, hz_path);
1815         if (r)
1816                 goto out;
1817
1818         r = dm_targets_allocate(&dmd.segment, LUKS2_segments_count(hdr));
1819         if (r)
1820                 goto out;
1821
1822         r = reencrypt_make_targets(cd, hdr, hz_dev, vks, &dmd.segment, size);
1823         if (r < 0)
1824                 goto out;
1825
1826         r = dm_reload_device(cd, overlay, &dmd, 0, 0);
1827
1828         /* what else on error here ? */
1829 out:
1830         dm_targets_free(cd, &dmd);
1831         device_free(cd, hz_dev);
1832
1833         return r;
1834 }
1835
/*
 * Replace the live table of @target with the live table of @source and
 * resume it; if @target does not exist yet, create it with source's table.
 *
 * Both devices must have identical sizes when target already exists.
 * Suspend is performed with lockfs/flush skipped to avoid blocking on
 * in-flight I/O.
 *
 * Returns 0 on success, negative errno otherwise.
 */
static int reencrypt_replace_device(struct crypt_device *cd, const char *target, const char *source, uint32_t flags)
{
	int r, exists = 1;
	struct crypt_dm_active_device dmd_source, dmd_target = {};
	uint32_t dmflags = DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH;

	log_dbg(cd, "Replacing table in device %s with table from device %s.", target, source);

	/* check only whether target device exists */
	r = dm_status_device(cd, target);
	if (r < 0) {
		if (r == -ENODEV)
			exists = 0;
		else
			return r;
	}

	/* full query including cipher and key so the table can be reloaded */
	r = dm_query_device(cd, source, DM_ACTIVE_DEVICE | DM_ACTIVE_CRYPT_CIPHER |
			    DM_ACTIVE_CRYPT_KEYSIZE | DM_ACTIVE_CRYPT_KEY, &dmd_source);

	if (r < 0)
		return r;

	if (exists && ((r = dm_query_device(cd, target, 0, &dmd_target)) < 0))
		goto err;

	dmd_source.flags |= flags;
	dmd_source.uuid = crypt_get_uuid(cd);

	if (exists) {
		/* replacing a live table must not change the device size */
		if (dmd_target.size != dmd_source.size) {
			log_err(cd, _("Source and target device sizes don't match. Source %" PRIu64 ", target: %" PRIu64 "."),
				dmd_source.size, dmd_target.size);
			r = -EINVAL;
			goto err;
		}
		r = dm_reload_device(cd, target, &dmd_source, 0, 0);
		if (!r) {
			log_dbg(cd, "Resuming device %s", target);
			r = dm_resume_device(cd, target, dmflags | act2dmflags(dmd_source.flags));
		}
	} else
		r = dm_create_device(cd, target, CRYPT_SUBDEV, &dmd_source);
err:
	dm_targets_free(cd, &dmd_source);
	dm_targets_free(cd, &dmd_target);

	return r;
}
1885
1886 static int reencrypt_swap_backing_device(struct crypt_device *cd, const char *name,
1887                               const char *new_backend_name)
1888 {
1889         int r;
1890         struct device *overlay_dev = NULL;
1891         char overlay_path[PATH_MAX] = { 0 };
1892         struct crypt_dm_active_device dmd = {};
1893
1894         log_dbg(cd, "Redirecting %s mapping to new backing device: %s.", name, new_backend_name);
1895
1896         r = snprintf(overlay_path, PATH_MAX, "%s/%s", dm_get_dir(), new_backend_name);
1897         if (r < 0 || r >= PATH_MAX) {
1898                 r = -EINVAL;
1899                 goto out;
1900         }
1901
1902         r = device_alloc(cd, &overlay_dev, overlay_path);
1903         if (r)
1904                 goto out;
1905
1906         r = device_block_adjust(cd, overlay_dev, DEV_OK,
1907                                 0, &dmd.size, &dmd.flags);
1908         if (r)
1909                 goto out;
1910
1911         r = dm_linear_target_set(&dmd.segment, 0, dmd.size, overlay_dev, 0);
1912         if (r)
1913                 goto out;
1914
1915         r = dm_reload_device(cd, name, &dmd, 0, 0);
1916         if (!r) {
1917                 log_dbg(cd, "Resuming device %s", name);
1918                 r = dm_resume_device(cd, name, DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH);
1919         }
1920
1921 out:
1922         dm_targets_free(cd, &dmd);
1923         device_free(cd, overlay_dev);
1924
1925         return r;
1926 }
1927
1928 static int reencrypt_activate_hotzone_device(struct crypt_device *cd, const char *name, uint64_t device_size, uint32_t flags)
1929 {
1930         int r;
1931         uint64_t new_offset = reencrypt_get_data_offset_new(crypt_get_hdr(cd, CRYPT_LUKS2)) >> SECTOR_SHIFT;
1932
1933         struct crypt_dm_active_device dmd = {
1934                 .flags = flags,
1935                 .uuid = crypt_get_uuid(cd),
1936                 .size = device_size >> SECTOR_SHIFT
1937         };
1938
1939         log_dbg(cd, "Activating hotzone device %s.", name);
1940
1941         r = device_block_adjust(cd, crypt_data_device(cd), DEV_OK,
1942                                 new_offset, &dmd.size, &dmd.flags);
1943         if (r)
1944                 goto err;
1945
1946         r = dm_linear_target_set(&dmd.segment, 0, dmd.size, crypt_data_device(cd), new_offset);
1947         if (r)
1948                 goto err;
1949
1950         r = dm_create_device(cd, name, CRYPT_SUBDEV, &dmd);
1951 err:
1952         dm_targets_free(cd, &dmd);
1953
1954         return r;
1955 }
1956
/*
 * Build the online reencryption device stack:
 *
 *   name (dm-linear) -> overlay (original table) -> data device
 *                       hotzone (dm-linear)      -> data device
 *
 * Activation order matters: hotzone first, then overlay with the original
 * table, then the origin mapping is swapped onto the overlay.
 *
 * Returns 0 on success, negative errno otherwise. On failure the helper
 * devices are removed again.
 */
static int reencrypt_init_device_stack(struct crypt_device *cd,
				     const struct luks2_reenc_context *rh)
{
	int r;

	/* Activate hotzone device 1:1 linear mapping to data_device */
	r = reencrypt_activate_hotzone_device(cd, rh->hotzone_name, rh->device_size, CRYPT_ACTIVATE_PRIVATE);
	if (r) {
		log_err(cd, _("Failed to activate hotzone device %s."), rh->hotzone_name);
		return r;
	}

	/*
	 * Activate overlay device with exactly same table as original 'name' mapping.
	 * Note that within this step the 'name' device may already include a table
	 * constructed from more than single dm-crypt segment. Therefore transfer
	 * mapping as is.
	 *
	 * If we're about to resume reencryption orig mapping has to be already validated for
	 * abrupt shutdown and rchunk_offset has to point on next chunk to reencrypt!
	 *
	 * TODO: in crypt_activate_by*
	 */
	r = reencrypt_replace_device(cd, rh->overlay_name, rh->device_name, CRYPT_ACTIVATE_PRIVATE);
	if (r) {
		log_err(cd, _("Failed to activate overlay device %s with actual origin table."), rh->overlay_name);
		goto err;
	}

	/* swap origin mapping to overlay device */
	r = reencrypt_swap_backing_device(cd, rh->device_name, rh->overlay_name);
	if (r) {
		log_err(cd, _("Failed to load new mapping for device %s."), rh->device_name);
		goto err;
	}

	/*
	 * Now the 'name' (unlocked luks) device is mapped via dm-linear to an overlay dev.
	 * The overlay device has a original live table of 'name' device in-before the swap.
	 */

	return 0;
err:
	/* TODO: force error helper devices on error path */
	dm_remove_device(cd, rh->overlay_name, 0);
	dm_remove_device(cd, rh->hotzone_name, 0);

	return r;
}
2006
2007 /* TODO:
2008  *      1) audit error path. any error in this routine is fatal and should be unlikely.
2009  *         usually it would hint some collision with another userspace process touching
2010  *         dm devices directly.
2011  */
/*
 * Switch the overlay device to its preloaded inactive table while keeping
 * the hotzone suspended, in deadlock-safe order (top of the stack first).
 *
 * Returns 0 on success, negative errno otherwise.
 */
static int reenc_refresh_helper_devices(struct crypt_device *cd, const char *overlay, const char *hotzone)
{
	int r;

	/*
	 * we have to explicitly suspend the overlay device before suspending
	 * the hotzone one. Resuming overlay device (aka switching tables) only
	 * after suspending the hotzone may lead to deadlock.
	 *
	 * In other words: always suspend the stack from top to bottom!
	 */
	r = dm_suspend_device(cd, overlay, DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH);
	if (r) {
		log_err(cd, _("Failed to suspend device %s."), overlay);
		return r;
	}

	/* suspend HZ device */
	r = dm_suspend_device(cd, hotzone, DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH);
	if (r) {
		log_err(cd, _("Failed to suspend device %s."), hotzone);
		return r;
	}

	/* resume overlay device: inactive table (with hotzone) -> live */
	r = dm_resume_device(cd, overlay, DM_RESUME_PRIVATE);
	if (r)
		log_err(cd, _("Failed to resume device %s."), overlay);

	return r;
}
2043
2044 static int reencrypt_refresh_overlay_devices(struct crypt_device *cd,
2045                 struct luks2_hdr *hdr,
2046                 const char *overlay,
2047                 const char *hotzone,
2048                 struct volume_key *vks,
2049                 uint64_t device_size,
2050                 uint32_t flags)
2051 {
2052         int r = reencrypt_load_overlay_device(cd, hdr, overlay, hotzone, vks, device_size, flags);
2053         if (r) {
2054                 log_err(cd, _("Failed to reload device %s."), overlay);
2055                 return REENC_ERR;
2056         }
2057
2058         r = reenc_refresh_helper_devices(cd, overlay, hotzone);
2059         if (r) {
2060                 log_err(cd, _("Failed to refresh reencryption devices stack."));
2061                 return REENC_ROLLBACK;
2062         }
2063
2064         return REENC_OK;
2065 }
2066
/*
 * Move the first data segment from the head of the data device to its
 * destination offset (making room for the new LUKS2 header during
 * encryption with CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT).
 *
 * The whole segment (data_shift bytes) is read from offset 0 and written
 * to the offset stored in segment 0 of the header.
 *
 * Returns 0 on success, negative errno otherwise.
 */
static int reencrypt_move_data(struct crypt_device *cd, int devfd, uint64_t data_shift)
{
	void *buffer;
	int r;
	ssize_t ret;
	uint64_t buffer_len, offset;
	struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);

	log_dbg(cd, "Going to move data from head of data device.");

	buffer_len = data_shift;
	if (!buffer_len)
		return -EINVAL;

	/* destination: offset of segment 0 (in bytes) */
	offset = json_segment_get_offset(LUKS2_get_segment_jobj(hdr, 0), 0);

	/* this is nonsense anyway */
	if (buffer_len != json_segment_get_size(LUKS2_get_segment_jobj(hdr, 0), 0)) {
		log_dbg(cd, "buffer_len %" PRIu64", segment size %" PRIu64, buffer_len, json_segment_get_size(LUKS2_get_segment_jobj(hdr, 0), 0));
		return -EINVAL;
	}

	if (posix_memalign(&buffer, device_alignment(crypt_data_device(cd)), buffer_len))
		return -ENOMEM;

	/* read the whole segment from the device head (offset 0) */
	ret = read_lseek_blockwise(devfd,
			device_block_size(cd, crypt_data_device(cd)),
			device_alignment(crypt_data_device(cd)),
			buffer, buffer_len, 0);
	if (ret < 0 || (uint64_t)ret != buffer_len) {
		r = -EIO;
		goto err;
	}

	log_dbg(cd, "Going to write %" PRIu64 " bytes at offset %" PRIu64, buffer_len, offset);
	ret = write_lseek_blockwise(devfd,
			device_block_size(cd, crypt_data_device(cd)),
			device_alignment(crypt_data_device(cd)),
			buffer, buffer_len, offset);
	if (ret < 0 || (uint64_t)ret != buffer_len) {
		r = -EIO;
		goto err;
	}

	r = 0;
err:
	/* NOTE(review): buffer holds plaintext device data; a plain memset
	 * immediately before free() may be optimized away by the compiler —
	 * consider the project's secure memzero helper. TODO confirm. */
	memset(buffer, 0, buffer_len);
	free(buffer);
	return r;
}
2117
/*
 * Create the backup segments describing the layout before and after
 * reencryption ("backup-previous", "backup-final", and optionally
 * "backup-moved-segment") and bind them to the proper key digests.
 *
 * @keyslot_new keyslot holding the new volume key (ignored for decrypt)
 * @cipher      cipher spec for the final "crypt" segment
 * @data_offset current data offset in bytes
 * @params      reencryption parameters (mode, direction, data shift, ...)
 *
 * Returns 0 on success, negative errno otherwise.
 */
static int reencrypt_make_backup_segments(struct crypt_device *cd,
		struct luks2_hdr *hdr,
		int keyslot_new,
		const char *cipher,
		uint64_t data_offset,
		const struct crypt_params_reencrypt *params)
{
	int r, segment, moved_segment = -1, digest_old = -1, digest_new = -1;
	json_object *jobj_segment_new = NULL, *jobj_segment_old = NULL, *jobj_segment_bcp = NULL;
	uint32_t sector_size = params->luks2 ? params->luks2->sector_size : SECTOR_SIZE;
	/* params->data_shift is in sectors; keep it in bytes locally */
	uint64_t segment_offset, tmp, data_shift = params->data_shift << SECTOR_SHIFT;

	/* new digest is required for everything except plain decryption */
	if (params->mode != CRYPT_REENCRYPT_DECRYPT) {
		digest_new = LUKS2_digest_by_keyslot(hdr, keyslot_new);
		if (digest_new < 0)
			return -EINVAL;
	}

	/* old digest is required for everything except initial encryption */
	if (params->mode != CRYPT_REENCRYPT_ENCRYPT) {
		digest_old = LUKS2_digest_by_segment(hdr, CRYPT_DEFAULT_SEGMENT);
		if (digest_old < 0)
			return -EINVAL;
	}

	segment = LUKS2_segment_first_unused_id(hdr);
	if (segment < 0)
		return -EINVAL;

	/* keep a backup of the moved first segment when encrypting in-place */
	if (params->mode == CRYPT_REENCRYPT_ENCRYPT &&
	    (params->flags & CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT)) {
		json_object_copy(LUKS2_get_segment_jobj(hdr, 0), &jobj_segment_bcp);
		r = LUKS2_segment_set_flag(jobj_segment_bcp, "backup-moved-segment");
		if (r)
			goto err;
		moved_segment = segment++;
		/* NOTE(review): return value unchecked here (and below); an add
		 * failure would leak jobj_segment_bcp — TODO confirm intended */
		json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), moved_segment, jobj_segment_bcp);
	}

	/* FIXME: Add detection for case (digest old == digest new && old segment == new segment) */
	if (digest_old >= 0)
		json_object_copy(LUKS2_get_segment_jobj(hdr, CRYPT_DEFAULT_SEGMENT), &jobj_segment_old);
	else if (params->mode == CRYPT_REENCRYPT_ENCRYPT) {
		/* encrypting a plain device: previous layout is one linear segment */
		r = LUKS2_get_data_size(hdr, &tmp, NULL);
		if (r)
			goto err;
		jobj_segment_old = json_segment_create_linear(0, tmp ? &tmp : NULL, 0);
	}

	if (!jobj_segment_old) {
		r = -EINVAL;
		goto err;
	}

	r = LUKS2_segment_set_flag(jobj_segment_old, "backup-previous");
	if (r)
		goto err;
	json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), segment, jobj_segment_old);
	jobj_segment_old = NULL; /* ownership moved to segments object */
	if (digest_old >= 0)
		LUKS2_digest_segment_assign(cd, hdr, segment, digest_old, 1, 0);
	segment++;

	if (digest_new >= 0) {
		/* final layout is a "crypt" segment; apply data shift unless encrypting */
		segment_offset = data_offset;
		if (params->mode != CRYPT_REENCRYPT_ENCRYPT &&
		    modify_offset(&segment_offset, data_shift, params->direction)) {
			r = -EINVAL;
			goto err;
		}
		jobj_segment_new = json_segment_create_crypt(segment_offset,
							crypt_get_iv_offset(cd),
							NULL, cipher, sector_size, 0);
	} else if (params->mode == CRYPT_REENCRYPT_DECRYPT) {
		/* final layout after decryption is plain linear */
		segment_offset = data_offset;
		if (modify_offset(&segment_offset, data_shift, params->direction)) {
			r = -EINVAL;
			goto err;
		}
		jobj_segment_new = json_segment_create_linear(segment_offset, NULL, 0);
	}

	if (!jobj_segment_new) {
		r = -EINVAL;
		goto err;
	}

	r = LUKS2_segment_set_flag(jobj_segment_new, "backup-final");
	if (r)
		goto err;
	json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), segment, jobj_segment_new);
	jobj_segment_new = NULL; /* ownership moved to segments object */
	if (digest_new >= 0)
		LUKS2_digest_segment_assign(cd, hdr, segment, digest_new, 1, 0);

	/* FIXME: also check occupied space by keyslot in shrunk area */
	if (params->direction == CRYPT_REENCRYPT_FORWARD && data_shift &&
	    crypt_metadata_device(cd) == crypt_data_device(cd) &&
	    LUKS2_set_keyslots_size(cd, hdr, json_segment_get_offset(reencrypt_segment_new(hdr), 0))) {
		log_err(cd, _("Failed to set new keyslots area size."));
		r = -EINVAL;
		goto err;
	}

	return 0;
err:
	json_object_put(jobj_segment_new);
	json_object_put(jobj_segment_old);
	return r;
}
2227
2228 static int reencrypt_verify_and_upload_keys(struct crypt_device *cd, struct luks2_hdr *hdr, int digest_old, int digest_new, struct volume_key *vks)
2229 {
2230         int r;
2231         struct volume_key *vk;
2232
2233         if (digest_new >= 0) {
2234                 vk = crypt_volume_key_by_id(vks, digest_new);
2235                 if (!vk)
2236                         return -ENOENT;
2237                 else {
2238                         if (LUKS2_digest_verify_by_digest(cd, hdr, digest_new, vk) != digest_new)
2239                                 return -EINVAL;
2240
2241                         if (crypt_use_keyring_for_vk(cd) &&
2242                             (r = LUKS2_volume_key_load_in_keyring_by_digest(cd, hdr, vk, crypt_volume_key_get_id(vk))))
2243                                 return r;
2244                 }
2245         }
2246
2247         if (digest_old >= 0 && digest_old != digest_new) {
2248                 vk = crypt_volume_key_by_id(vks, digest_old);
2249                 if (!vk) {
2250                         r = -ENOENT;
2251                         goto err;
2252                 } else {
2253                         if (LUKS2_digest_verify_by_digest(cd, hdr, digest_old, vk) != digest_old) {
2254                                 r = -EINVAL;
2255                                 goto err;
2256                         }
2257                         if (crypt_use_keyring_for_vk(cd) &&
2258                             (r = LUKS2_volume_key_load_in_keyring_by_digest(cd, hdr, vk, crypt_volume_key_get_id(vk))))
2259                                 goto err;
2260                 }
2261         }
2262
2263         return 0;
2264 err:
2265         crypt_drop_keyring_key(cd, vks);
2266         return r;
2267 }
2268
/*
 * Initialize LUKS2 reencryption (encrypt, reencrypt or decrypt mode) in
 * metadata: validates parameters, allocates the reencrypt keyslot,
 * creates backup segments, unlocks volume keys, optionally moves the
 * first data segment, and finally sets the online-reencrypt requirement
 * flag (the only on-disk metadata write performed here).
 *
 * Returns the allocated reencrypt keyslot number on success, negative
 * errno on error. On error, in-memory metadata changes are reverted by
 * reloading the on-disk header.
 *
 * This function must be called with metadata lock held.
 */
static int reencrypt_init(struct crypt_device *cd,
		const char *name,
		struct luks2_hdr *hdr,
		const char *passphrase,
		size_t passphrase_size,
		int keyslot_old,
		int keyslot_new,
		const char *cipher,
		const char *cipher_mode,
		const struct crypt_params_reencrypt *params,
		struct volume_key **vks)
{
	bool move_first_segment;
	char _cipher[128];
	uint32_t sector_size;
	int r, reencrypt_keyslot, devfd = -1;
	uint64_t data_offset, dev_size = 0;
	struct crypt_dm_active_device dmd_target, dmd_source = {
		.uuid = crypt_get_uuid(cd),
		.flags = CRYPT_ACTIVATE_SHARED /* turn off exclusive open checks */
	};

	if (!params || params->mode > CRYPT_REENCRYPT_DECRYPT)
		return -EINVAL;

	/* Encrypt/reencrypt modes require LUKS2 params, a cipher spec and a valid new keyslot. */
	if (params->mode != CRYPT_REENCRYPT_DECRYPT &&
	    (!params->luks2 || !(cipher && cipher_mode) || keyslot_new < 0))
		return -EINVAL;

	log_dbg(cd, "Initializing reencryption (mode: %s) in LUKS2 metadata.",
		    crypt_reencrypt_mode_to_str(params->mode));

	move_first_segment = (params->flags & CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT);

	/* implicit sector size 512 for decryption */
	sector_size = params->luks2 ? params->luks2->sector_size : SECTOR_SIZE;
	if (sector_size < SECTOR_SIZE || sector_size > MAX_SECTOR_SIZE ||
	    NOTPOW2(sector_size)) {
		log_err(cd, _("Unsupported encryption sector size."));
		return -EINVAL;
	}

	/* Build "cipher" or "cipher-mode" string for the backup segments. */
	if (!cipher_mode || *cipher_mode == '\0')
		snprintf(_cipher, sizeof(_cipher), "%s", cipher);
	else
		snprintf(_cipher, sizeof(_cipher), "%s-%s", cipher, cipher_mode);

	/* data_shift is in 512-byte sectors; it must be a multiple of the encryption sector size. */
	if (MISALIGNED(params->data_shift, sector_size >> SECTOR_SHIFT)) {
		log_err(cd, _("Data shift is not aligned to requested encryption sector size (%" PRIu32 " bytes)."), sector_size);
		return -EINVAL;
	}

	data_offset = LUKS2_get_data_offset(hdr) << SECTOR_SHIFT;

	r = device_check_access(cd, crypt_data_device(cd), DEV_OK);
	if (r)
		return r;

	r = device_check_size(cd, crypt_data_device(cd), data_offset, 1);
	if (r)
		return r;

	r = device_size(crypt_data_device(cd), &dev_size);
	if (r)
		return r;

	/* From here on dev_size is the usable data area size (bytes past data_offset). */
	dev_size -= data_offset;

	if (MISALIGNED(dev_size, sector_size)) {
		log_err(cd, _("Data device is not aligned to requested encryption sector size (%" PRIu32 " bytes)."), sector_size);
		return -EINVAL;
	}

	reencrypt_keyslot = LUKS2_keyslot_find_empty(hdr);
	if (reencrypt_keyslot < 0) {
		log_err(cd, _("All key slots full."));
		return -EINVAL;
	}

	/*
	 * We must perform data move with exclusive open data device
	 * to exclude another cryptsetup process to collide with
	 * encryption initialization (or mount)
	 */
	if (move_first_segment) {
		/* The moved segment is copied into the shifted area, so the device must hold both copies. */
		if (dev_size < 2 * (params->data_shift << SECTOR_SHIFT)) {
			log_err(cd, _("Device %s is too small."), device_path(crypt_data_device(cd)));
			return -EINVAL;
		}
		if (params->data_shift < LUKS2_get_data_offset(hdr)) {
			log_err(cd, _("Data shift (%" PRIu64 " sectors) is less than future data offset (%" PRIu64 " sectors)."), params->data_shift, LUKS2_get_data_offset(hdr));
			return -EINVAL;
		}
		devfd = device_open_excl(cd, crypt_data_device(cd), O_RDWR);
		if (devfd < 0) {
			if (devfd == -EBUSY)
				log_err(cd,_("Failed to open %s in exclusive mode (already mapped or mounted)."), device_path(crypt_data_device(cd)));
			return -EINVAL;
		}
	}

	if (params->mode == CRYPT_REENCRYPT_ENCRYPT) {
		/* in-memory only */
		r = reencrypt_set_encrypt_segments(cd, hdr, dev_size, params->data_shift << SECTOR_SHIFT, move_first_segment, params->direction);
		if (r)
			goto err;
	}

	r = LUKS2_keyslot_reencrypt_create(cd, hdr, reencrypt_keyslot,
					   params);
	if (r < 0)
		goto err;

	r = reencrypt_make_backup_segments(cd, hdr, keyslot_new, _cipher, data_offset, params);
	if (r) {
		log_dbg(cd, "Failed to create reencryption backup device segments.");
		goto err;
	}

	r = LUKS2_keyslot_open_all_segments(cd, keyslot_old, keyslot_new, passphrase, passphrase_size, vks);
	if (r < 0)
		goto err;

	/* For online (active mapping) cases, verify that the active dm table matches metadata. */
	if (name && params->mode != CRYPT_REENCRYPT_ENCRYPT) {
		r = reencrypt_verify_and_upload_keys(cd, hdr, LUKS2_reencrypt_digest_old(hdr), LUKS2_reencrypt_digest_new(hdr), *vks);
		if (r)
			goto err;

		r = dm_query_device(cd, name, DM_ACTIVE_UUID | DM_ACTIVE_DEVICE |
				    DM_ACTIVE_CRYPT_KEYSIZE | DM_ACTIVE_CRYPT_KEY |
				    DM_ACTIVE_CRYPT_CIPHER, &dmd_target);
		if (r < 0)
			goto err;

		r = LUKS2_assembly_multisegment_dmd(cd, hdr, *vks, LUKS2_get_segments_jobj(hdr), &dmd_source);
		if (!r) {
			r = crypt_compare_dm_devices(cd, &dmd_source, &dmd_target);
			if (r)
				log_err(cd, _("Mismatching parameters on device %s."), name);
		}

		dm_targets_free(cd, &dmd_source);
		dm_targets_free(cd, &dmd_target);
		free(CONST_CAST(void*)dmd_target.uuid);

		if (r)
			goto err;
	}

	/* Physically relocate the first segment before committing the requirement flag. */
	if (move_first_segment && reencrypt_move_data(cd, devfd, params->data_shift << SECTOR_SHIFT)) {
		r = -EIO;
		goto err;
	}

	/* This must be first and only write in LUKS2 metadata during _reencrypt_init */
	r = reencrypt_update_flag(cd, 1, true);
	if (r) {
		log_dbg(cd, "Failed to set online-reencryption requirement.");
		r = -EINVAL;
	} else
		r = reencrypt_keyslot;
err:
	/* Drops exclusive open of the data device taken for data move, if held
	 * (presumably also closes devfd — tracked inside the device object; confirm). */
	device_release_excl(cd, crypt_data_device(cd));
	/* Revert any in-memory-only metadata changes by reloading the on-disk header. */
	if (r < 0)
		crypt_load(cd, CRYPT_LUKS2, NULL);

	return r;
}
2438
2439 static int reencrypt_hotzone_protect_final(struct crypt_device *cd,
2440         struct luks2_hdr *hdr, struct luks2_reenc_context *rh,
2441         const void *buffer, size_t buffer_len)
2442 {
2443         const void *pbuffer;
2444         size_t data_offset, len;
2445         int r;
2446
2447         if (rh->rp.type == REENC_PROTECTION_NONE)
2448                 return 0;
2449
2450         if (rh->rp.type == REENC_PROTECTION_CHECKSUM) {
2451                 log_dbg(cd, "Checksums hotzone resilience.");
2452
2453                 for (data_offset = 0, len = 0; data_offset < buffer_len; data_offset += rh->alignment, len += rh->rp.p.csum.hash_size) {
2454                         if (crypt_hash_write(rh->rp.p.csum.ch, (const char *)buffer + data_offset, rh->alignment)) {
2455                                 log_dbg(cd, "Failed to hash sector at offset %zu.", data_offset);
2456                                 return -EINVAL;
2457                         }
2458                         if (crypt_hash_final(rh->rp.p.csum.ch, (char *)rh->rp.p.csum.checksums + len, rh->rp.p.csum.hash_size)) {
2459                                 log_dbg(cd, "Failed to finalize hash.");
2460                                 return -EINVAL;
2461                         }
2462                 }
2463                 pbuffer = rh->rp.p.csum.checksums;
2464         } else if (rh->rp.type == REENC_PROTECTION_JOURNAL) {
2465                 log_dbg(cd, "Journal hotzone resilience.");
2466                 len = buffer_len;
2467                 pbuffer = buffer;
2468         } else if (rh->rp.type == REENC_PROTECTION_DATASHIFT) {
2469                 log_dbg(cd, "Data shift hotzone resilience.");
2470                 return LUKS2_hdr_write(cd, hdr);
2471         } else
2472                 return -EINVAL;
2473
2474         log_dbg(cd, "Going to store %zu bytes in reencrypt keyslot.", len);
2475
2476         r = LUKS2_keyslot_reencrypt_store(cd, hdr, rh->reenc_keyslot, pbuffer, len);
2477
2478         return r > 0 ? 0 : r;
2479 }
2480
/*
 * Advance the reencryption context after one processed hotzone:
 * move rh->offset past the rh->read bytes just handled and clamp
 * rh->length so the next hotzone stays inside the device.
 *
 * Returns 0 on success, -EINVAL when rh->read is negative, the
 * direction is unknown, or the new offset runs past the device size.
 */
static int reencrypt_context_update(struct crypt_device *cd,
	struct luks2_reenc_context *rh)
{
	if (rh->read < 0)
		return -EINVAL;

	if (rh->direction == CRYPT_REENCRYPT_BACKWARD) {
		/* Encryption with data shift moves backward in data_shift-sized steps. */
		if (rh->data_shift && rh->mode == CRYPT_REENCRYPT_ENCRYPT) {
			if (rh->offset)
				rh->offset -= rh->data_shift;
			/* Remainder smaller than data_shift: process it from offset data_shift. */
			if (rh->offset && (rh->offset < rh->data_shift)) {
				rh->length = rh->offset;
				rh->offset = rh->data_shift;
			}
			if (!rh->offset)
				rh->length = rh->data_shift;
		} else {
			/* Plain backward: shrink final hotzone to whatever is left before offset 0. */
			if (rh->offset < rh->length)
				rh->length = rh->offset;
			rh->offset -= rh->length;
		}
	} else if (rh->direction == CRYPT_REENCRYPT_FORWARD) {
		rh->offset += (uint64_t)rh->read;
		/* it fails in-case of device_size < rh->offset later
		 * (the subtraction below may wrap, but the offset check after
		 * this branch rejects that state before length is ever used). */
		if (rh->device_size - rh->offset < rh->length)
			rh->length = rh->device_size - rh->offset;
	} else
		return -EINVAL;

	if (rh->device_size < rh->offset) {
		log_dbg(cd, "Calculated reencryption offset %" PRIu64 " is beyond device size %" PRIu64 ".", rh->offset, rh->device_size);
		return -EINVAL;
	}

	/* Total bytes reencrypted so far (progress reporting). */
	rh->progress += (uint64_t)rh->read;

	return 0;
}
2519
2520 static int reencrypt_load(struct crypt_device *cd, struct luks2_hdr *hdr,
2521                 uint64_t device_size,
2522                 const struct crypt_params_reencrypt *params,
2523                 struct luks2_reenc_context **rh)
2524 {
2525         int r;
2526         struct luks2_reenc_context *tmp = NULL;
2527         crypt_reencrypt_info ri = LUKS2_reenc_status(hdr);
2528
2529         if (ri == CRYPT_REENCRYPT_CLEAN)
2530                 r = reencrypt_load_clean(cd, hdr, device_size, &tmp, params);
2531         else if (ri == CRYPT_REENCRYPT_CRASH)
2532                 r = reencrypt_load_crashed(cd, hdr, device_size, &tmp);
2533         else if (ri == CRYPT_REENCRYPT_NONE) {
2534                 log_err(cd, _("Device not marked for LUKS2 reencryption."));
2535                 return -EINVAL;
2536         } else
2537                 r = -EINVAL;
2538
2539         if (r < 0 || !tmp) {
2540                 log_err(cd, _("Failed to load LUKS2 reencryption context."));
2541                 return r;
2542         }
2543
2544         *rh = tmp;
2545
2546         return 0;
2547 }
2548
2549 static int reencrypt_lock_internal(struct crypt_device *cd, const char *uuid, struct crypt_lock_handle **reencrypt_lock)
2550 {
2551         int r;
2552         char *lock_resource;
2553
2554         if (!crypt_metadata_locking_enabled()) {
2555                 *reencrypt_lock = NULL;
2556                 return 0;
2557         }
2558
2559         r = asprintf(&lock_resource, "LUKS2-reencryption-%s", uuid);
2560         if (r < 0)
2561                 return -ENOMEM;
2562         if (r < 20) {
2563                 r = -EINVAL;
2564                 goto out;
2565         }
2566
2567         r = crypt_write_lock(cd, lock_resource, false, reencrypt_lock);
2568 out:
2569         free(lock_resource);
2570
2571         return r;
2572 }
2573
2574 /* internal only */
2575 int crypt_reencrypt_lock_by_dm_uuid(struct crypt_device *cd, const char *dm_uuid, struct crypt_lock_handle **reencrypt_lock)
2576 {
2577         int r;
2578         char hdr_uuid[37];
2579         const char *uuid = crypt_get_uuid(cd);
2580
2581         if (!dm_uuid)
2582                 return -EINVAL;
2583
2584         if (!uuid) {
2585                 r = snprintf(hdr_uuid, sizeof(hdr_uuid), "%.8s-%.4s-%.4s-%.4s-%.12s",
2586                          dm_uuid + 6, dm_uuid + 14, dm_uuid + 18, dm_uuid + 22, dm_uuid + 26);
2587                 if (r < 0 || (size_t)r != (sizeof(hdr_uuid) - 1))
2588                         return -EINVAL;
2589         } else if (crypt_uuid_cmp(dm_uuid, uuid))
2590                 return -EINVAL;
2591
2592         return reencrypt_lock_internal(cd, uuid, reencrypt_lock);
2593 }
2594
2595 /* internal only */
2596 int crypt_reencrypt_lock(struct crypt_device *cd, struct crypt_lock_handle **reencrypt_lock)
2597 {
2598         if (!cd || !crypt_get_type(cd) || strcmp(crypt_get_type(cd), CRYPT_LUKS2))
2599                 return -EINVAL;
2600
2601         return reencrypt_lock_internal(cd, crypt_get_uuid(cd), reencrypt_lock);
2602 }
2603
/* internal only */
/* Release a reencryption lock taken by crypt_reencrypt_lock*();
 * thin wrapper over crypt_unlock_internal(). */
void crypt_reencrypt_unlock(struct crypt_device *cd, struct crypt_lock_handle *reencrypt_lock)
{
	crypt_unlock_internal(cd, reencrypt_lock);
}
2609
2610 static int reencrypt_lock_and_verify(struct crypt_device *cd, struct luks2_hdr *hdr,
2611                 struct crypt_lock_handle **reencrypt_lock)
2612 {
2613         int r;
2614         crypt_reencrypt_info ri;
2615         struct crypt_lock_handle *h;
2616
2617         ri = LUKS2_reenc_status(hdr);
2618         if (ri == CRYPT_REENCRYPT_INVALID) {
2619                 log_err(cd, _("Failed to get reencryption state."));
2620                 return -EINVAL;
2621         }
2622         if (ri < CRYPT_REENCRYPT_CLEAN) {
2623                 log_err(cd, _("Device is not in reencryption."));
2624                 return -EINVAL;
2625         }
2626
2627         r = crypt_reencrypt_lock(cd, &h);
2628         if (r < 0) {
2629                 if (r == -EBUSY)
2630                         log_err(cd, _("Reencryption process is already running."));
2631                 else
2632                         log_err(cd, _("Failed to acquire reencryption lock."));
2633                 return r;
2634         }
2635
2636         /* With reencryption lock held, reload device context and verify metadata state */
2637         r = crypt_load(cd, CRYPT_LUKS2, NULL);
2638         if (r) {
2639                 crypt_reencrypt_unlock(cd, h);
2640                 return r;
2641         }
2642
2643         ri = LUKS2_reenc_status(hdr);
2644         if (ri == CRYPT_REENCRYPT_CLEAN) {
2645                 *reencrypt_lock = h;
2646                 return 0;
2647         }
2648
2649         crypt_reencrypt_unlock(cd, h);
2650         log_err(cd, _("Cannot proceed with reencryption. Run reencryption recovery first."));
2651         return -EINVAL;
2652 }
2653
2654 static int reencrypt_load_by_passphrase(struct crypt_device *cd,
2655                 const char *name,
2656                 const char *passphrase,
2657                 size_t passphrase_size,
2658                 int keyslot_old,
2659                 int keyslot_new,
2660                 struct volume_key **vks,
2661                 const struct crypt_params_reencrypt *params)
2662 {
2663         int r, old_ss, new_ss;
2664         struct luks2_hdr *hdr;
2665         struct crypt_lock_handle *reencrypt_lock;
2666         struct luks2_reenc_context *rh;
2667         struct crypt_dm_active_device dmd_target, dmd_source = {
2668                 .uuid = crypt_get_uuid(cd),
2669                 .flags = CRYPT_ACTIVATE_SHARED /* turn off exclusive open checks */
2670         };
2671         uint64_t minimal_size, device_size, mapping_size = 0, required_size = 0;
2672         bool dynamic;
2673         struct crypt_params_reencrypt rparams = {};
2674         uint32_t flags = 0;
2675
2676         if (params) {
2677                 rparams = *params;
2678                 required_size = params->device_size;
2679         }
2680
2681         log_dbg(cd, "Loading LUKS2 reencryption context.");
2682
2683         rh = crypt_get_reenc_context(cd);
2684         if (rh) {
2685                 LUKS2_reenc_context_free(cd, rh);
2686                 crypt_set_reenc_context(cd, NULL);
2687                 rh = NULL;
2688         }
2689
2690         hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
2691
2692         r = reencrypt_lock_and_verify(cd, hdr, &reencrypt_lock);
2693         if (r)
2694                 return r;
2695
2696         /* From now on we hold reencryption lock */
2697
2698         if (LUKS2_get_data_size(hdr, &minimal_size, &dynamic))
2699                 return -EINVAL;
2700
2701         /* some configurations provides fixed device size */
2702         r = luks2_check_device_size(cd, hdr, minimal_size, &device_size, false, dynamic);
2703         if (r) {
2704                 r = -EINVAL;
2705                 goto err;
2706         }
2707
2708         minimal_size >>= SECTOR_SHIFT;
2709
2710         old_ss = reencrypt_get_sector_size_old(hdr);
2711         new_ss = reencrypt_get_sector_size_new(hdr);
2712
2713         r = reencrypt_verify_and_upload_keys(cd, hdr, LUKS2_reencrypt_digest_old(hdr), LUKS2_reencrypt_digest_new(hdr), *vks);
2714         if (r == -ENOENT) {
2715                 log_dbg(cd, "Keys are not ready. Unlocking all volume keys.");
2716                 r = LUKS2_keyslot_open_all_segments(cd, keyslot_old, keyslot_new, passphrase, passphrase_size, vks);
2717                 if (r < 0)
2718                         goto err;
2719                 r = reencrypt_verify_and_upload_keys(cd, hdr, LUKS2_reencrypt_digest_old(hdr), LUKS2_reencrypt_digest_new(hdr), *vks);
2720         }
2721
2722         if (r < 0)
2723                 goto err;
2724
2725         if (name) {
2726                 r = dm_query_device(cd, name, DM_ACTIVE_UUID | DM_ACTIVE_DEVICE |
2727                                     DM_ACTIVE_CRYPT_KEYSIZE | DM_ACTIVE_CRYPT_KEY |
2728                                     DM_ACTIVE_CRYPT_CIPHER, &dmd_target);
2729                 if (r < 0)
2730                         goto err;
2731                 flags = dmd_target.flags;
2732
2733                 r = LUKS2_assembly_multisegment_dmd(cd, hdr, *vks, LUKS2_get_segments_jobj(hdr), &dmd_source);
2734                 if (!r) {
2735                         r = crypt_compare_dm_devices(cd, &dmd_source, &dmd_target);
2736                         if (r)
2737                                 log_err(cd, _("Mismatching parameters on device %s."), name);
2738                 }
2739
2740                 dm_targets_free(cd, &dmd_source);
2741                 dm_targets_free(cd, &dmd_target);
2742                 free(CONST_CAST(void*)dmd_target.uuid);
2743                 if (r)
2744                         goto err;
2745                 mapping_size = dmd_target.size;
2746         }
2747
2748         r = -EINVAL;
2749         if (required_size && mapping_size && (required_size != mapping_size)) {
2750                 log_err(cd, _("Active device size and requested reencryption size don't match."));
2751                 goto err;
2752         }
2753
2754         if (mapping_size)
2755                 required_size = mapping_size;
2756
2757         if (required_size) {
2758                 /* TODO: Add support for changing fixed minimal size in reencryption mda where possible */
2759                 if ((minimal_size && (required_size < minimal_size)) ||
2760                     (required_size > (device_size >> SECTOR_SHIFT)) ||
2761                     (!dynamic && (required_size != minimal_size)) ||
2762                     (old_ss > 0 && MISALIGNED(required_size, old_ss >> SECTOR_SHIFT)) ||
2763                     (new_ss > 0 && MISALIGNED(required_size, new_ss >> SECTOR_SHIFT))) {
2764                         log_err(cd, _("Illegal device size requested in reencryption parameters."));
2765                         goto err;
2766                 }
2767                 rparams.device_size = required_size;
2768         }
2769
2770         r = reencrypt_load(cd, hdr, device_size, &rparams, &rh);
2771         if (r < 0 || !rh)
2772                 goto err;
2773
2774         if (name && (r = reencrypt_context_set_names(rh, name)))
2775                 goto err;
2776
2777         /* Reassure device is not mounted and there's no dm mapping active */
2778         if (!name && (device_open_excl(cd, crypt_data_device(cd), O_RDONLY) < 0)) {
2779                 log_err(cd,_("Failed to open %s in exclusive mode (already mapped or mounted)."), device_path(crypt_data_device(cd)));
2780                 r = -EBUSY;
2781                 goto err;
2782         }
2783         device_release_excl(cd, crypt_data_device(cd));
2784
2785         /* FIXME: There's a race for dm device activation not managed by cryptsetup.
2786          *
2787          * 1) excl close
2788          * 2) rogue dm device activation
2789          * 3) one or more dm-crypt based wrapper activation
2790          * 4) next excl open get's skipped due to 3) device from 2) remains undetected.
2791          */
2792         r = reencrypt_init_storage_wrappers(cd, hdr, rh, *vks);
2793         if (r)
2794                 goto err;
2795
2796         /* If one of wrappers is based on dmcrypt fallback it already blocked mount */
2797         if (!name && crypt_storage_wrapper_get_type(rh->cw1) != DMCRYPT &&
2798             crypt_storage_wrapper_get_type(rh->cw2) != DMCRYPT) {
2799                 if (device_open_excl(cd, crypt_data_device(cd), O_RDONLY) < 0) {
2800                         log_err(cd,_("Failed to open %s in exclusive mode (already mapped or mounted)."), device_path(crypt_data_device(cd)));
2801                         r = -EBUSY;
2802                         goto err;
2803                 }
2804         }
2805
2806         rh->flags = flags;
2807
2808         MOVE_REF(rh->vks, *vks);
2809         MOVE_REF(rh->reenc_lock, reencrypt_lock);
2810
2811         crypt_set_reenc_context(cd, rh);
2812
2813         return 0;
2814 err:
2815         crypt_reencrypt_unlock(cd, reencrypt_lock);
2816         LUKS2_reenc_context_free(cd, rh);
2817         return r;
2818 }
2819
2820 static int reencrypt_recovery_by_passphrase(struct crypt_device *cd,
2821         struct luks2_hdr *hdr,
2822         int keyslot_old,
2823         int keyslot_new,
2824         const char *passphrase,
2825         size_t passphrase_size)
2826 {
2827         int r;
2828         crypt_reencrypt_info ri;
2829         struct crypt_lock_handle *reencrypt_lock;
2830
2831         r = crypt_reencrypt_lock(cd, &reencrypt_lock);
2832         if (r) {
2833                 if (r == -EBUSY)
2834                         log_err(cd, _("Reencryption in-progress. Cannot perform recovery."));
2835                 else
2836                         log_err(cd, _("Failed to get reencryption lock."));
2837                 return r;
2838         }
2839
2840         if ((r = crypt_load(cd, CRYPT_LUKS2, NULL))) {
2841                 crypt_reencrypt_unlock(cd, reencrypt_lock);
2842                 return r;
2843         }
2844
2845         ri = LUKS2_reenc_status(hdr);
2846         if (ri == CRYPT_REENCRYPT_INVALID) {
2847                 crypt_reencrypt_unlock(cd, reencrypt_lock);
2848                 return -EINVAL;
2849         }
2850
2851         if (ri == CRYPT_REENCRYPT_CRASH) {
2852                 r = LUKS2_reencrypt_locked_recovery_by_passphrase(cd, keyslot_old, keyslot_new,
2853                                 passphrase, passphrase_size, 0, NULL);
2854                 if (r < 0)
2855                         log_err(cd, _("LUKS2 reencryption recovery failed."));
2856         } else {
2857                 log_dbg(cd, "No LUKS2 reencryption recovery needed.");
2858                 r = 0;
2859         }
2860
2861         crypt_reencrypt_unlock(cd, reencrypt_lock);
2862         return r;
2863 }
2864
/*
 * Common worker for reencryption init/resume by passphrase: dispatches
 * recovery, validates the requested cipher, initializes reencryption
 * metadata when needed (under the metadata write lock) and finally
 * loads the runtime context unless initialize-only was requested.
 *
 * Returns the reencrypt keyslot number on success, negative errno on error.
 */
static int reencrypt_init_by_passphrase(struct crypt_device *cd,
	const char *name,
	const char *passphrase,
	size_t passphrase_size,
	int keyslot_old,
	int keyslot_new,
	const char *cipher,
	const char *cipher_mode,
	const struct crypt_params_reencrypt *params)
{
	int r;
	crypt_reencrypt_info ri;
	struct volume_key *vks = NULL;
	uint32_t flags = params ? params->flags : 0;
	struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);

	/* short-circuit in recovery and finish immediately. */
	if (flags & CRYPT_REENCRYPT_RECOVERY)
		return reencrypt_recovery_by_passphrase(cd, hdr, keyslot_old, keyslot_new, passphrase, passphrase_size);

	if (cipher) {
		/* Validate the requested cipher against the new keyslot's key size. */
		r = crypt_keyslot_get_key_size(cd, keyslot_new);
		if (r < 0)
			return r;
		r = LUKS2_check_cipher(cd, r, cipher, cipher_mode);
		if (r < 0)
			return r;
	}

	r = LUKS2_device_write_lock(cd, hdr, crypt_metadata_device(cd));
	if (r)
		return r;

	ri = LUKS2_reenc_status(hdr);
	if (ri == CRYPT_REENCRYPT_INVALID) {
		device_write_unlock(cd, crypt_metadata_device(cd));
		return -EINVAL;
	}

	/* Initialize-only cannot proceed when reencryption already exists in metadata. */
	if ((ri > CRYPT_REENCRYPT_NONE) && (flags & CRYPT_REENCRYPT_INITIALIZE_ONLY)) {
		device_write_unlock(cd, crypt_metadata_device(cd));
		log_err(cd, _("LUKS2 reencryption already initialized in metadata."));
		return -EBUSY;
	}

	if (ri == CRYPT_REENCRYPT_NONE && !(flags & CRYPT_REENCRYPT_RESUME_ONLY)) {
		r = reencrypt_init(cd, name, hdr, passphrase, passphrase_size, keyslot_old, keyslot_new, cipher, cipher_mode, params, &vks);
		if (r < 0)
			log_err(cd, _("Failed to initialize LUKS2 reencryption in metadata."));
	} else if (ri > CRYPT_REENCRYPT_NONE) {
		log_dbg(cd, "LUKS2 reencryption already initialized.");
		r = 0;
	}
	/* NOTE: ri == NONE with RESUME_ONLY takes neither branch, leaving r == 0
	 * from the write lock; the resume attempt below then fails in
	 * reencrypt_load_by_passphrase() ("Device is not in reencryption"). */

	device_write_unlock(cd, crypt_metadata_device(cd));

	if (r < 0 || (flags & CRYPT_REENCRYPT_INITIALIZE_ONLY))
		goto out;

	r = reencrypt_load_by_passphrase(cd, name, passphrase, passphrase_size, keyslot_old, keyslot_new, &vks, params);
out:
	/* On failure drop keyring keys; vks is freed here unless the loaded
	 * context took ownership (in which case vks is already NULL). */
	if (r < 0)
		crypt_drop_keyring_key(cd, vks);
	crypt_free_volume_key(vks);
	return r < 0 ? r : LUKS2_find_keyslot(hdr, "reencrypt");
}
2931
2932 int crypt_reencrypt_init_by_keyring(struct crypt_device *cd,
2933         const char *name,
2934         const char *passphrase_description,
2935         int keyslot_old,
2936         int keyslot_new,
2937         const char *cipher,
2938         const char *cipher_mode,
2939         const struct crypt_params_reencrypt *params)
2940 {
2941         int r;
2942         char *passphrase;
2943         size_t passphrase_size;
2944
2945         if (onlyLUKS2mask(cd, CRYPT_REQUIREMENT_ONLINE_REENCRYPT) || !passphrase_description)
2946                 return -EINVAL;
2947         if (params && (params->flags & CRYPT_REENCRYPT_INITIALIZE_ONLY) && (params->flags & CRYPT_REENCRYPT_RESUME_ONLY))
2948                 return -EINVAL;
2949
2950         r = keyring_get_passphrase(passphrase_description, &passphrase, &passphrase_size);
2951         if (r < 0) {
2952                 log_err(cd, _("Failed to read passphrase from keyring (error %d)."), r);
2953                 return -EINVAL;
2954         }
2955
2956         r = reencrypt_init_by_passphrase(cd, name, passphrase, passphrase_size, keyslot_old, keyslot_new, cipher, cipher_mode, params);
2957
2958         crypt_safe_memzero(passphrase, passphrase_size);
2959         free(passphrase);
2960
2961         return r;
2962 }
2963
2964 int crypt_reencrypt_init_by_passphrase(struct crypt_device *cd,
2965         const char *name,
2966         const char *passphrase,
2967         size_t passphrase_size,
2968         int keyslot_old,
2969         int keyslot_new,
2970         const char *cipher,
2971         const char *cipher_mode,
2972         const struct crypt_params_reencrypt *params)
2973 {
2974         if (onlyLUKS2mask(cd, CRYPT_REQUIREMENT_ONLINE_REENCRYPT) || !passphrase)
2975                 return -EINVAL;
2976         if (params && (params->flags & CRYPT_REENCRYPT_INITIALIZE_ONLY) && (params->flags & CRYPT_REENCRYPT_RESUME_ONLY))
2977                 return -EINVAL;
2978
2979         return reencrypt_init_by_passphrase(cd, name, passphrase, passphrase_size, keyslot_old, keyslot_new, cipher, cipher_mode, params);
2980 }
2981
/*
 * Perform a single reencryption step over the current hotzone.
 *
 * Sequence (order is significant — metadata commit points are marked):
 *   1. refresh reencrypt keyslot protection parameters (in memory),
 *   2. build and assign the new segment layout around the hotzone,
 *   3. (online) reroute I/O so no bio can hit the hotzone,
 *   4. read hotzone, store resilience metadata (commit point),
 *   5. decrypt with the old key, encrypt+write with the new key,
 *   6. commit the post-hotzone segment layout (commit safe point),
 *   7. (online) resume the hotzone device.
 *
 * Return values express how far the step got before failing:
 *   REENC_OK       - step completed,
 *   REENC_ERR      - failed before any on-disk data was touched,
 *   REENC_ROLLBACK - failed after metadata but before data was damaged,
 *   REENC_FATAL    - failed mid-write; recovery metadata is required.
 */
static reenc_status_t reencrypt_step(struct crypt_device *cd,
		struct luks2_hdr *hdr,
		struct luks2_reenc_context *rh,
		uint64_t device_size,
		bool online)
{
	int r;

	/* update reencrypt keyslot protection parameters in memory only */
	r = reenc_keyslot_update(cd, rh);
	if (r < 0) {
		log_dbg(cd, "Keyslot update failed.");
		return REENC_ERR;
	}

	/* in memory only */
	r = reencrypt_make_segments(cd, hdr, rh, device_size);
	if (r)
		return REENC_ERR;

	r = reencrypt_assign_segments(cd, hdr, rh, 1, 0);
	if (r) {
		log_err(cd, _("Failed to set device segments for next reencryption hotzone."));
		return REENC_ERR;
	}

	if (online) {
		r = reencrypt_refresh_overlay_devices(cd, hdr, rh->overlay_name, rh->hotzone_name, rh->vks, rh->device_size, rh->flags);
		/* Teardown overlay devices with dm-error. None bio shall pass! */
		if (r != REENC_OK)
			return r;
	}

	log_dbg(cd, "Reencrypting chunk starting at offset: %" PRIu64 ", size :%" PRIu64 ".", rh->offset, rh->length);
	log_dbg(cd, "data_offset: %" PRIu64, crypt_get_data_offset(cd) << SECTOR_SHIFT);

	/*
	 * First hotzone of a forward encryption with a moved first segment:
	 * the old data lives at the moved offset, so the "old" storage
	 * wrapper must be rebuilt to read from there.
	 */
	if (!rh->offset && rh->mode == CRYPT_REENCRYPT_ENCRYPT && rh->data_shift &&
	    rh->jobj_segment_moved) {
		crypt_storage_wrapper_destroy(rh->cw1);
		log_dbg(cd, "Reinitializing old segment storage wrapper for moved segment.");
		r = crypt_storage_wrapper_init(cd, &rh->cw1, crypt_data_device(cd),
				LUKS2_reencrypt_get_data_offset_moved(hdr),
				crypt_get_iv_offset(cd),
				reencrypt_get_sector_size_old(hdr),
				reencrypt_segment_cipher_old(hdr),
				crypt_volume_key_by_id(rh->vks, rh->digest_old),
				rh->wflags1);
		if (r) {
			log_err(cd, _("Failed to initialize old segment storage wrapper."));
			return REENC_ROLLBACK;
		}
	}

	rh->read = crypt_storage_wrapper_read(rh->cw1, rh->offset, rh->reenc_buffer, rh->length);
	if (rh->read < 0) {
		/* severity normal */
		log_err(cd, _("Failed to read hotzone area starting at %" PRIu64 "."), rh->offset);
		return REENC_ROLLBACK;
	}

	/* metadata commit point */
	r = reencrypt_hotzone_protect_final(cd, hdr, rh, rh->reenc_buffer, rh->read);
	if (r < 0) {
		/* severity normal */
		log_err(cd, _("Failed to write reencryption resilience metadata."));
		return REENC_ROLLBACK;
	}

	r = crypt_storage_wrapper_decrypt(rh->cw1, rh->offset, rh->reenc_buffer, rh->read);
	if (r) {
		/* severity normal */
		log_err(cd, _("Decryption failed."));
		return REENC_ROLLBACK;
	}
	/* write fewer bytes than read means partially damaged hotzone: fatal */
	if (rh->read != crypt_storage_wrapper_encrypt_write(rh->cw2, rh->offset, rh->reenc_buffer, rh->read)) {
		/* severity fatal */
		log_err(cd, _("Failed to write hotzone area starting at %" PRIu64 "."), rh->offset);
		return REENC_FATAL;
	}

	/* with resilience protection enabled, data must hit stable storage
	 * before metadata may claim the hotzone is done */
	if (rh->rp.type != REENC_PROTECTION_NONE && crypt_storage_wrapper_datasync(rh->cw2)) {
		log_err(cd, _("Failed to sync data."));
		return REENC_FATAL;
	}

	/* metadata commit safe point */
	r = reencrypt_assign_segments(cd, hdr, rh, 0, rh->rp.type != REENC_PROTECTION_NONE);
	if (r) {
		/* severity fatal */
		log_err(cd, _("Failed to update metadata after current reencryption hotzone completed."));
		return REENC_FATAL;
	}

	if (online) {
		/* severity normal */
		log_dbg(cd, "Resuming device %s", rh->hotzone_name);
		r = dm_resume_device(cd, rh->hotzone_name, DM_RESUME_PRIVATE);
		if (r) {
			log_err(cd, _("Failed to resume device %s."), rh->hotzone_name);
			return REENC_ERR;
		}
	}

	return REENC_OK;
}
3087
3088 static int reencrypt_erase_backup_segments(struct crypt_device *cd,
3089                 struct luks2_hdr *hdr)
3090 {
3091         int segment = LUKS2_get_segment_id_by_flag(hdr, "backup-previous");
3092         if (segment >= 0) {
3093                 if (LUKS2_digest_segment_assign(cd, hdr, segment, CRYPT_ANY_DIGEST, 0, 0))
3094                         return -EINVAL;
3095                 json_object_object_del_by_uint(LUKS2_get_segments_jobj(hdr), segment);
3096         }
3097         segment = LUKS2_get_segment_id_by_flag(hdr, "backup-final");
3098         if (segment >= 0) {
3099                 if (LUKS2_digest_segment_assign(cd, hdr, segment, CRYPT_ANY_DIGEST, 0, 0))
3100                         return -EINVAL;
3101                 json_object_object_del_by_uint(LUKS2_get_segments_jobj(hdr), segment);
3102         }
3103         segment = LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment");
3104         if (segment >= 0) {
3105                 if (LUKS2_digest_segment_assign(cd, hdr, segment, CRYPT_ANY_DIGEST, 0, 0))
3106                         return -EINVAL;
3107                 json_object_object_del_by_uint(LUKS2_get_segments_jobj(hdr), segment);
3108         }
3109
3110         return 0;
3111 }
3112
3113 static int reencrypt_wipe_moved_segment(struct crypt_device *cd, struct luks2_hdr *hdr, struct luks2_reenc_context *rh)
3114 {
3115         int r = 0;
3116         uint64_t offset, length;
3117
3118         if (rh->jobj_segment_moved) {
3119                 offset = json_segment_get_offset(rh->jobj_segment_moved, 0);
3120                 length = json_segment_get_size(rh->jobj_segment_moved, 0);
3121                 log_dbg(cd, "Wiping %" PRIu64 " bytes of backup segment data at offset %" PRIu64,
3122                         length, offset);
3123                 r = crypt_wipe_device(cd, crypt_data_device(cd), CRYPT_WIPE_RANDOM,
3124                                 offset, length, 1024 * 1024, NULL, NULL);
3125         }
3126
3127         return r;
3128 }
3129
/*
 * Teardown after a successful (or cleanly interrupted) reencryption run.
 *
 * Writes pending metadata, dismantles the online device stack and — when
 * the whole device has been processed — performs the final cleanup:
 * wipes the moved-segment backup, destroys obsolete keyslots, erases
 * backup segments and clears the reencryption requirement flag.
 *
 * Returns 0 on success, -EINVAL if the header cannot be written.
 */
static int reencrypt_teardown_ok(struct crypt_device *cd, struct luks2_hdr *hdr, struct luks2_reenc_context *rh)
{
	int i, r;
	uint32_t dmt_flags;
	/* finished == whole data area processed (progress reached device size) */
	bool finished = !(rh->device_size > rh->progress);

	/* with no resilience protection the header was not flushed per-step;
	 * write it now so progress survives */
	if (rh->rp.type == REENC_PROTECTION_NONE &&
	    LUKS2_hdr_write(cd, hdr)) {
		log_err(cd, _("Failed to write LUKS2 metadata."));
		return -EINVAL;
	}

	if (rh->online) {
		/* reload the top-level device with final segments, then
		 * remove the overlay/hotzone helper devices */
		r = LUKS2_reload(cd, rh->device_name, rh->vks, rh->device_size, rh->flags);
		if (r)
			log_err(cd, _("Failed to reload device %s."), rh->device_name);
		if (!r) {
			r = dm_resume_device(cd, rh->device_name, DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH);
			if (r)
				log_err(cd, _("Failed to resume device %s."), rh->device_name);
		}
		dm_remove_device(cd, rh->overlay_name, 0);
		dm_remove_device(cd, rh->hotzone_name, 0);

		/* after full decryption the mapping is plain linear; remove it
		 * lazily if the kernel supports deferred removal */
		if (!r && finished && rh->mode == CRYPT_REENCRYPT_DECRYPT &&
		    !dm_flags(cd, DM_LINEAR, &dmt_flags) && (dmt_flags & DM_DEFERRED_SUPPORTED))
		    dm_remove_device(cd, rh->device_name, CRYPT_DEACTIVATE_DEFERRED);
	}

	if (finished) {
		if (reencrypt_wipe_moved_segment(cd, hdr, rh))
			log_err(cd, _("Failed to wipe backup segment data."));
		if (reencrypt_get_data_offset_new(hdr) && LUKS2_set_keyslots_size(cd, hdr, reencrypt_get_data_offset_new(hdr)))
			log_dbg(cd, "Failed to set new keyslots area size.");
		/* destroy every keyslot still bound to the old volume key digest */
		if (rh->digest_old >= 0 && rh->digest_new != rh->digest_old)
			for (i = 0; i < LUKS2_KEYSLOTS_MAX; i++)
				if (LUKS2_digest_by_keyslot(hdr, i) == rh->digest_old)
					crypt_keyslot_destroy(cd, i);
		crypt_keyslot_destroy(cd, rh->reenc_keyslot);
		if (reencrypt_erase_backup_segments(cd, hdr))
			log_dbg(cd, "Failed to erase backup segments");

		/* do we need atomic erase? */
		if (reencrypt_update_flag(cd, 0, true))
			log_err(cd, _("Failed to disable reencryption requirement flag."));
	}

	return 0;
}
3179
3180 static void reencrypt_teardown_fatal(struct crypt_device *cd, struct luks2_hdr *hdr, struct luks2_reenc_context *rh)
3181 {
3182         log_err(cd, _("Fatal error while reencrypting chunk starting at %" PRIu64 ", %" PRIu64 " sectors long."),
3183                 (rh->offset >> SECTOR_SHIFT) + crypt_get_data_offset(cd), rh->length >> SECTOR_SHIFT);
3184
3185         if (rh->online) {
3186                 log_err(cd, "Reencryption was run in online mode.");
3187                 if (dm_status_suspended(cd, rh->hotzone_name) > 0) {
3188                         log_dbg(cd, "Hotzone device %s suspended, replacing with dm-error.", rh->hotzone_name);
3189                         if (dm_error_device(cd, rh->hotzone_name)) {
3190                                 log_err(cd, _("Failed to replace suspended device %s with dm-error target."), rh->hotzone_name);
3191                                 log_err(cd, _("Do not resume the device unless replaced with error target manually."));
3192                         }
3193                 }
3194         }
3195 }
3196
3197 static int reencrypt_teardown(struct crypt_device *cd, struct luks2_hdr *hdr,
3198                 struct luks2_reenc_context *rh, reenc_status_t rs, bool interrupted,
3199                 int (*progress)(uint64_t size, uint64_t offset, void *usrptr))
3200 {
3201         int r;
3202
3203         switch (rs) {
3204         case REENC_OK:
3205                 if (progress && !interrupted)
3206                         progress(rh->device_size, rh->progress, NULL);
3207                 r = reencrypt_teardown_ok(cd, hdr, rh);
3208                 break;
3209         case REENC_FATAL:
3210                 reencrypt_teardown_fatal(cd, hdr, rh);
3211                 /* fall-through */
3212         default:
3213                 r = -EIO;
3214         }
3215
3216         /* this frees reencryption lock */
3217         LUKS2_reenc_context_free(cd, rh);
3218         crypt_set_reenc_context(cd, NULL);
3219
3220         return r;
3221 }
3222
/*
 * Public entry point: run (resume) the LUKS2 reencryption main loop.
 *
 * Requires a reencryption context previously initialized on the handle
 * (crypt_reencrypt_init_by_passphrase). Processes hotzones one by one
 * until the whole device is done, a step fails, or the progress
 * callback requests interruption (non-zero return stops cleanly after
 * the current chunk). Teardown always runs and frees the context.
 *
 * Returns 0 on success (including clean interruption), negative errno
 * otherwise.
 */
int crypt_reencrypt(struct crypt_device *cd,
		    int (*progress)(uint64_t size, uint64_t offset, void *usrptr))
{
	int r;
	crypt_reencrypt_info ri;
	struct luks2_hdr *hdr;
	struct luks2_reenc_context *rh;
	reenc_status_t rs;
	bool quit = false;

	if (onlyLUKS2mask(cd, CRYPT_REQUIREMENT_ONLINE_REENCRYPT))
		return -EINVAL;

	hdr = crypt_get_hdr(cd, CRYPT_LUKS2);

	/* only NONE/CLEAN states may proceed; CRASH or INVALID need recovery */
	ri = LUKS2_reenc_status(hdr);
	if (ri > CRYPT_REENCRYPT_CLEAN) {
		log_err(cd, _("Cannot proceed with reencryption. Unexpected reencryption status."));
		return -EINVAL;
	}

	/* context must exist and hold the reencryption lock
	 * (unless metadata locking is globally disabled) */
	rh = crypt_get_reenc_context(cd);
	if (!rh || (!rh->reenc_lock && crypt_metadata_locking_enabled())) {
		log_err(cd, _("Missing or invalid reencrypt context."));
		return -EINVAL;
	}

	log_dbg(cd, "Resuming LUKS2 reencryption.");

	if (rh->online && reencrypt_init_device_stack(cd, rh)) {
		log_err(cd, _("Failed to initialize reencryption device stack."));
		return -EINVAL;
	}

	log_dbg(cd, "Progress %" PRIu64 ", device_size %" PRIu64, rh->progress, rh->device_size);

	rs = REENC_OK;

	/* main loop: one hotzone per iteration until done, error, or quit */
	while (!quit && (rh->device_size > rh->progress)) {
		rs = reencrypt_step(cd, hdr, rh, rh->device_size, rh->online);
		if (rs != REENC_OK)
			break;

		log_dbg(cd, "Progress %" PRIu64 ", device_size %" PRIu64, rh->progress, rh->device_size);
		/* non-zero from the callback requests a clean stop */
		if (progress && progress(rh->device_size, rh->progress, NULL))
			quit = true;

		r = reencrypt_context_update(cd, rh);
		if (r) {
			log_err(cd, _("Failed to update reencryption context."));
			rs = REENC_ERR;
			break;
		}

		log_dbg(cd, "Next reencryption offset will be %" PRIu64 " sectors.", rh->offset);
		log_dbg(cd, "Next reencryption chunk size will be %" PRIu64 " sectors).", rh->length);
	}

	/* teardown frees rh and the reencryption lock in all cases */
	r = reencrypt_teardown(cd, hdr, rh, rs, quit, progress);
	return r;
}
3284
/*
 * Recover an interrupted (crashed) reencryption: reload the context from
 * header metadata, replay/repair the last hotzone from resilience data,
 * restore the regular segment layout and commit the updated header.
 *
 * Returns 0 on success, negative errno otherwise. The locally created
 * context is always freed before returning.
 */
static int reencrypt_recovery(struct crypt_device *cd,
		struct luks2_hdr *hdr,
		uint64_t device_size,
		struct volume_key *vks)
{
	int r;
	struct luks2_reenc_context *rh = NULL;

	r = reencrypt_load(cd, hdr, device_size, NULL, &rh);
	if (r < 0) {
		log_err(cd, _("Failed to load LUKS2 reencryption context."));
		return r;
	}

	/* repair the interrupted hotzone using the stored resilience data */
	r = reencrypt_recover_segment(cd, hdr, rh, vks);
	if (r < 0)
		goto err;

	if ((r = reencrypt_assign_segments(cd, hdr, rh, 0, 0)))
		goto err;

	r = reencrypt_context_update(cd, rh);
	if (r) {
		log_err(cd, _("Failed to update reencryption context."));
		goto err;
	}

	/* finish teardown and persist the repaired header */
	r = reencrypt_teardown_ok(cd, hdr, rh);
	if (!r)
		r = LUKS2_hdr_write(cd, hdr);
err:
	LUKS2_reenc_context_free(cd, rh);

	return r;
}
3320
3321 /*
3322  * use only for calculation of minimal data device size.
3323  * The real data offset is taken directly from segments!
3324  */
3325 int LUKS2_reencrypt_data_offset(struct luks2_hdr *hdr, bool blockwise)
3326 {
3327         crypt_reencrypt_info ri = LUKS2_reenc_status(hdr);
3328         uint64_t data_offset = LUKS2_get_data_offset(hdr);
3329
3330         if (ri == CRYPT_REENCRYPT_CLEAN && reencrypt_direction(hdr) == CRYPT_REENCRYPT_FORWARD)
3331                 data_offset += reencrypt_data_shift(hdr) >> SECTOR_SHIFT;
3332
3333         return blockwise ? data_offset : data_offset << SECTOR_SHIFT;
3334 }
3335
/* internal only */
/*
 * Validate that the data device is large enough for the requested size.
 *
 * check_size is the minimal required data size in bytes (grown by the
 * data shift for backward reencryption with a moved segment, or dynamic
 * size). On success *dev_size receives the usable data size in bytes
 * (real device size minus data offset). activation == true additionally
 * demands exclusive device access.
 *
 * Returns 0 on success, negative errno otherwise.
 */
int luks2_check_device_size(struct crypt_device *cd, struct luks2_hdr *hdr, uint64_t check_size, uint64_t *dev_size, bool activation, bool dynamic)
{
	int r;
	uint64_t data_offset, real_size = 0;

	if (reencrypt_direction(hdr) == CRYPT_REENCRYPT_BACKWARD &&
	    (LUKS2_get_segment_by_flag(hdr, "backup-moved-segment") || dynamic))
		check_size += reencrypt_data_shift(hdr);

	r = device_check_access(cd, crypt_data_device(cd), activation ? DEV_EXCL : DEV_OK);
	if (r)
		return r;

	data_offset = LUKS2_reencrypt_data_offset(hdr, false);

	/* device must at least hold the header/data-offset area */
	r = device_check_size(cd, crypt_data_device(cd), data_offset, 1);
	if (r)
		return r;

	r = device_size(crypt_data_device(cd), &real_size);
	if (r)
		return r;

	/* NOTE(review): real_size - data_offset is logged before the
	 * real_size < data_offset guard below; if the device is smaller
	 * than the data offset the logged "calculated" value wraps
	 * (unsigned), but device_check_size() above should already have
	 * rejected that case — confirm */
	log_dbg(cd, "Required minimal device size: %" PRIu64 " (%" PRIu64 " sectors)"
		    ", real device size: %" PRIu64 " (%" PRIu64 " sectors)\n"
		    "calculated device size: %" PRIu64 " (%" PRIu64 " sectors)",
		    check_size, check_size >> SECTOR_SHIFT, real_size, real_size >> SECTOR_SHIFT,
		    real_size - data_offset, (real_size - data_offset) >> SECTOR_SHIFT);

	if (real_size < data_offset || (check_size && (real_size - data_offset) < check_size)) {
		log_err(cd, _("Device %s is too small."), device_path(crypt_data_device(cd)));
		return -EINVAL;
	}

	*dev_size = real_size - data_offset;

	return 0;
}
3375
/* returns keyslot number on success (>= 0) or negative errnor otherwise */
/*
 * Crash-recovery entry point (caller holds the metadata lock).
 *
 * Opens volume keys for all segments with the given passphrase, loads
 * them into the kernel keyring when configured, validates the device
 * size and replays the interrupted reencryption step. On success,
 * ownership of the volume keys moves to *vks (if provided).
 */
int LUKS2_reencrypt_locked_recovery_by_passphrase(struct crypt_device *cd,
	int keyslot_old,
	int keyslot_new,
	const char *passphrase,
	size_t passphrase_size,
	uint32_t flags,
	struct volume_key **vks)
{
	uint64_t minimal_size, device_size;
	int keyslot, r = -EINVAL;
	struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
	struct volume_key *vk = NULL, *_vks = NULL;

	log_dbg(cd, "Entering reencryption crash recovery.");

	if (LUKS2_get_data_size(hdr, &minimal_size, NULL))
		return r;

	/* unlock volume keys for every segment (old and new) */
	r = LUKS2_keyslot_open_all_segments(cd, keyslot_old, keyslot_new,
			passphrase, passphrase_size, &_vks);
	if (r < 0)
		goto err;
	keyslot = r;

	if (crypt_use_keyring_for_vk(cd))
		vk = _vks;

	/* push each unlocked key into the kernel keyring */
	while (vk) {
		r = LUKS2_volume_key_load_in_keyring_by_digest(cd, hdr, vk, crypt_volume_key_get_id(vk));
		if (r < 0)
			goto err;
		vk = crypt_volume_key_next(vk);
	}

	if (luks2_check_device_size(cd, hdr, minimal_size, &device_size, true, false))
		goto err;

	r = reencrypt_recovery(cd, hdr, device_size, _vks);

	/* hand the key list over to the caller on success */
	if (!r && vks)
		MOVE_REF(*vks, _vks);
err:
	/* on failure also drop any keys already loaded into the keyring */
	if (r < 0)
		crypt_drop_keyring_key(cd, _vks);
	crypt_free_volume_key(_vks);

	return r < 0 ? r : keyslot;
}
3425
3426 crypt_reencrypt_info LUKS2_reencrypt_status(struct crypt_device *cd, struct crypt_params_reencrypt *params)
3427 {
3428         crypt_reencrypt_info ri;
3429         struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
3430
3431         ri = LUKS2_reenc_status(hdr);
3432         if (ri == CRYPT_REENCRYPT_NONE || ri == CRYPT_REENCRYPT_INVALID || !params)
3433                 return ri;
3434
3435         params->mode = reencrypt_mode(hdr);
3436         params->direction = reencrypt_direction(hdr);
3437         params->resilience = reencrypt_resilience_type(hdr);
3438         params->hash = reencrypt_resilience_hash(hdr);
3439         params->data_shift = reencrypt_data_shift(hdr) >> SECTOR_SHIFT;
3440         params->max_hotzone_size = 0;
3441         if (LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment") >= 0)
3442                 params->flags |= CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT;
3443
3444         return ri;
3445 }