Imported Upstream version 2.6.1
[platform/upstream/cryptsetup.git] / lib / luks2 / luks2_reencrypt.c
1 /*
2  * LUKS - Linux Unified Key Setup v2, reencryption helpers
3  *
4  * Copyright (C) 2015-2023 Red Hat, Inc. All rights reserved.
5  * Copyright (C) 2015-2023 Ondrej Kozina
6  *
7  * This program is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU General Public License
9  * as published by the Free Software Foundation; either version 2
10  * of the License, or (at your option) any later version.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20  */
21
22 #include "luks2_internal.h"
23 #include "utils_device_locking.h"
24
/*
 * Runtime context for a LUKS2 online reencryption operation.
 * Allocated per-operation; tracks the current hotzone window, the
 * backup segments describing old/new layouts, and the I/O state.
 */
struct luks2_reencrypt {
	/* reencryption window attributes */
	uint64_t offset;	/* current hotzone offset (bytes, relative to data area) */
	uint64_t progress;	/* bytes already reencrypted — assumed; confirm against users */
	uint64_t length;	/* current hotzone length in bytes */
	uint64_t device_size;	/* total data device size in bytes */
	bool online;		/* device is active (online reencryption) */
	bool fixed_length;	/* device_size is explicit, not 'dynamic' */
	crypt_reencrypt_direction_info direction;	/* forward or backward */
	crypt_reencrypt_mode_info mode;			/* reencrypt, encrypt or decrypt */

	char *device_name;	/* active device name (online only) */
	char *hotzone_name;	/* dm hotzone helper device name */
	char *overlay_name;	/* dm overlay helper device name */
	uint32_t flags;

	/* reencryption window persistence attributes */
	struct reenc_protection rp;			/* resilience for regular hotzones */
	struct reenc_protection rp_moved_segment;	/* resilience for the moved segment */

	int reenc_keyslot;	/* id of the "reencrypt" keyslot */

	/* already running reencryption */
	json_object *jobj_segs_hot;		/* segment map while hotzone is in flight */
	struct json_object *jobj_segs_post;	/* segment map after hotzone is finished */

	/* backup segments */
	json_object *jobj_segment_new;	/* "backup-final" segment (new layout) */
	int digest_new;			/* digest id bound to the new segment */
	json_object *jobj_segment_old;	/* "backup-previous" segment (old layout) */
	int digest_old;			/* digest id bound to the old segment */
	json_object *jobj_segment_moved;	/* "backup-moved-segment" (datashift) */

	struct volume_key *vks;		/* volume keys used during reencryption */

	void *reenc_buffer;	/* hotzone data buffer */
	ssize_t read;		/* bytes currently held in reenc_buffer */

	struct crypt_storage_wrapper *cw1;	/* storage wrapper (old format) */
	struct crypt_storage_wrapper *cw2;	/* storage wrapper (new format) */

	uint32_t wflags1;	/* open flags for cw1 */
	uint32_t wflags2;	/* open flags for cw2 */

	struct crypt_lock_handle *reenc_lock;	/* serialization lock for reencryption */
};
71 #if USE_LUKS2_REENCRYPTION
72 static uint64_t data_shift_value(struct reenc_protection *rp)
73 {
74         return rp->type == REENC_PROTECTION_DATASHIFT ? rp->p.ds.data_shift : 0;
75 }
76
77 static json_object *reencrypt_segment(struct luks2_hdr *hdr, unsigned new)
78 {
79         return LUKS2_get_segment_by_flag(hdr, new ? "backup-final" : "backup-previous");
80 }
81
/* Segment flagged "backup-final": describes the post-reencryption layout. */
static json_object *reencrypt_segment_new(struct luks2_hdr *hdr)
{
	return reencrypt_segment(hdr, 1);
}

/* Segment flagged "backup-previous": describes the pre-reencryption layout. */
static json_object *reencrypt_segment_old(struct luks2_hdr *hdr)
{
	return reencrypt_segment(hdr, 0);
}
91
92 static json_object *reencrypt_segments_old(struct luks2_hdr *hdr)
93 {
94         json_object *jobj_segments, *jobj = NULL;
95
96         if (json_object_copy(reencrypt_segment_old(hdr), &jobj))
97                 return NULL;
98
99         json_segment_remove_flag(jobj, "backup-previous");
100
101         jobj_segments = json_object_new_object();
102         if (!jobj_segments) {
103                 json_object_put(jobj);
104                 return NULL;
105         }
106
107         if (json_object_object_add_by_uint(jobj_segments, 0, jobj)) {
108                 json_object_put(jobj);
109                 json_object_put(jobj_segments);
110                 return NULL;
111         }
112
113         return jobj_segments;
114 }
115
116 static const char *reencrypt_segment_cipher_new(struct luks2_hdr *hdr)
117 {
118         return json_segment_get_cipher(reencrypt_segment(hdr, 1));
119 }
120
121 static const char *reencrypt_segment_cipher_old(struct luks2_hdr *hdr)
122 {
123         return json_segment_get_cipher(reencrypt_segment(hdr, 0));
124 }
125
126 static uint32_t reencrypt_get_sector_size_new(struct luks2_hdr *hdr)
127 {
128         return json_segment_get_sector_size(reencrypt_segment(hdr, 1));
129 }
130
131 static uint32_t reencrypt_get_sector_size_old(struct luks2_hdr *hdr)
132 {
133         return json_segment_get_sector_size(reencrypt_segment(hdr, 0));
134 }
135
136 static uint64_t reencrypt_data_offset(struct luks2_hdr *hdr, unsigned new)
137 {
138         json_object *jobj = reencrypt_segment(hdr, new);
139         if (jobj)
140                 return json_segment_get_offset(jobj, 0);
141
142         return LUKS2_get_data_offset(hdr) << SECTOR_SHIFT;
143 }
144
145 static uint64_t LUKS2_reencrypt_get_data_offset_moved(struct luks2_hdr *hdr)
146 {
147         json_object *jobj_segment = LUKS2_get_segment_by_flag(hdr, "backup-moved-segment");
148
149         if (!jobj_segment)
150                 return 0;
151
152         return json_segment_get_offset(jobj_segment, 0);
153 }
154
/* Data offset (bytes) according to the new (target) layout. */
static uint64_t reencrypt_get_data_offset_new(struct luks2_hdr *hdr)
{
	return reencrypt_data_offset(hdr, 1);
}

/* Data offset (bytes) according to the old (source) layout. */
static uint64_t reencrypt_get_data_offset_old(struct luks2_hdr *hdr)
{
	return reencrypt_data_offset(hdr, 0);
}
164 #endif
/*
 * Digest id bound to the new (new != 0) or old backup segment.
 * Negative value on error (missing segment or digest lookup failure).
 */
static int reencrypt_digest(struct luks2_hdr *hdr, unsigned new)
{
	const char *flag = new ? "backup-final" : "backup-previous";
	int segment = LUKS2_get_segment_id_by_flag(hdr, flag);

	return segment < 0 ? segment : LUKS2_digest_by_segment(hdr, segment);
}
174
/* Digest id for the new (target) layout; negative on error. */
int LUKS2_reencrypt_digest_new(struct luks2_hdr *hdr)
{
	return reencrypt_digest(hdr, 1);
}

/* Digest id for the old (source) layout; negative on error. */
int LUKS2_reencrypt_digest_old(struct luks2_hdr *hdr)
{
	return reencrypt_digest(hdr, 0);
}
184
185 /* none, checksums, journal or shift */
186 static const char *reencrypt_resilience_type(struct luks2_hdr *hdr)
187 {
188         json_object *jobj_keyslot, *jobj_area, *jobj_type;
189         int ks = LUKS2_find_keyslot(hdr, "reencrypt");
190
191         if (ks < 0)
192                 return NULL;
193
194         jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, ks);
195
196         json_object_object_get_ex(jobj_keyslot, "area", &jobj_area);
197         if (!json_object_object_get_ex(jobj_area, "type", &jobj_type))
198                 return NULL;
199
200         return json_object_get_string(jobj_type);
201 }
202
203 static const char *reencrypt_resilience_hash(struct luks2_hdr *hdr)
204 {
205         json_object *jobj_keyslot, *jobj_area, *jobj_type, *jobj_hash;
206         int ks = LUKS2_find_keyslot(hdr, "reencrypt");
207
208         if (ks < 0)
209                 return NULL;
210
211         jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, ks);
212
213         json_object_object_get_ex(jobj_keyslot, "area", &jobj_area);
214         if (!json_object_object_get_ex(jobj_area, "type", &jobj_type))
215                 return NULL;
216         if (strcmp(json_object_get_string(jobj_type), "checksum"))
217                 return NULL;
218         if (!json_object_object_get_ex(jobj_area, "hash", &jobj_hash))
219                 return NULL;
220
221         return json_object_get_string(jobj_hash);
222 }
223 #if USE_LUKS2_REENCRYPTION
/*
 * Build the post-hotzone segment map for encryption with data shift
 * (used with the moved segment, backward direction): segments before
 * the hotzone are copied unchanged and the hotzone is merged into the
 * segment that follows it (if any).  Returns a new json object owned
 * by the caller, or NULL on error.
 */
static json_object *_enc_create_segments_shift_after(struct luks2_reencrypt *rh, uint64_t data_offset)
{
	int reenc_seg, i = 0;
	json_object *jobj_copy, *jobj_seg_new = NULL, *jobj_segs_post = json_object_new_object();
	uint64_t tmp;

	if (!rh->jobj_segs_hot || !jobj_segs_post)
		goto err;

	/* empty hot map: an empty post map is the correct result */
	if (json_segments_count(rh->jobj_segs_hot) == 0)
		return jobj_segs_post;

	reenc_seg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot);
	if (reenc_seg < 0)
		goto err;

	/* copy (with a new reference) all segments preceding the hotzone */
	while (i < reenc_seg) {
		jobj_copy = json_segments_get_segment(rh->jobj_segs_hot, i);
		if (!jobj_copy)
			goto err;
		json_object_object_add_by_uint(jobj_segs_post, i++, json_object_get(jobj_copy));
	}

	if (json_object_copy(json_segments_get_segment(rh->jobj_segs_hot, reenc_seg + 1), &jobj_seg_new)) {
		/* no segment after the hotzone: reuse the hotzone segment itself */
		if (json_object_copy(json_segments_get_segment(rh->jobj_segs_hot, reenc_seg), &jobj_seg_new))
			goto err;
		json_segment_remove_flag(jobj_seg_new, "in-reencryption");
		tmp = rh->length;
	} else {
		/* extend the following segment backwards over the finished hotzone */
		json_object_object_add(jobj_seg_new, "offset", crypt_jobj_new_uint64(rh->offset + data_offset));
		json_object_object_add(jobj_seg_new, "iv_tweak", crypt_jobj_new_uint64(rh->offset >> SECTOR_SHIFT));
		tmp = json_segment_get_size(jobj_seg_new, 0) + rh->length;
	}

	/* alter size of new segment, reenc_seg == 0 we're finished */
	json_object_object_add(jobj_seg_new, "size", reenc_seg > 0 ? crypt_jobj_new_uint64(tmp) : json_object_new_string("dynamic"));
	json_object_object_add_by_uint(jobj_segs_post, reenc_seg, jobj_seg_new);

	return jobj_segs_post;
err:
	json_object_put(jobj_segs_post);
	return NULL;
}
267
/*
 * Build the hot segment map for encryption with data move (backward
 * direction with a moved segment): existing linear segments are copied,
 * the last linear segment may be shrunk to make room for the hotzone,
 * the in-reencryption crypt segment is appended, and an already
 * encrypted crypt segment (if present) follows.  Returns a new json
 * object owned by the caller, or NULL on error.
 */
static json_object *reencrypt_make_hot_segments_encrypt_shift(struct luks2_hdr *hdr,
	struct luks2_reencrypt *rh,
	uint64_t data_offset)
{
	int sg, crypt_seg, i = 0;
	uint64_t segment_size;
	json_object *jobj_seg_shrunk, *jobj_seg_new, *jobj_copy, *jobj_enc_seg = NULL,
		     *jobj_segs_hot = json_object_new_object();

	if (!jobj_segs_hot)
		return NULL;

	crypt_seg = LUKS2_segment_by_type(hdr, "crypt");

	/* FIXME: This is hack. Find proper way to fix it. */
	sg = LUKS2_last_segment_by_type(hdr, "linear");
	if (rh->offset && sg < 0)
		goto err;
	/* nothing left to encrypt: return the (empty) map */
	if (sg < 0)
		return jobj_segs_hot;

	/* the hotzone segment (flag 1 = in-reencryption) */
	/* NOTE(review): return value not checked for NULL before use below — verify upstream */
	jobj_enc_seg = json_segment_create_crypt(data_offset + rh->offset,
						      rh->offset >> SECTOR_SHIFT,
						      &rh->length,
						      reencrypt_segment_cipher_new(hdr),
						      reencrypt_get_sector_size_new(hdr),
						      1);

	/* copy (with a new reference) all segments preceding the last linear one */
	while (i < sg) {
		jobj_copy = LUKS2_get_segment_jobj(hdr, i);
		if (!jobj_copy)
			goto err;
		json_object_object_add_by_uint(jobj_segs_hot, i++, json_object_get(jobj_copy));
	}

	/* shrink the last linear segment by the hotzone length, if it is larger */
	segment_size = LUKS2_segment_size(hdr, sg, 0);
	if (segment_size > rh->length) {
		jobj_seg_shrunk = NULL;
		if (json_object_copy(LUKS2_get_segment_jobj(hdr, sg), &jobj_seg_shrunk))
			goto err;
		json_object_object_add(jobj_seg_shrunk, "size", crypt_jobj_new_uint64(segment_size - rh->length));
		json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_seg_shrunk);
	}

	json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_enc_seg);
	jobj_enc_seg = NULL; /* see err: label */

	/* first crypt segment after encryption ? */
	if (crypt_seg >= 0) {
		jobj_seg_new = LUKS2_get_segment_jobj(hdr, crypt_seg);
		if (!jobj_seg_new)
			goto err;
		json_object_object_add_by_uint(jobj_segs_hot, sg, json_object_get(jobj_seg_new));
	}

	return jobj_segs_hot;
err:
	json_object_put(jobj_enc_seg);
	json_object_put(jobj_segs_hot);

	return NULL;
}
330
331 static json_object *reencrypt_make_segment_new(struct crypt_device *cd,
332                 struct luks2_hdr *hdr,
333                 const struct luks2_reencrypt *rh,
334                 uint64_t data_offset,
335                 uint64_t segment_offset,
336                 uint64_t iv_offset,
337                 const uint64_t *segment_length)
338 {
339         switch (rh->mode) {
340         case CRYPT_REENCRYPT_REENCRYPT:
341         case CRYPT_REENCRYPT_ENCRYPT:
342                 return json_segment_create_crypt(data_offset + segment_offset,
343                                                   crypt_get_iv_offset(cd) + (iv_offset >> SECTOR_SHIFT),
344                                                   segment_length,
345                                                   reencrypt_segment_cipher_new(hdr),
346                                                   reencrypt_get_sector_size_new(hdr), 0);
347         case CRYPT_REENCRYPT_DECRYPT:
348                 return json_segment_create_linear(data_offset + segment_offset, segment_length, 0);
349         }
350
351         return NULL;
352 }
353
354 static json_object *reencrypt_make_post_segments_forward(struct crypt_device *cd,
355         struct luks2_hdr *hdr,
356         struct luks2_reencrypt *rh,
357         uint64_t data_offset)
358 {
359         int reenc_seg;
360         json_object *jobj_new_seg_after, *jobj_old_seg, *jobj_old_seg_copy = NULL,
361                     *jobj_segs_post = json_object_new_object();
362         uint64_t fixed_length = rh->offset + rh->length;
363
364         if (!rh->jobj_segs_hot || !jobj_segs_post)
365                 goto err;
366
367         reenc_seg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot);
368         if (reenc_seg < 0)
369                 return NULL;
370
371         jobj_old_seg = json_segments_get_segment(rh->jobj_segs_hot, reenc_seg + 1);
372
373         /*
374          * if there's no old segment after reencryption, we're done.
375          * Set size to 'dynamic' again.
376          */
377         jobj_new_seg_after = reencrypt_make_segment_new(cd, hdr, rh, data_offset, 0, 0, jobj_old_seg ? &fixed_length : NULL);
378         if (!jobj_new_seg_after)
379                 goto err;
380         json_object_object_add_by_uint(jobj_segs_post, 0, jobj_new_seg_after);
381
382         if (jobj_old_seg) {
383                 if (rh->fixed_length) {
384                         if (json_object_copy(jobj_old_seg, &jobj_old_seg_copy))
385                                 goto err;
386                         jobj_old_seg = jobj_old_seg_copy;
387                         fixed_length = rh->device_size - fixed_length;
388                         json_object_object_add(jobj_old_seg, "size", crypt_jobj_new_uint64(fixed_length));
389                 } else
390                         json_object_get(jobj_old_seg);
391                 json_object_object_add_by_uint(jobj_segs_post, 1, jobj_old_seg);
392         }
393
394         return jobj_segs_post;
395 err:
396         json_object_put(jobj_segs_post);
397         return NULL;
398 }
399
400 static json_object *reencrypt_make_post_segments_backward(struct crypt_device *cd,
401         struct luks2_hdr *hdr,
402         struct luks2_reencrypt *rh,
403         uint64_t data_offset)
404 {
405         int reenc_seg;
406         uint64_t fixed_length;
407
408         json_object *jobj_new_seg_after, *jobj_old_seg,
409                     *jobj_segs_post = json_object_new_object();
410
411         if (!rh->jobj_segs_hot || !jobj_segs_post)
412                 goto err;
413
414         reenc_seg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot);
415         if (reenc_seg < 0)
416                 return NULL;
417
418         jobj_old_seg = json_segments_get_segment(rh->jobj_segs_hot, reenc_seg - 1);
419         if (jobj_old_seg)
420                 json_object_object_add_by_uint(jobj_segs_post, reenc_seg - 1, json_object_get(jobj_old_seg));
421         if (rh->fixed_length && rh->offset) {
422                 fixed_length = rh->device_size - rh->offset;
423                 jobj_new_seg_after = reencrypt_make_segment_new(cd, hdr, rh, data_offset, rh->offset, rh->offset, &fixed_length);
424         } else
425                 jobj_new_seg_after = reencrypt_make_segment_new(cd, hdr, rh, data_offset, rh->offset, rh->offset, NULL);
426         if (!jobj_new_seg_after)
427                 goto err;
428         json_object_object_add_by_uint(jobj_segs_post, reenc_seg, jobj_new_seg_after);
429
430         return jobj_segs_post;
431 err:
432         json_object_put(jobj_segs_post);
433         return NULL;
434 }
435
436 static json_object *reencrypt_make_segment_reencrypt(struct crypt_device *cd,
437                 struct luks2_hdr *hdr,
438                 const struct luks2_reencrypt *rh,
439                 uint64_t data_offset,
440                 uint64_t segment_offset,
441                 uint64_t iv_offset,
442                 const uint64_t *segment_length)
443 {
444         switch (rh->mode) {
445         case CRYPT_REENCRYPT_REENCRYPT:
446         case CRYPT_REENCRYPT_ENCRYPT:
447                 return json_segment_create_crypt(data_offset + segment_offset,
448                                 crypt_get_iv_offset(cd) + (iv_offset >> SECTOR_SHIFT),
449                                 segment_length,
450                                 reencrypt_segment_cipher_new(hdr),
451                                 reencrypt_get_sector_size_new(hdr), 1);
452         case CRYPT_REENCRYPT_DECRYPT:
453                 return json_segment_create_linear(data_offset + segment_offset, segment_length, 1);
454         }
455
456         return NULL;
457 }
458
459 static json_object *reencrypt_make_segment_old(struct crypt_device *cd,
460                 struct luks2_hdr *hdr,
461                 const struct luks2_reencrypt *rh,
462                 uint64_t data_offset,
463                 uint64_t segment_offset,
464                 const uint64_t *segment_length)
465 {
466         json_object *jobj_old_seg = NULL;
467
468         switch (rh->mode) {
469         case CRYPT_REENCRYPT_REENCRYPT:
470         case CRYPT_REENCRYPT_DECRYPT:
471                 jobj_old_seg = json_segment_create_crypt(data_offset + segment_offset,
472                                                     crypt_get_iv_offset(cd) + (segment_offset >> SECTOR_SHIFT),
473                                                     segment_length,
474                                                     reencrypt_segment_cipher_old(hdr),
475                                                     reencrypt_get_sector_size_old(hdr),
476                                                     0);
477                 break;
478         case CRYPT_REENCRYPT_ENCRYPT:
479                 jobj_old_seg = json_segment_create_linear(data_offset + segment_offset, segment_length, 0);
480         }
481
482         return jobj_old_seg;
483 }
484
/*
 * Build the hot segment map for forward reencryption:
 * [new-format segment up to offset][hotzone][old-format remainder].
 * Returns a new json object owned by the caller, or NULL on error.
 */
static json_object *reencrypt_make_hot_segments_forward(struct crypt_device *cd,
		struct luks2_hdr *hdr,
		struct luks2_reencrypt *rh,
		uint64_t device_size,
		uint64_t data_offset)
{
	json_object *jobj_segs_hot, *jobj_reenc_seg, *jobj_old_seg, *jobj_new_seg;
	uint64_t fixed_length, tmp = rh->offset + rh->length;
	unsigned int sg = 0;

	jobj_segs_hot = json_object_new_object();
	if (!jobj_segs_hot)
		return NULL;

	/* everything before the hotzone is already in the new format */
	if (rh->offset) {
		jobj_new_seg = reencrypt_make_segment_new(cd, hdr, rh, data_offset, 0, 0, &rh->offset);
		if (!jobj_new_seg)
			goto err;
		json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_new_seg);
	}

	/* the hotzone itself */
	jobj_reenc_seg = reencrypt_make_segment_reencrypt(cd, hdr, rh, data_offset, rh->offset, rh->offset, &rh->length);
	if (!jobj_reenc_seg)
		goto err;

	json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_reenc_seg);

	/* old-format data past the hotzone; old offset is shifted by the
	 * datashift value when datashift protection is active */
	if (tmp < device_size) {
		fixed_length = device_size - tmp;
		jobj_old_seg = reencrypt_make_segment_old(cd, hdr, rh, data_offset + data_shift_value(&rh->rp),
							  rh->offset + rh->length, rh->fixed_length ? &fixed_length : NULL);
		if (!jobj_old_seg)
			goto err;
		json_object_object_add_by_uint(jobj_segs_hot, sg, jobj_old_seg);
	}

	return jobj_segs_hot;
err:
	json_object_put(jobj_segs_hot);
	return NULL;
}
526
/*
 * Build the hot segment map for decryption with data move (forward
 * direction with a moved segment): the moved header segment (segment 0)
 * is kept, followed by an optional linear segment covering already
 * decrypted data, then the hotzone, then either the existing segment 1
 * or an old-format remainder.  Returns a new json object owned by the
 * caller, or NULL on error.
 */
static json_object *reencrypt_make_hot_segments_decrypt_shift(struct crypt_device *cd,
	struct luks2_hdr *hdr, struct luks2_reencrypt *rh,
	uint64_t device_size, uint64_t data_offset)
{
	json_object *jobj_segs_hot, *jobj_reenc_seg, *jobj_old_seg, *jobj_new_seg;
	uint64_t fixed_length, tmp = rh->offset + rh->length, linear_length = rh->progress;
	unsigned int sg = 0;

	jobj_segs_hot = json_object_new_object();
	if (!jobj_segs_hot)
		return NULL;

	if (rh->offset) {
		/* keep current segment 0 (with a new reference) */
		jobj_new_seg = LUKS2_get_segment_jobj(hdr, 0);
		if (!jobj_new_seg)
			goto err;
		json_object_object_add_by_uint(jobj_segs_hot, sg++, json_object_get(jobj_new_seg));

		/* data already decrypted in previous steps becomes a new-format segment */
		if (linear_length) {
			jobj_new_seg = reencrypt_make_segment_new(cd, hdr, rh,
								  data_offset,
								  json_segment_get_size(jobj_new_seg, 0),
								  0,
								  &linear_length);
			if (!jobj_new_seg)
				goto err;
			json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_new_seg);
		}
	}

	/* the hotzone itself */
	jobj_reenc_seg = reencrypt_make_segment_reencrypt(cd, hdr, rh, data_offset,
							  rh->offset,
							  rh->offset,
							  &rh->length);
	if (!jobj_reenc_seg)
		goto err;

	json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_reenc_seg);

	/* at offset 0 reuse existing non-backup segment 1 when present;
	 * otherwise append the old-format remainder (shifted by datashift) */
	if (!rh->offset && (jobj_new_seg = LUKS2_get_segment_jobj(hdr, 1)) &&
	    !json_segment_is_backup(jobj_new_seg))
		json_object_object_add_by_uint(jobj_segs_hot, sg++, json_object_get(jobj_new_seg));
	else if (tmp < device_size) {
		fixed_length = device_size - tmp;
		jobj_old_seg = reencrypt_make_segment_old(cd, hdr, rh,
							  data_offset + data_shift_value(&rh->rp),
							  rh->offset + rh->length,
							  rh->fixed_length ? &fixed_length : NULL);
		if (!jobj_old_seg)
			goto err;
		json_object_object_add_by_uint(jobj_segs_hot, sg, jobj_old_seg);
	}

	return jobj_segs_hot;
err:
	json_object_put(jobj_segs_hot);
	return NULL;
}
585
586 static json_object *_dec_create_segments_shift_after(struct crypt_device *cd,
587         struct luks2_hdr *hdr,
588         struct luks2_reencrypt *rh,
589         uint64_t data_offset)
590 {
591         int reenc_seg, i = 0;
592         json_object *jobj_copy, *jobj_seg_old, *jobj_seg_new,
593                     *jobj_segs_post = json_object_new_object();
594         unsigned segs;
595         uint64_t tmp;
596
597         if (!rh->jobj_segs_hot || !jobj_segs_post)
598                 goto err;
599
600         segs = json_segments_count(rh->jobj_segs_hot);
601         if (segs == 0)
602                 return jobj_segs_post;
603
604         reenc_seg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot);
605         if (reenc_seg < 0)
606                 goto err;
607
608         if (reenc_seg == 0) {
609                 jobj_seg_new = reencrypt_make_segment_new(cd, hdr, rh, data_offset, 0, 0, NULL);
610                 if (!jobj_seg_new)
611                         goto err;
612                 json_object_object_add_by_uint(jobj_segs_post, 0, jobj_seg_new);
613
614                 return jobj_segs_post;
615         }
616
617         jobj_copy = json_segments_get_segment(rh->jobj_segs_hot, 0);
618         if (!jobj_copy)
619                 goto err;
620         json_object_object_add_by_uint(jobj_segs_post, i++, json_object_get(jobj_copy));
621
622         jobj_seg_old = json_segments_get_segment(rh->jobj_segs_hot, reenc_seg + 1);
623
624         tmp = rh->length + rh->progress;
625         jobj_seg_new = reencrypt_make_segment_new(cd, hdr, rh, data_offset,
626                                                   json_segment_get_size(rh->jobj_segment_moved, 0),
627                                                   data_shift_value(&rh->rp),
628                                                   jobj_seg_old ? &tmp : NULL);
629         json_object_object_add_by_uint(jobj_segs_post, i++, jobj_seg_new);
630
631         if (jobj_seg_old)
632                 json_object_object_add_by_uint(jobj_segs_post, i, json_object_get(jobj_seg_old));
633
634         return jobj_segs_post;
635 err:
636         json_object_put(jobj_segs_post);
637         return NULL;
638 }
639
/*
 * Build the hot segment map for backward reencryption:
 * [shrunk old-format segment up to offset][hotzone][new-format remainder].
 * Returns a new json object owned by the caller, or NULL on error.
 */
static json_object *reencrypt_make_hot_segments_backward(struct crypt_device *cd,
		struct luks2_hdr *hdr,
		struct luks2_reencrypt *rh,
		uint64_t device_size,
		uint64_t data_offset)
{
	json_object *jobj_reenc_seg, *jobj_new_seg, *jobj_old_seg = NULL,
		    *jobj_segs_hot = json_object_new_object();
	int sg = 0;
	uint64_t fixed_length, tmp = rh->offset + rh->length;

	if (!jobj_segs_hot)
		return NULL;

	/* data before the hotzone still uses the old format; copy segment 0
	 * and clamp its size to the hotzone offset */
	if (rh->offset) {
		if (json_object_copy(LUKS2_get_segment_jobj(hdr, 0), &jobj_old_seg))
			goto err;
		json_object_object_add(jobj_old_seg, "size", crypt_jobj_new_uint64(rh->offset));

		json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_old_seg);
	}

	/* the hotzone itself */
	jobj_reenc_seg = reencrypt_make_segment_reencrypt(cd, hdr, rh, data_offset, rh->offset, rh->offset, &rh->length);
	if (!jobj_reenc_seg)
		goto err;

	json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_reenc_seg);

	/* data past the hotzone was already reencrypted to the new format */
	if (tmp < device_size) {
		fixed_length = device_size - tmp;
		jobj_new_seg = reencrypt_make_segment_new(cd, hdr, rh, data_offset, rh->offset + rh->length,
							  rh->offset + rh->length, rh->fixed_length ? &fixed_length : NULL);
		if (!jobj_new_seg)
			goto err;
		json_object_object_add_by_uint(jobj_segs_hot, sg, jobj_new_seg);
	}

	return jobj_segs_hot;
err:
	json_object_put(jobj_segs_hot);
	return NULL;
}
682
683 static int reencrypt_make_hot_segments(struct crypt_device *cd,
684                 struct luks2_hdr *hdr,
685                 struct luks2_reencrypt *rh,
686                 uint64_t device_size,
687                 uint64_t data_offset)
688 {
689         rh->jobj_segs_hot = NULL;
690
691         if (rh->mode == CRYPT_REENCRYPT_ENCRYPT && rh->direction == CRYPT_REENCRYPT_BACKWARD &&
692             rh->rp.type == REENC_PROTECTION_DATASHIFT && rh->jobj_segment_moved) {
693                 log_dbg(cd, "Calculating hot segments for encryption with data move.");
694                 rh->jobj_segs_hot = reencrypt_make_hot_segments_encrypt_shift(hdr, rh, data_offset);
695         } else if (rh->mode == CRYPT_REENCRYPT_DECRYPT && rh->direction == CRYPT_REENCRYPT_FORWARD &&
696                    rh->rp.type == REENC_PROTECTION_DATASHIFT && rh->jobj_segment_moved) {
697                 log_dbg(cd, "Calculating hot segments for decryption with data move.");
698                 rh->jobj_segs_hot = reencrypt_make_hot_segments_decrypt_shift(cd, hdr, rh, device_size, data_offset);
699         } else if (rh->direction == CRYPT_REENCRYPT_FORWARD) {
700                 log_dbg(cd, "Calculating hot segments (forward direction).");
701                 rh->jobj_segs_hot = reencrypt_make_hot_segments_forward(cd, hdr, rh, device_size, data_offset);
702         } else if (rh->direction == CRYPT_REENCRYPT_BACKWARD) {
703                 log_dbg(cd, "Calculating hot segments (backward direction).");
704                 rh->jobj_segs_hot = reencrypt_make_hot_segments_backward(cd, hdr, rh, device_size, data_offset);
705         }
706
707         return rh->jobj_segs_hot ? 0 : -EINVAL;
708 }
709
710 static int reencrypt_make_post_segments(struct crypt_device *cd,
711                 struct luks2_hdr *hdr,
712                 struct luks2_reencrypt *rh,
713                 uint64_t data_offset)
714 {
715         rh->jobj_segs_post = NULL;
716
717         if (rh->mode == CRYPT_REENCRYPT_ENCRYPT && rh->direction == CRYPT_REENCRYPT_BACKWARD &&
718             rh->rp.type == REENC_PROTECTION_DATASHIFT && rh->jobj_segment_moved) {
719                 log_dbg(cd, "Calculating post segments for encryption with data move.");
720                 rh->jobj_segs_post = _enc_create_segments_shift_after(rh, data_offset);
721         } else if (rh->mode == CRYPT_REENCRYPT_DECRYPT && rh->direction == CRYPT_REENCRYPT_FORWARD &&
722                    rh->rp.type == REENC_PROTECTION_DATASHIFT && rh->jobj_segment_moved) {
723                 log_dbg(cd, "Calculating post segments for decryption with data move.");
724                 rh->jobj_segs_post = _dec_create_segments_shift_after(cd, hdr, rh, data_offset);
725         } else if (rh->direction == CRYPT_REENCRYPT_FORWARD) {
726                 log_dbg(cd, "Calculating post segments (forward direction).");
727                 rh->jobj_segs_post = reencrypt_make_post_segments_forward(cd, hdr, rh, data_offset);
728         } else if (rh->direction == CRYPT_REENCRYPT_BACKWARD) {
729                 log_dbg(cd, "Calculating segments (backward direction).");
730                 rh->jobj_segs_post = reencrypt_make_post_segments_backward(cd, hdr, rh, data_offset);
731         }
732
733         return rh->jobj_segs_post ? 0 : -EINVAL;
734 }
735 #endif
736 static uint64_t reencrypt_data_shift(struct luks2_hdr *hdr)
737 {
738         json_object *jobj_keyslot, *jobj_area, *jobj_data_shift;
739         int ks = LUKS2_find_keyslot(hdr, "reencrypt");
740
741         if (ks < 0)
742                 return 0;
743
744         jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, ks);
745
746         json_object_object_get_ex(jobj_keyslot, "area", &jobj_area);
747         if (!json_object_object_get_ex(jobj_area, "shift_size", &jobj_data_shift))
748                 return 0;
749
750         return crypt_jobj_get_uint64(jobj_data_shift);
751 }
752
753 static crypt_reencrypt_mode_info reencrypt_mode(struct luks2_hdr *hdr)
754 {
755         const char *mode;
756         crypt_reencrypt_mode_info mi = CRYPT_REENCRYPT_REENCRYPT;
757         json_object *jobj_keyslot, *jobj_mode;
758
759         jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, LUKS2_find_keyslot(hdr, "reencrypt"));
760         if (!jobj_keyslot)
761                 return mi;
762
763         json_object_object_get_ex(jobj_keyslot, "mode", &jobj_mode);
764         mode = json_object_get_string(jobj_mode);
765
766         /* validation enforces allowed values */
767         if (!strcmp(mode, "encrypt"))
768                 mi = CRYPT_REENCRYPT_ENCRYPT;
769         else if (!strcmp(mode, "decrypt"))
770                 mi = CRYPT_REENCRYPT_DECRYPT;
771
772         return mi;
773 }
774
775 static crypt_reencrypt_direction_info reencrypt_direction(struct luks2_hdr *hdr)
776 {
777         const char *value;
778         json_object *jobj_keyslot, *jobj_mode;
779         crypt_reencrypt_direction_info di = CRYPT_REENCRYPT_FORWARD;
780
781         jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, LUKS2_find_keyslot(hdr, "reencrypt"));
782         if (!jobj_keyslot)
783                 return di;
784
785         json_object_object_get_ex(jobj_keyslot, "direction", &jobj_mode);
786         value = json_object_get_string(jobj_mode);
787
788         /* validation enforces allowed values */
789         if (strcmp(value, "forward"))
790                 di = CRYPT_REENCRYPT_BACKWARD;
791
792         return di;
793 }
794
795 typedef enum { REENC_OK = 0, REENC_ERR, REENC_ROLLBACK, REENC_FATAL } reenc_status_t;
796
797 void LUKS2_reencrypt_protection_erase(struct reenc_protection *rp)
798 {
799         if (!rp || rp->type != REENC_PROTECTION_CHECKSUM)
800                 return;
801
802         if (rp->p.csum.ch) {
803                 crypt_hash_destroy(rp->p.csum.ch);
804                 rp->p.csum.ch = NULL;
805         }
806
807         if (rp->p.csum.checksums) {
808                 crypt_safe_memzero(rp->p.csum.checksums, rp->p.csum.checksums_len);
809                 free(rp->p.csum.checksums);
810                 rp->p.csum.checksums = NULL;
811         }
812 }
813
/*
 * Release all runtime resources held by a reencryption context.
 * Safe to call with rh == NULL. Ordering is deliberate: keyring keys are
 * dropped before the volume keys are freed, and the device/reencrypt locks
 * are released last.
 */
void LUKS2_reencrypt_free(struct crypt_device *cd, struct luks2_reencrypt *rh)
{
	if (!rh)
		return;

	/* wipe checksum resilience state (hash handles, checksum buffers) */
	LUKS2_reencrypt_protection_erase(&rh->rp);
	LUKS2_reencrypt_protection_erase(&rh->rp_moved_segment);

	/* drop references to cached JSON segment snapshots */
	json_object_put(rh->jobj_segs_hot);
	rh->jobj_segs_hot = NULL;
	json_object_put(rh->jobj_segs_post);
	rh->jobj_segs_post = NULL;
	json_object_put(rh->jobj_segment_old);
	rh->jobj_segment_old = NULL;
	json_object_put(rh->jobj_segment_new);
	rh->jobj_segment_new = NULL;
	json_object_put(rh->jobj_segment_moved);
	rh->jobj_segment_moved = NULL;

	free(rh->reenc_buffer);
	rh->reenc_buffer = NULL;
	/* destroy old- and new-segment storage wrappers */
	crypt_storage_wrapper_destroy(rh->cw1);
	rh->cw1 = NULL;
	crypt_storage_wrapper_destroy(rh->cw2);
	rh->cw2 = NULL;

	free(rh->device_name);
	free(rh->overlay_name);
	free(rh->hotzone_name);
	/* remove volume keys from the kernel keyring before freeing them */
	crypt_drop_keyring_key(cd, rh->vks);
	crypt_free_volume_key(rh->vks);
	/* release exclusive data-device claim and the reencryption lock last */
	device_release_excl(cd, crypt_data_device(cd));
	crypt_unlock_internal(cd, rh->reenc_lock);
	free(rh);
}
849
/*
 * Compute the maximum hotzone length (bytes) allowed by the given resilience
 * type. For journal/checksum resilience the bound derives from the reencrypt
 * keyslot area size. Returns 0 on success, -EINVAL on failure, -ENOTSUP when
 * reencryption support is compiled out.
 */
int LUKS2_reencrypt_max_hotzone_size(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	const struct reenc_protection *rp,
	int reencrypt_keyslot,
	uint64_t *r_length)
{
#if USE_LUKS2_REENCRYPTION
	uint64_t area_offset, area_length;

	assert(hdr);
	assert(rp);
	assert(r_length);

	/* no resilience: only the global hotzone cap applies */
	if (rp->type <= REENC_PROTECTION_NONE) {
		*r_length = LUKS2_REENCRYPT_MAX_HOTZONE_LENGTH;
		return 0;
	}

	/* datashift: hotzone equals the configured data shift */
	if (rp->type == REENC_PROTECTION_DATASHIFT) {
		*r_length = rp->p.ds.data_shift;
		return 0;
	}

	/* journal and checksum resilience are bounded by the keyslot area */
	if (LUKS2_keyslot_area(hdr, reencrypt_keyslot, &area_offset, &area_length) < 0)
		return -EINVAL;

	switch (rp->type) {
	case REENC_PROTECTION_JOURNAL:
		*r_length = area_length;
		return 0;
	case REENC_PROTECTION_CHECKSUM:
		/* one hash per data block fits in the area */
		*r_length = (area_length / rp->p.csum.hash_size) * rp->p.csum.block_size;
		return 0;
	default:
		return -EINVAL;
	}
#else
	return -ENOTSUP;
#endif
}
893 #if USE_LUKS2_REENCRYPTION
/* Required hotzone alignment: the maximum of the data device block size and
 * both (old and new) encryption sector sizes. */
static size_t reencrypt_get_alignment(struct crypt_device *cd,
		struct luks2_hdr *hdr)
{
	size_t alignment, candidate;

	alignment = device_block_size(cd, crypt_data_device(cd));

	candidate = reencrypt_get_sector_size_old(hdr);
	if (candidate > alignment)
		alignment = candidate;

	candidate = reencrypt_get_sector_size_new(hdr);
	if (candidate > alignment)
		alignment = candidate;

	return alignment;
}
908
909 /* returns void because it must not fail on valid LUKS2 header */
910 static void _load_backup_segments(struct luks2_hdr *hdr,
911                 struct luks2_reencrypt *rh)
912 {
913         int segment = LUKS2_get_segment_id_by_flag(hdr, "backup-final");
914
915         if (segment >= 0) {
916                 rh->jobj_segment_new = json_object_get(LUKS2_get_segment_jobj(hdr, segment));
917                 rh->digest_new = LUKS2_digest_by_segment(hdr, segment);
918         } else {
919                 rh->jobj_segment_new = NULL;
920                 rh->digest_new = -ENOENT;
921         }
922
923         segment = LUKS2_get_segment_id_by_flag(hdr, "backup-previous");
924         if (segment >= 0) {
925                 rh->jobj_segment_old = json_object_get(LUKS2_get_segment_jobj(hdr, segment));
926                 rh->digest_old = LUKS2_digest_by_segment(hdr, segment);
927         } else {
928                 rh->jobj_segment_old = NULL;
929                 rh->digest_old = -ENOENT;
930         }
931
932         segment = LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment");
933         if (segment >= 0)
934                 rh->jobj_segment_moved = json_object_get(LUKS2_get_segment_jobj(hdr, segment));
935         else
936                 rh->jobj_segment_moved = NULL;
937 }
938
939 static int reencrypt_offset_backward_moved(struct luks2_hdr *hdr, json_object *jobj_segments,
940                                            uint64_t *reencrypt_length, uint64_t data_shift, uint64_t *offset)
941 {
942         uint64_t tmp, linear_length = 0;
943         int sg, segs = json_segments_count(jobj_segments);
944
945         /* find reencrypt offset with data shift */
946         for (sg = 0; sg < segs; sg++)
947                 if (LUKS2_segment_is_type(hdr, sg, "linear"))
948                         linear_length += LUKS2_segment_size(hdr, sg, 0);
949
950         /* all active linear segments length */
951         if (linear_length && segs > 1) {
952                 if (linear_length < data_shift)
953                         return -EINVAL;
954                 tmp = linear_length - data_shift;
955                 if (tmp && tmp < data_shift) {
956                         *offset = data_shift;
957                         *reencrypt_length = tmp;
958                 } else
959                         *offset = tmp;
960                 return 0;
961         }
962
963         if (segs == 1) {
964                 *offset = 0;
965                 return 0;
966         }
967
968         /* should be unreachable */
969
970         return -EINVAL;
971 }
972
973 static int reencrypt_offset_forward_moved(struct luks2_hdr *hdr,
974         json_object *jobj_segments,
975         uint64_t data_shift,
976         uint64_t *offset)
977 {
978         int last_crypt = LUKS2_last_segment_by_type(hdr, "crypt");
979
980         /* if last crypt segment exists and it's first one, just return offset = 0 */
981         if (last_crypt <= 0) {
982                 *offset = 0;
983                 return 0;
984         }
985
986         *offset = LUKS2_segment_offset(hdr, last_crypt, 0) - data_shift;
987         return 0;
988 }
989
990 static int _offset_forward(json_object *jobj_segments, uint64_t *offset)
991 {
992         int segs = json_segments_count(jobj_segments);
993
994         if (segs == 1)
995                 *offset = 0;
996         else if (segs == 2) {
997                 *offset = json_segment_get_size(json_segments_get_segment(jobj_segments, 0), 0);
998                 if (!*offset)
999                         return -EINVAL;
1000         } else
1001                 return -EINVAL;
1002
1003         return 0;
1004 }
1005
1006 static int _offset_backward(json_object *jobj_segments, uint64_t device_size, uint64_t *length, uint64_t *offset)
1007 {
1008         int segs = json_segments_count(jobj_segments);
1009         uint64_t tmp;
1010
1011         if (segs == 1) {
1012                 if (device_size < *length)
1013                         *length = device_size;
1014                 *offset = device_size - *length;
1015         } else if (segs == 2) {
1016                 tmp = json_segment_get_size(json_segments_get_segment(jobj_segments, 0), 0);
1017                 if (tmp < *length)
1018                         *length = tmp;
1019                 *offset =  tmp - *length;
1020         } else
1021                 return -EINVAL;
1022
1023         return 0;
1024 }
1025
/* Compute current reencryption offset (and possibly clamp *reencrypt_length).
 * The result is always relative to data offset.
 * The LUKS2 header MUST be valid. */
static int reencrypt_offset(struct luks2_hdr *hdr,
		crypt_reencrypt_direction_info di,
		uint64_t device_size,
		uint64_t *reencrypt_length,
		uint64_t *offset)
{
	int r, sg;
	json_object *jobj_segments;
	uint64_t data_shift = reencrypt_data_shift(hdr);

	if (!offset)
		return -EINVAL;

	/* if there's segment in reencryption return directly offset of it */
	json_object_object_get_ex(hdr->jobj, "segments", &jobj_segments);
	sg = json_segments_segment_in_reencrypt(jobj_segments);
	if (sg >= 0) {
		*offset = LUKS2_segment_offset(hdr, sg, 0) - (reencrypt_get_data_offset_new(hdr));
		return 0;
	}

	if (di == CRYPT_REENCRYPT_FORWARD) {
		/* decryption with data move uses a dedicated offset helper */
		if (reencrypt_mode(hdr) == CRYPT_REENCRYPT_DECRYPT &&
		    LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment") >= 0) {
			r = reencrypt_offset_forward_moved(hdr, jobj_segments, data_shift, offset);
			/* never point past the usable data area */
			if (!r && *offset > device_size)
				*offset = device_size;
			return r;
		}
		return _offset_forward(jobj_segments, offset);
	} else if (di == CRYPT_REENCRYPT_BACKWARD) {
		/* encryption with data move uses a dedicated offset helper */
		if (reencrypt_mode(hdr) == CRYPT_REENCRYPT_ENCRYPT &&
		    LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment") >= 0)
			return reencrypt_offset_backward_moved(hdr, jobj_segments, reencrypt_length, data_shift, offset);
		return _offset_backward(jobj_segments, device_size, reencrypt_length, offset);
	}

	return -EINVAL;
}
1067
/* Compute hotzone length (bytes) for one reencryption step, derived from the
 * resilience type and clamped by hard, memory and user limits, then aligned.
 * Returns 0 on failure (caller emits the error). */
static uint64_t reencrypt_length(struct crypt_device *cd,
		struct reenc_protection *rp,
		uint64_t keyslot_area_length,
		uint64_t length_max,
		size_t alignment)
{
	unsigned long dummy, optimal_alignment;
	uint64_t length, soft_mem_limit;

	if (rp->type == REENC_PROTECTION_NONE)
		length = length_max ?: LUKS2_DEFAULT_NONE_REENCRYPTION_LENGTH;
	else if (rp->type == REENC_PROTECTION_CHECKSUM)
		/* as many blocks as checksums fit in the keyslot area */
		length = (keyslot_area_length / rp->p.csum.hash_size) * rp->p.csum.block_size;
	else if (rp->type == REENC_PROTECTION_DATASHIFT)
		/* datashift length is fixed; no limits apply */
		return rp->p.ds.data_shift;
	else
		length = keyslot_area_length;

	/* hard limit */
	if (length > LUKS2_REENCRYPT_MAX_HOTZONE_LENGTH)
		length = LUKS2_REENCRYPT_MAX_HOTZONE_LENGTH;

	/* soft limit is 1/4 of system memory */
	soft_mem_limit = crypt_getphysmemory_kb() << 8; /* multiply by (1024/4) */

	if (soft_mem_limit && length > soft_mem_limit)
		length = soft_mem_limit;

	if (length_max && length > length_max)
		length = length_max;

	/* round down to the required sector/block alignment */
	length -= (length % alignment);

	/* Emits error later */
	if (!length)
		return length;

	device_topology_alignment(cd, crypt_data_device(cd), &optimal_alignment, &dummy, length);

	/* we have to stick with encryption sector size alignment */
	if (optimal_alignment % alignment)
		return length;

	/* align to opt-io size only if remaining size allows it */
	if (length > optimal_alignment)
		length -= (length % optimal_alignment);

	return length;
}
1117
/*
 * Initialize the reencryption context from header metadata: resolve the
 * reencrypt keyslot, mode, direction and resilience, compute hotzone length,
 * offset and progress. device_size, max_hotzone_size and fixed_device_size
 * are in 512-byte sectors for the latter two, bytes for device_size.
 * Returns 0 on success, negative errno otherwise.
 */
static int reencrypt_context_init(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	struct luks2_reencrypt *rh,
	uint64_t device_size,
	uint64_t max_hotzone_size,
	uint64_t fixed_device_size)
{
	int r;
	size_t alignment;
	uint64_t dummy, area_length;

	rh->reenc_keyslot = LUKS2_find_keyslot(hdr, "reencrypt");
	if (rh->reenc_keyslot < 0)
		return -EINVAL;
	if (LUKS2_keyslot_area(hdr, rh->reenc_keyslot, &dummy, &area_length) < 0)
		return -EINVAL;

	rh->mode = reencrypt_mode(hdr);

	rh->direction = reencrypt_direction(hdr);

	/* primary (hotzone) resilience parameters */
	r = LUKS2_keyslot_reencrypt_load(cd, hdr, rh->reenc_keyslot, &rh->rp, true);
	if (r < 0)
		return r;

	/* checksum resilience dictates its own block alignment */
	if (rh->rp.type == REENC_PROTECTION_CHECKSUM)
		alignment = rh->rp.p.csum.block_size;
	else
		alignment = reencrypt_get_alignment(cd, hdr);

	if (!alignment)
		return -EINVAL;

	if ((max_hotzone_size << SECTOR_SHIFT) % alignment) {
		log_err(cd, _("Hotzone size must be multiple of calculated zone alignment (%zu bytes)."), alignment);
		return -EINVAL;
	}

	if ((fixed_device_size << SECTOR_SHIFT) % alignment) {
		log_err(cd, _("Device size must be multiple of calculated zone alignment (%zu bytes)."), alignment);
		return -EINVAL;
	}

	if (fixed_device_size) {
		log_dbg(cd, "Switching reencryption to fixed size mode.");
		device_size = fixed_device_size << SECTOR_SHIFT;
		rh->fixed_length = true;
	} else
		rh->fixed_length = false;

	rh->length = reencrypt_length(cd, &rh->rp, area_length, max_hotzone_size << SECTOR_SHIFT, alignment);
	if (!rh->length) {
		log_dbg(cd, "Invalid reencryption length.");
		return -EINVAL;
	}

	/* note: may also shrink rh->length for the current step */
	if (reencrypt_offset(hdr, rh->direction, device_size, &rh->length, &rh->offset)) {
		log_dbg(cd, "Failed to get reencryption offset.");
		return -EINVAL;
	}

	if (rh->offset > device_size)
		return -EINVAL;
	/* clamp hotzone to the remaining device size */
	if (rh->length > device_size - rh->offset)
		rh->length = device_size - rh->offset;

	_load_backup_segments(hdr, rh);

	/* optional secondary resilience for the moved segment */
	r = LUKS2_keyslot_reencrypt_load(cd, hdr, rh->reenc_keyslot, &rh->rp_moved_segment, false);
	if (r < 0)
		return r;

	if (rh->rp_moved_segment.type == REENC_PROTECTION_NOT_SET)
		log_dbg(cd, "No moved segment resilience configured.");

	/* progress = bytes already reencrypted, direction dependent */
	if (rh->direction == CRYPT_REENCRYPT_BACKWARD)
		rh->progress = device_size - rh->offset - rh->length;
	else if (rh->jobj_segment_moved && rh->direction == CRYPT_REENCRYPT_FORWARD) {
		if (rh->offset == json_segment_get_offset(LUKS2_get_segment_by_flag(hdr, "backup-moved-segment"), false))
			rh->progress = device_size - json_segment_get_size(LUKS2_get_segment_by_flag(hdr, "backup-moved-segment"), false);
		else
			rh->progress = rh->offset - json_segment_get_size(rh->jobj_segment_moved, 0);
	} else
		rh->progress = rh->offset;

	log_dbg(cd, "reencrypt-direction: %s", rh->direction == CRYPT_REENCRYPT_FORWARD ? "forward" : "backward");
	log_dbg(cd, "backup-previous digest id: %d", rh->digest_old);
	log_dbg(cd, "backup-final digest id: %d", rh->digest_new);
	log_dbg(cd, "reencrypt length: %" PRIu64, rh->length);
	log_dbg(cd, "reencrypt offset: %" PRIu64, rh->offset);
	log_dbg(cd, "reencrypt shift: %s%" PRIu64,
		(rh->rp.type == REENC_PROTECTION_DATASHIFT && rh->direction == CRYPT_REENCRYPT_BACKWARD ? "-" : ""),
		data_shift_value(&rh->rp));
	log_dbg(cd, "reencrypt alignment: %zu", alignment);
	log_dbg(cd, "reencrypt progress: %" PRIu64, rh->progress);

	rh->device_size = device_size;

	/* hotzone shorter than one 512B sector is unusable */
	return rh->length < 512 ? -EINVAL : 0;
}
1218
1219 static size_t reencrypt_buffer_length(struct luks2_reencrypt *rh)
1220 {
1221         if (rh->rp.type == REENC_PROTECTION_DATASHIFT)
1222                 return data_shift_value(&rh->rp);
1223         return rh->length;
1224 }
1225
1226 static int reencrypt_load_clean(struct crypt_device *cd,
1227         struct luks2_hdr *hdr,
1228         uint64_t device_size,
1229         uint64_t max_hotzone_size,
1230         uint64_t fixed_device_size,
1231         struct luks2_reencrypt **rh)
1232 {
1233         int r;
1234         struct luks2_reencrypt *tmp = crypt_zalloc(sizeof (*tmp));
1235
1236         if (!tmp)
1237                 return -ENOMEM;
1238
1239         log_dbg(cd, "Loading stored reencryption context.");
1240
1241         r = reencrypt_context_init(cd, hdr, tmp, device_size, max_hotzone_size, fixed_device_size);
1242         if (r)
1243                 goto err;
1244
1245         if (posix_memalign(&tmp->reenc_buffer, device_alignment(crypt_data_device(cd)),
1246                            reencrypt_buffer_length(tmp))) {
1247                 r = -ENOMEM;
1248                 goto err;
1249         }
1250
1251         *rh = tmp;
1252
1253         return 0;
1254 err:
1255         LUKS2_reencrypt_free(cd, tmp);
1256
1257         return r;
1258 }
1259
1260 static int reencrypt_make_segments(struct crypt_device *cd,
1261         struct luks2_hdr *hdr,
1262         struct luks2_reencrypt *rh,
1263         uint64_t device_size)
1264 {
1265         int r;
1266         uint64_t data_offset = reencrypt_get_data_offset_new(hdr);
1267
1268         log_dbg(cd, "Calculating segments.");
1269
1270         r = reencrypt_make_hot_segments(cd, hdr, rh, device_size, data_offset);
1271         if (!r) {
1272                 r = reencrypt_make_post_segments(cd, hdr, rh, data_offset);
1273                 if (r)
1274                         json_object_put(rh->jobj_segs_hot);
1275         }
1276
1277         if (r)
1278                 log_dbg(cd, "Failed to make reencryption segments.");
1279
1280         return r;
1281 }
1282
1283 static int reencrypt_make_segments_crashed(struct crypt_device *cd,
1284                                 struct luks2_hdr *hdr,
1285                                 struct luks2_reencrypt *rh)
1286 {
1287         int r;
1288         uint64_t data_offset = crypt_get_data_offset(cd) << SECTOR_SHIFT;
1289
1290         if (!rh)
1291                 return -EINVAL;
1292
1293         rh->jobj_segs_hot = json_object_new_object();
1294         if (!rh->jobj_segs_hot)
1295                 return -ENOMEM;
1296
1297         json_object_object_foreach(LUKS2_get_segments_jobj(hdr), key, val) {
1298                 if (json_segment_is_backup(val))
1299                         continue;
1300                 json_object_object_add(rh->jobj_segs_hot, key, json_object_get(val));
1301         }
1302
1303         r = reencrypt_make_post_segments(cd, hdr, rh, data_offset);
1304         if (r) {
1305                 json_object_put(rh->jobj_segs_hot);
1306                 rh->jobj_segs_hot = NULL;
1307         }
1308
1309         return r;
1310 }
1311
1312 static int reencrypt_load_crashed(struct crypt_device *cd,
1313         struct luks2_hdr *hdr, uint64_t device_size, struct luks2_reencrypt **rh)
1314 {
1315         bool dynamic;
1316         uint64_t required_device_size;
1317         int r, reenc_seg;
1318
1319         if (LUKS2_get_data_size(hdr, &required_device_size, &dynamic))
1320                 return -EINVAL;
1321
1322         if (dynamic)
1323                 required_device_size = 0;
1324         else
1325                 required_device_size >>= SECTOR_SHIFT;
1326
1327         r = reencrypt_load_clean(cd, hdr, device_size, 0, required_device_size, rh);
1328
1329         if (!r) {
1330                 reenc_seg = json_segments_segment_in_reencrypt(LUKS2_get_segments_jobj(hdr));
1331                 if (reenc_seg < 0)
1332                         r = -EINVAL;
1333                 else
1334                         (*rh)->length = LUKS2_segment_size(hdr, reenc_seg, 0);
1335         }
1336
1337         if (!r)
1338                 r = reencrypt_make_segments_crashed(cd, hdr, *rh);
1339
1340         if (r) {
1341                 LUKS2_reencrypt_free(cd, *rh);
1342                 *rh = NULL;
1343         }
1344         return r;
1345 }
1346
/*
 * Initialize storage wrappers for the old (read-only) and new segment.
 * Volume keys are looked up in vks by the digests cached in the context.
 * Returns 0 on success, negative errno otherwise; caller frees partially
 * initialized wrappers via LUKS2_reencrypt_free().
 */
static int reencrypt_init_storage_wrappers(struct crypt_device *cd,
		struct luks2_hdr *hdr,
		struct luks2_reencrypt *rh,
		struct volume_key *vks)
{
	int r;
	struct volume_key *vk;
	/* kernel crypto API backend is disabled for root (see wrapper docs) */
	uint32_t wrapper_flags = (getuid() || geteuid()) ? 0 : DISABLE_KCAPI;

	/* old segment: source of data, opened read-only */
	vk = crypt_volume_key_by_id(vks, rh->digest_old);
	r = crypt_storage_wrapper_init(cd, &rh->cw1, crypt_data_device(cd),
			reencrypt_get_data_offset_old(hdr),
			crypt_get_iv_offset(cd),
			reencrypt_get_sector_size_old(hdr),
			reencrypt_segment_cipher_old(hdr),
			vk, wrapper_flags | OPEN_READONLY);
	if (r) {
		log_err(cd, _("Failed to initialize old segment storage wrapper."));
		return r;
	}
	rh->wflags1 = wrapper_flags | OPEN_READONLY;
	log_dbg(cd, "Old cipher storage wrapper type: %d.", crypt_storage_wrapper_get_type(rh->cw1));

	/* new segment: destination of data, writable */
	vk = crypt_volume_key_by_id(vks, rh->digest_new);
	r = crypt_storage_wrapper_init(cd, &rh->cw2, crypt_data_device(cd),
			reencrypt_get_data_offset_new(hdr),
			crypt_get_iv_offset(cd),
			reencrypt_get_sector_size_new(hdr),
			reencrypt_segment_cipher_new(hdr),
			vk, wrapper_flags);
	if (r) {
		log_err(cd, _("Failed to initialize new segment storage wrapper."));
		return r;
	}
	rh->wflags2 = wrapper_flags;
	log_dbg(cd, "New cipher storage wrapper type: %d", crypt_storage_wrapper_get_type(rh->cw2));

	return 0;
}
1386
1387 static int reencrypt_context_set_names(struct luks2_reencrypt *rh, const char *name)
1388 {
1389         if (!rh | !name)
1390                 return -EINVAL;
1391
1392         if (*name == '/') {
1393                 if (!(rh->device_name = dm_device_name(name)))
1394                         return -EINVAL;
1395         } else if (!(rh->device_name = strdup(name)))
1396                 return -ENOMEM;
1397
1398         if (asprintf(&rh->hotzone_name, "%s-hotzone-%s", rh->device_name,
1399                      rh->direction == CRYPT_REENCRYPT_FORWARD ? "forward" : "backward") < 0) {
1400                 rh->hotzone_name = NULL;
1401                 return -ENOMEM;
1402         }
1403         if (asprintf(&rh->overlay_name, "%s-overlay", rh->device_name) < 0) {
1404                 rh->overlay_name = NULL;
1405                 return -ENOMEM;
1406         }
1407
1408         rh->online = true;
1409         return 0;
1410 }
1411
1412 static int modify_offset(uint64_t *offset, uint64_t data_shift, crypt_reencrypt_direction_info di)
1413 {
1414         int r = -EINVAL;
1415
1416         if (!offset)
1417                 return r;
1418
1419         if (di == CRYPT_REENCRYPT_FORWARD) {
1420                 if (*offset >= data_shift) {
1421                         *offset -= data_shift;
1422                         r = 0;
1423                 }
1424         } else if (di == CRYPT_REENCRYPT_BACKWARD) {
1425                 *offset += data_shift;
1426                 r = 0;
1427         }
1428
1429         return r;
1430 }
1431
1432 static int reencrypt_update_flag(struct crypt_device *cd, uint8_t version,
1433         bool enable, bool commit)
1434 {
1435         uint32_t reqs;
1436         struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
1437
1438         if (enable) {
1439                 log_dbg(cd, "Going to store reencryption requirement flag (version: %u).", version);
1440                 return LUKS2_config_set_requirement_version(cd, hdr, CRYPT_REQUIREMENT_ONLINE_REENCRYPT, version, commit);
1441         }
1442
1443         if (LUKS2_config_get_requirements(cd, hdr, &reqs))
1444                 return -EINVAL;
1445
1446         reqs &= ~CRYPT_REQUIREMENT_ONLINE_REENCRYPT;
1447
1448         log_dbg(cd, "Going to wipe reencryption requirement flag.");
1449
1450         return LUKS2_config_set_requirements(cd, hdr, reqs, commit);
1451 }
1452
1453 static int reencrypt_hotzone_protect_ready(struct crypt_device *cd,
1454         struct reenc_protection *rp)
1455 {
1456         assert(rp);
1457
1458         if (rp->type == REENC_PROTECTION_NOT_SET)
1459                 return -EINVAL;
1460
1461         if (rp->type != REENC_PROTECTION_CHECKSUM)
1462                 return 0;
1463
1464         if (!rp->p.csum.checksums) {
1465                 log_dbg(cd, "Allocating buffer for storing resilience checksums.");
1466                 if (posix_memalign(&rp->p.csum.checksums, device_alignment(crypt_metadata_device(cd)),
1467                                    rp->p.csum.checksums_len))
1468                         return -ENOMEM;
1469         }
1470
1471         return 0;
1472 }
1473
1474 static int reencrypt_recover_segment(struct crypt_device *cd,
1475         struct luks2_hdr *hdr,
1476         struct luks2_reencrypt *rh,
1477         struct volume_key *vks)
1478 {
1479         struct volume_key *vk_old, *vk_new;
1480         size_t count, s;
1481         ssize_t read, w;
1482         struct reenc_protection *rp;
1483         int devfd, r, new_sector_size, old_sector_size, rseg;
1484         uint64_t area_offset, area_length, area_length_read, crash_iv_offset,
1485                  data_offset = crypt_get_data_offset(cd) << SECTOR_SHIFT;
1486         char *checksum_tmp = NULL, *data_buffer = NULL;
1487         struct crypt_storage_wrapper *cw1 = NULL, *cw2 = NULL;
1488
1489         assert(hdr);
1490         assert(rh);
1491         assert(vks);
1492
1493         rseg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot);
1494         if (rh->offset == 0 && rh->rp_moved_segment.type > REENC_PROTECTION_NOT_SET) {
1495                 log_dbg(cd, "Recovery using moved segment protection.");
1496                 rp = &rh->rp_moved_segment;
1497         } else
1498                 rp = &rh->rp;
1499
1500         if (rseg < 0 || rh->length < 512)
1501                 return -EINVAL;
1502
1503         r = reencrypt_hotzone_protect_ready(cd, rp);
1504         if (r) {
1505                 log_err(cd, _("Failed to initialize hotzone protection."));
1506                 return -EINVAL;
1507         }
1508
1509         vk_new = crypt_volume_key_by_id(vks, rh->digest_new);
1510         if (!vk_new && rh->mode != CRYPT_REENCRYPT_DECRYPT)
1511                 return -EINVAL;
1512         vk_old = crypt_volume_key_by_id(vks, rh->digest_old);
1513         if (!vk_old && rh->mode != CRYPT_REENCRYPT_ENCRYPT)
1514                 return -EINVAL;
1515         old_sector_size = json_segment_get_sector_size(reencrypt_segment_old(hdr));
1516         new_sector_size = json_segment_get_sector_size(reencrypt_segment_new(hdr));
1517         if (rh->mode == CRYPT_REENCRYPT_DECRYPT)
1518                 crash_iv_offset = rh->offset >> SECTOR_SHIFT; /* TODO: + old iv_tweak */
1519         else
1520                 crash_iv_offset = json_segment_get_iv_offset(json_segments_get_segment(rh->jobj_segs_hot, rseg));
1521
1522         log_dbg(cd, "crash_offset: %" PRIu64 ", crash_length: %" PRIu64 ",  crash_iv_offset: %" PRIu64,
1523                 data_offset + rh->offset, rh->length, crash_iv_offset);
1524
1525         r = crypt_storage_wrapper_init(cd, &cw2, crypt_data_device(cd),
1526                         data_offset + rh->offset, crash_iv_offset, new_sector_size,
1527                         reencrypt_segment_cipher_new(hdr), vk_new, 0);
1528         if (r) {
1529                 log_err(cd, _("Failed to initialize new segment storage wrapper."));
1530                 return r;
1531         }
1532
1533         if (LUKS2_keyslot_area(hdr, rh->reenc_keyslot, &area_offset, &area_length)) {
1534                 r = -EINVAL;
1535                 goto out;
1536         }
1537
1538         if (posix_memalign((void**)&data_buffer, device_alignment(crypt_data_device(cd)), rh->length)) {
1539                 r = -ENOMEM;
1540                 goto out;
1541         }
1542
1543         switch (rp->type) {
1544         case  REENC_PROTECTION_CHECKSUM:
1545                 log_dbg(cd, "Checksums based recovery.");
1546
1547                 r = crypt_storage_wrapper_init(cd, &cw1, crypt_data_device(cd),
1548                                 data_offset + rh->offset, crash_iv_offset, old_sector_size,
1549                                 reencrypt_segment_cipher_old(hdr), vk_old, 0);
1550                 if (r) {
1551                         log_err(cd, _("Failed to initialize old segment storage wrapper."));
1552                         goto out;
1553                 }
1554
1555                 count = rh->length / rp->p.csum.block_size;
1556                 area_length_read = count * rp->p.csum.hash_size;
1557                 if (area_length_read > area_length) {
1558                         log_dbg(cd, "Internal error in calculated area_length.");
1559                         r = -EINVAL;
1560                         goto out;
1561                 }
1562
1563                 checksum_tmp = malloc(rp->p.csum.hash_size);
1564                 if (!checksum_tmp) {
1565                         r = -ENOMEM;
1566                         goto out;
1567                 }
1568
1569                 /* TODO: lock for read */
1570                 devfd = device_open(cd, crypt_metadata_device(cd), O_RDONLY);
1571                 if (devfd < 0)
1572                         goto out;
1573
1574                 /* read old data checksums */
1575                 read = read_lseek_blockwise(devfd, device_block_size(cd, crypt_metadata_device(cd)),
1576                                         device_alignment(crypt_metadata_device(cd)), rp->p.csum.checksums, area_length_read, area_offset);
1577                 if (read < 0 || (size_t)read != area_length_read) {
1578                         log_err(cd, _("Failed to read checksums for current hotzone."));
1579                         r = -EINVAL;
1580                         goto out;
1581                 }
1582
1583                 read = crypt_storage_wrapper_read(cw2, 0, data_buffer, rh->length);
1584                 if (read < 0 || (size_t)read != rh->length) {
1585                         log_err(cd, _("Failed to read hotzone area starting at %" PRIu64 "."), rh->offset + data_offset);
1586                         r = -EINVAL;
1587                         goto out;
1588                 }
1589
1590                 for (s = 0; s < count; s++) {
1591                         if (crypt_hash_write(rp->p.csum.ch, data_buffer + (s * rp->p.csum.block_size), rp->p.csum.block_size)) {
1592                                 log_dbg(cd, "Failed to write hash.");
1593                                 r = EINVAL;
1594                                 goto out;
1595                         }
1596                         if (crypt_hash_final(rp->p.csum.ch, checksum_tmp, rp->p.csum.hash_size)) {
1597                                 log_dbg(cd, "Failed to finalize hash.");
1598                                 r = EINVAL;
1599                                 goto out;
1600                         }
1601                         if (!memcmp(checksum_tmp, (char *)rp->p.csum.checksums + (s * rp->p.csum.hash_size), rp->p.csum.hash_size)) {
1602                                 log_dbg(cd, "Sector %zu (size %zu, offset %zu) needs recovery", s, rp->p.csum.block_size, s * rp->p.csum.block_size);
1603                                 if (crypt_storage_wrapper_decrypt(cw1, s * rp->p.csum.block_size, data_buffer + (s * rp->p.csum.block_size), rp->p.csum.block_size)) {
1604                                         log_err(cd, _("Failed to decrypt sector %zu."), s);
1605                                         r = -EINVAL;
1606                                         goto out;
1607                                 }
1608                                 w = crypt_storage_wrapper_encrypt_write(cw2, s * rp->p.csum.block_size, data_buffer + (s * rp->p.csum.block_size), rp->p.csum.block_size);
1609                                 if (w < 0 || (size_t)w != rp->p.csum.block_size) {
1610                                         log_err(cd, _("Failed to recover sector %zu."), s);
1611                                         r = -EINVAL;
1612                                         goto out;
1613                                 }
1614                         }
1615                 }
1616
1617                 r = 0;
1618                 break;
1619         case  REENC_PROTECTION_JOURNAL:
1620                 log_dbg(cd, "Journal based recovery.");
1621
1622                 /* FIXME: validation candidate */
1623                 if (rh->length > area_length) {
1624                         r = -EINVAL;
1625                         log_dbg(cd, "Invalid journal size.");
1626                         goto out;
1627                 }
1628
1629                 /* TODO locking */
1630                 r = crypt_storage_wrapper_init(cd, &cw1, crypt_metadata_device(cd),
1631                                 area_offset, crash_iv_offset, old_sector_size,
1632                                 reencrypt_segment_cipher_old(hdr), vk_old, 0);
1633                 if (r) {
1634                         log_err(cd, _("Failed to initialize old segment storage wrapper."));
1635                         goto out;
1636                 }
1637                 read = crypt_storage_wrapper_read_decrypt(cw1, 0, data_buffer, rh->length);
1638                 if (read < 0 || (size_t)read != rh->length) {
1639                         log_dbg(cd, "Failed to read journaled data.");
1640                         r = -EIO;
1641                         /* may content plaintext */
1642                         crypt_safe_memzero(data_buffer, rh->length);
1643                         goto out;
1644                 }
1645                 read = crypt_storage_wrapper_encrypt_write(cw2, 0, data_buffer, rh->length);
1646                 /* may content plaintext */
1647                 crypt_safe_memzero(data_buffer, rh->length);
1648                 if (read < 0 || (size_t)read != rh->length) {
1649                         log_dbg(cd, "recovery write failed.");
1650                         r = -EINVAL;
1651                         goto out;
1652                 }
1653
1654                 r = 0;
1655                 break;
1656         case  REENC_PROTECTION_DATASHIFT:
1657                 log_dbg(cd, "Data shift based recovery.");
1658
1659                 if (rseg == 0) {
1660                         r = crypt_storage_wrapper_init(cd, &cw1, crypt_data_device(cd),
1661                                         json_segment_get_offset(rh->jobj_segment_moved, 0), 0,
1662                                         reencrypt_get_sector_size_old(hdr),
1663                                         reencrypt_segment_cipher_old(hdr), vk_old, 0);
1664                 } else {
1665                         if (rh->direction == CRYPT_REENCRYPT_FORWARD)
1666                                 data_offset = data_offset + rh->offset + data_shift_value(rp);
1667                         else
1668                                 data_offset = data_offset + rh->offset - data_shift_value(rp);
1669                         r = crypt_storage_wrapper_init(cd, &cw1, crypt_data_device(cd),
1670                                         data_offset,
1671                                         crash_iv_offset,
1672                                         reencrypt_get_sector_size_old(hdr),
1673                                         reencrypt_segment_cipher_old(hdr), vk_old, 0);
1674                 }
1675                 if (r) {
1676                         log_err(cd, _("Failed to initialize old segment storage wrapper."));
1677                         goto out;
1678                 }
1679
1680                 read = crypt_storage_wrapper_read_decrypt(cw1, 0, data_buffer, rh->length);
1681                 if (read < 0 || (size_t)read != rh->length) {
1682                         log_dbg(cd, "Failed to read data.");
1683                         r = -EIO;
1684                         /* may content plaintext */
1685                         crypt_safe_memzero(data_buffer, rh->length);
1686                         goto out;
1687                 }
1688
1689                 read = crypt_storage_wrapper_encrypt_write(cw2, 0, data_buffer, rh->length);
1690                 /* may content plaintext */
1691                 crypt_safe_memzero(data_buffer, rh->length);
1692                 if (read < 0 || (size_t)read != rh->length) {
1693                         log_dbg(cd, "recovery write failed.");
1694                         r = -EINVAL;
1695                         goto out;
1696                 }
1697                 r = 0;
1698                 break;
1699         default:
1700                 r = -EINVAL;
1701         }
1702
1703         if (!r)
1704                 rh->read = rh->length;
1705 out:
1706         free(data_buffer);
1707         free(checksum_tmp);
1708         crypt_storage_wrapper_destroy(cw1);
1709         crypt_storage_wrapper_destroy(cw2);
1710
1711         return r;
1712 }
1713
1714 static int reencrypt_add_moved_segment(struct crypt_device *cd, struct luks2_hdr *hdr, struct luks2_reencrypt *rh)
1715 {
1716         int digest = rh->digest_old, s = LUKS2_segment_first_unused_id(hdr);
1717
1718         if (!rh->jobj_segment_moved)
1719                 return 0;
1720
1721         if (s < 0)
1722                 return s;
1723
1724         if (json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), s, json_object_get(rh->jobj_segment_moved))) {
1725                 json_object_put(rh->jobj_segment_moved);
1726                 return -EINVAL;
1727         }
1728
1729         if (!strcmp(json_segment_type(rh->jobj_segment_moved), "crypt"))
1730                 return LUKS2_digest_segment_assign(cd, hdr, s, digest, 1, 0);
1731
1732         return 0;
1733 }
1734
1735 static int reencrypt_add_backup_segment(struct crypt_device *cd,
1736                 struct luks2_hdr *hdr,
1737                 struct luks2_reencrypt *rh,
1738                 unsigned final)
1739 {
1740         int digest, s = LUKS2_segment_first_unused_id(hdr);
1741         json_object *jobj;
1742
1743         if (s < 0)
1744                 return s;
1745
1746         digest = final ? rh->digest_new : rh->digest_old;
1747         jobj = final ? rh->jobj_segment_new : rh->jobj_segment_old;
1748
1749         if (json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), s, json_object_get(jobj))) {
1750                 json_object_put(jobj);
1751                 return -EINVAL;
1752         }
1753
1754         if (strcmp(json_segment_type(jobj), "crypt"))
1755                 return 0;
1756
1757         return LUKS2_digest_segment_assign(cd, hdr, s, digest, 1, 0);
1758 }
1759
1760 static int reencrypt_assign_segments_simple(struct crypt_device *cd,
1761         struct luks2_hdr *hdr,
1762         struct luks2_reencrypt *rh,
1763         unsigned hot,
1764         unsigned commit)
1765 {
1766         int r, sg;
1767
1768         if (hot && json_segments_count(rh->jobj_segs_hot) > 0) {
1769                 log_dbg(cd, "Setting 'hot' segments.");
1770
1771                 r = LUKS2_segments_set(cd, hdr, rh->jobj_segs_hot, 0);
1772                 if (!r)
1773                         rh->jobj_segs_hot = NULL;
1774         } else if (!hot && json_segments_count(rh->jobj_segs_post) > 0) {
1775                 log_dbg(cd, "Setting 'post' segments.");
1776                 r = LUKS2_segments_set(cd, hdr, rh->jobj_segs_post, 0);
1777                 if (!r)
1778                         rh->jobj_segs_post = NULL;
1779         } else {
1780                 log_dbg(cd, "No segments to set.");
1781                 return -EINVAL;
1782         }
1783
1784         if (r) {
1785                 log_dbg(cd, "Failed to assign new enc segments.");
1786                 return r;
1787         }
1788
1789         r = reencrypt_add_backup_segment(cd, hdr, rh, 0);
1790         if (r) {
1791                 log_dbg(cd, "Failed to assign reencryption previous backup segment.");
1792                 return r;
1793         }
1794
1795         r = reencrypt_add_backup_segment(cd, hdr, rh, 1);
1796         if (r) {
1797                 log_dbg(cd, "Failed to assign reencryption final backup segment.");
1798                 return r;
1799         }
1800
1801         r = reencrypt_add_moved_segment(cd, hdr, rh);
1802         if (r) {
1803                 log_dbg(cd, "Failed to assign reencryption moved backup segment.");
1804                 return r;
1805         }
1806
1807         for (sg = 0; sg < LUKS2_segments_count(hdr); sg++) {
1808                 if (LUKS2_segment_is_type(hdr, sg, "crypt") &&
1809                     LUKS2_digest_segment_assign(cd, hdr, sg, rh->mode == CRYPT_REENCRYPT_ENCRYPT ? rh->digest_new : rh->digest_old, 1, 0)) {
1810                         log_dbg(cd, "Failed to assign digest %u to segment %u.", rh->digest_new, sg);
1811                         return -EINVAL;
1812                 }
1813         }
1814
1815         return commit ? LUKS2_hdr_write(cd, hdr) : 0;
1816 }
1817
/*
 * Install either the 'hot' (hot != 0) or the 'post' segment layout in the
 * LUKS2 header and rebuild all digest <-> segment assignments for regular
 * reencryption. The segment currently in reencryption holds a reference
 * to both the old and the new key digest; neighbouring segments get the
 * single digest matching their state given the reencryption direction.
 *
 * Plain encryption/decryption modes are delegated to
 * reencrypt_assign_segments_simple().
 *
 * @commit non-zero writes the modified header to disk on success.
 *
 * Returns 0 on success, negative errno otherwise.
 */
static int reencrypt_assign_segments(struct crypt_device *cd,
		struct luks2_hdr *hdr,
		struct luks2_reencrypt *rh,
		unsigned hot,
		unsigned commit)
{
	bool forward;
	int rseg, scount, r = -EINVAL;

	/* FIXME: validate in reencrypt context load */
	if (rh->digest_new < 0 && rh->mode != CRYPT_REENCRYPT_DECRYPT)
		return -EINVAL;

	/* wipe all existing digest assignments before rebuilding them below */
	if (LUKS2_digest_segment_assign(cd, hdr, CRYPT_ANY_SEGMENT, CRYPT_ANY_DIGEST, 0, 0))
		return -EINVAL;

	if (rh->mode == CRYPT_REENCRYPT_ENCRYPT || rh->mode == CRYPT_REENCRYPT_DECRYPT)
		return reencrypt_assign_segments_simple(cd, hdr, rh, hot, commit);

	/* NULL-ing rh pointers after success: LUKS2_segments_set takes ownership */
	if (hot && rh->jobj_segs_hot) {
		log_dbg(cd, "Setting 'hot' segments.");

		r = LUKS2_segments_set(cd, hdr, rh->jobj_segs_hot, 0);
		if (!r)
			rh->jobj_segs_hot = NULL;
	} else if (!hot && rh->jobj_segs_post) {
		log_dbg(cd, "Setting 'post' segments.");
		r = LUKS2_segments_set(cd, hdr, rh->jobj_segs_post, 0);
		if (!r)
			rh->jobj_segs_post = NULL;
	}

	if (r)
		return r;

	scount = LUKS2_segments_count(hdr);

	/* segment in reencryption has to hold reference on both digests */
	rseg = json_segments_segment_in_reencrypt(LUKS2_get_segments_jobj(hdr));
	if (rseg < 0 && hot)
		return -EINVAL;

	if (rseg >= 0) {
		LUKS2_digest_segment_assign(cd, hdr, rseg, rh->digest_new, 1, 0);
		LUKS2_digest_segment_assign(cd, hdr, rseg, rh->digest_old, 1, 0);
	}

	forward = (rh->direction == CRYPT_REENCRYPT_FORWARD);
	if (hot) {
		/* forward: segment before the hotzone already uses the new key,
		 * the one after still uses the old key (and vice versa backward) */
		if (rseg > 0)
			LUKS2_digest_segment_assign(cd, hdr, 0, forward ? rh->digest_new : rh->digest_old, 1, 0);
		if (scount > rseg + 1)
			LUKS2_digest_segment_assign(cd, hdr, rseg + 1, forward ? rh->digest_old : rh->digest_new, 1, 0);
	} else {
		/* 'post' layout: at most two segments remain after the hotzone is done */
		LUKS2_digest_segment_assign(cd, hdr, 0, forward || scount == 1 ? rh->digest_new : rh->digest_old, 1, 0);
		if (scount > 1)
			LUKS2_digest_segment_assign(cd, hdr, 1, forward ? rh->digest_old : rh->digest_new, 1, 0);
	}

	r = reencrypt_add_backup_segment(cd, hdr, rh, 0);
	if (r) {
		log_dbg(cd, "Failed to assign hot reencryption backup segment.");
		return r;
	}
	r = reencrypt_add_backup_segment(cd, hdr, rh, 1);
	if (r) {
		log_dbg(cd, "Failed to assign post reencryption backup segment.");
		return r;
	}

	return commit ? LUKS2_hdr_write(cd, hdr) : 0;
}
1890
1891 static int reencrypt_set_encrypt_segments(struct crypt_device *cd, struct luks2_hdr *hdr,
1892                                           uint64_t dev_size, uint64_t data_shift, bool move_first_segment,
1893                                           crypt_reencrypt_direction_info di)
1894 {
1895         int r;
1896         uint64_t first_segment_offset, first_segment_length,
1897                  second_segment_offset, second_segment_length,
1898                  data_offset = LUKS2_get_data_offset(hdr) << SECTOR_SHIFT,
1899                  data_size = dev_size - data_shift;
1900         json_object *jobj_segment_first = NULL, *jobj_segment_second = NULL, *jobj_segments;
1901
1902         if (dev_size < data_shift)
1903                 return -EINVAL;
1904
1905         if (data_shift && (di == CRYPT_REENCRYPT_FORWARD))
1906                 return -ENOTSUP;
1907
1908         if (move_first_segment) {
1909                 /*
1910                  * future data_device layout:
1911                  * [future LUKS2 header (data shift size)][second data segment][gap (data shift size)][first data segment (data shift size)]
1912                  */
1913                 first_segment_offset = dev_size;
1914                 if (data_size < data_shift) {
1915                         first_segment_length = data_size;
1916                         second_segment_length = second_segment_offset = 0;
1917                 } else {
1918                         first_segment_length = data_shift;
1919                         second_segment_offset = data_shift;
1920                         second_segment_length = data_size - data_shift;
1921                 }
1922         } else if (data_shift) {
1923                 first_segment_offset = data_offset;
1924                 first_segment_length = dev_size;
1925         } else {
1926                 /* future data_device layout with detached header: [first data segment] */
1927                 first_segment_offset = data_offset;
1928                 first_segment_length = 0; /* dynamic */
1929         }
1930
1931         jobj_segments = json_object_new_object();
1932         if (!jobj_segments)
1933                 return -ENOMEM;
1934
1935         r = -EINVAL;
1936         if (move_first_segment) {
1937                 jobj_segment_first =  json_segment_create_linear(first_segment_offset, &first_segment_length, 0);
1938                 if (second_segment_length &&
1939                     !(jobj_segment_second = json_segment_create_linear(second_segment_offset, &second_segment_length, 0))) {
1940                         log_dbg(cd, "Failed generate 2nd segment.");
1941                         return r;
1942                 }
1943         } else
1944                 jobj_segment_first =  json_segment_create_linear(first_segment_offset, first_segment_length ? &first_segment_length : NULL, 0);
1945
1946         if (!jobj_segment_first) {
1947                 log_dbg(cd, "Failed generate 1st segment.");
1948                 return r;
1949         }
1950
1951         json_object_object_add(jobj_segments, "0", jobj_segment_first);
1952         if (jobj_segment_second)
1953                 json_object_object_add(jobj_segments, "1", jobj_segment_second);
1954
1955         r = LUKS2_digest_segment_assign(cd, hdr, CRYPT_ANY_SEGMENT, CRYPT_ANY_DIGEST, 0, 0);
1956
1957         return r ?: LUKS2_segments_set(cd, hdr, jobj_segments, 0);
1958 }
1959
1960 static int reencrypt_set_decrypt_shift_segments(struct crypt_device *cd,
1961         struct luks2_hdr *hdr,
1962         uint64_t dev_size,
1963         uint64_t moved_segment_length,
1964         crypt_reencrypt_direction_info di)
1965 {
1966         int r;
1967         uint64_t first_segment_offset, first_segment_length,
1968                  second_segment_offset, second_segment_length,
1969                  data_offset = LUKS2_get_data_offset(hdr) << SECTOR_SHIFT;
1970         json_object *jobj_segment_first = NULL, *jobj_segment_second = NULL, *jobj_segments;
1971
1972         if (di == CRYPT_REENCRYPT_BACKWARD)
1973                 return -ENOTSUP;
1974
1975         /*
1976          * future data_device layout:
1977          * [encrypted first segment (max data shift size)][gap (data shift size)][second encrypted data segment]
1978          */
1979         first_segment_offset = 0;
1980         first_segment_length = moved_segment_length;
1981         if (dev_size > moved_segment_length) {
1982                 second_segment_offset = data_offset + first_segment_length;
1983                 second_segment_length = 0;
1984         }
1985
1986         jobj_segments = json_object_new_object();
1987         if (!jobj_segments)
1988                 return -ENOMEM;
1989
1990         r = -EINVAL;
1991         jobj_segment_first = json_segment_create_crypt(first_segment_offset,
1992                                 crypt_get_iv_offset(cd), &first_segment_length,
1993                                 crypt_get_cipher_spec(cd), crypt_get_sector_size(cd), 0);
1994
1995         if (!jobj_segment_first) {
1996                 log_dbg(cd, "Failed generate 1st segment.");
1997                 return r;
1998         }
1999
2000         if (dev_size > moved_segment_length) {
2001                 jobj_segment_second = json_segment_create_crypt(second_segment_offset,
2002                                                                 crypt_get_iv_offset(cd) + (first_segment_length >> SECTOR_SHIFT),
2003                                                                 second_segment_length ? &second_segment_length : NULL,
2004                                                                 crypt_get_cipher_spec(cd),
2005                                                                 crypt_get_sector_size(cd), 0);
2006                 if (!jobj_segment_second) {
2007                         json_object_put(jobj_segment_first);
2008                         log_dbg(cd, "Failed generate 2nd segment.");
2009                         return r;
2010                 }
2011         }
2012
2013         json_object_object_add(jobj_segments, "0", jobj_segment_first);
2014         if (jobj_segment_second)
2015                 json_object_object_add(jobj_segments, "1", jobj_segment_second);
2016
2017         r = LUKS2_segments_set(cd, hdr, jobj_segments, 0);
2018
2019         return r ?: LUKS2_digest_segment_assign(cd, hdr, CRYPT_ANY_SEGMENT, 0, 1, 0);
2020 }
2021
2022 static int reencrypt_make_targets(struct crypt_device *cd,
2023                                 struct luks2_hdr *hdr,
2024                                 struct device *hz_device,
2025                                 struct volume_key *vks,
2026                                 struct dm_target *result,
2027                                 uint64_t size)
2028 {
2029         bool reenc_seg;
2030         struct volume_key *vk;
2031         uint64_t segment_size, segment_offset, segment_start = 0;
2032         int r;
2033         int s = 0;
2034         json_object *jobj, *jobj_segments = LUKS2_get_segments_jobj(hdr);
2035
2036         while (result) {
2037                 jobj = json_segments_get_segment(jobj_segments, s);
2038                 if (!jobj) {
2039                         log_dbg(cd, "Internal error. Segment %u is null.", s);
2040                         return -EINVAL;
2041                 }
2042
2043                 reenc_seg = (s == json_segments_segment_in_reencrypt(jobj_segments));
2044
2045                 segment_offset = json_segment_get_offset(jobj, 1);
2046                 segment_size = json_segment_get_size(jobj, 1);
2047                 /* 'dynamic' length allowed in last segment only */
2048                 if (!segment_size && !result->next)
2049                         segment_size = (size >> SECTOR_SHIFT) - segment_start;
2050                 if (!segment_size) {
2051                         log_dbg(cd, "Internal error. Wrong segment size %u", s);
2052                         return -EINVAL;
2053                 }
2054
2055                 if (reenc_seg)
2056                         segment_offset -= crypt_get_data_offset(cd);
2057
2058                 if (!strcmp(json_segment_type(jobj), "crypt")) {
2059                         vk = crypt_volume_key_by_id(vks, reenc_seg ? LUKS2_reencrypt_digest_new(hdr) : LUKS2_digest_by_segment(hdr, s));
2060                         if (!vk) {
2061                                 log_err(cd, _("Missing key for dm-crypt segment %u"), s);
2062                                 return -EINVAL;
2063                         }
2064
2065                         r = dm_crypt_target_set(result, segment_start, segment_size,
2066                                                 reenc_seg ? hz_device : crypt_data_device(cd),
2067                                                 vk,
2068                                                 json_segment_get_cipher(jobj),
2069                                                 json_segment_get_iv_offset(jobj),
2070                                                 segment_offset,
2071                                                 "none",
2072                                                 0,
2073                                                 json_segment_get_sector_size(jobj));
2074                         if (r) {
2075                                 log_err(cd, _("Failed to set dm-crypt segment."));
2076                                 return r;
2077                         }
2078                 } else if (!strcmp(json_segment_type(jobj), "linear")) {
2079                         r = dm_linear_target_set(result, segment_start, segment_size, reenc_seg ? hz_device : crypt_data_device(cd), segment_offset);
2080                         if (r) {
2081                                 log_err(cd, _("Failed to set dm-linear segment."));
2082                                 return r;
2083                         }
2084                 } else
2085                         return EINVAL;
2086
2087                 segment_start += segment_size;
2088                 s++;
2089                 result = result->next;
2090         }
2091
2092         return s;
2093 }
2094
2095 /* GLOBAL FIXME: audit function names and parameters names */
2096
2097 /* FIXME:
2098  *      1) audit log routines
2099  *      2) can't we derive hotzone device name from crypt context? (unlocked name, device uuid, etc?)
2100  */
2101 static int reencrypt_load_overlay_device(struct crypt_device *cd, struct luks2_hdr *hdr,
2102         const char *overlay, const char *hotzone, struct volume_key *vks, uint64_t size,
2103         uint32_t flags)
2104 {
2105         char hz_path[PATH_MAX];
2106         int r;
2107
2108         struct device *hz_dev = NULL;
2109         struct crypt_dm_active_device dmd = {
2110                 .flags = flags,
2111         };
2112
2113         log_dbg(cd, "Loading new table for overlay device %s.", overlay);
2114
2115         r = snprintf(hz_path, PATH_MAX, "%s/%s", dm_get_dir(), hotzone);
2116         if (r < 0 || r >= PATH_MAX) {
2117                 r = -EINVAL;
2118                 goto out;
2119         }
2120
2121         r = device_alloc(cd, &hz_dev, hz_path);
2122         if (r)
2123                 goto out;
2124
2125         r = dm_targets_allocate(&dmd.segment, LUKS2_segments_count(hdr));
2126         if (r)
2127                 goto out;
2128
2129         r = reencrypt_make_targets(cd, hdr, hz_dev, vks, &dmd.segment, size);
2130         if (r < 0)
2131                 goto out;
2132
2133         r = dm_reload_device(cd, overlay, &dmd, 0, 0);
2134
2135         /* what else on error here ? */
2136 out:
2137         dm_targets_free(cd, &dmd);
2138         device_free(cd, hz_dev);
2139
2140         return r;
2141 }
2142
2143 static int reencrypt_replace_device(struct crypt_device *cd, const char *target, const char *source, uint32_t flags)
2144 {
2145         int r, exists = 1;
2146         struct crypt_dm_active_device dmd_source, dmd_target = {};
2147         uint32_t dmflags = DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH;
2148
2149         log_dbg(cd, "Replacing table in device %s with table from device %s.", target, source);
2150
2151         /* check only whether target device exists */
2152         r = dm_status_device(cd, target);
2153         if (r < 0) {
2154                 if (r == -ENODEV)
2155                         exists = 0;
2156                 else
2157                         return r;
2158         }
2159
2160         r = dm_query_device(cd, source, DM_ACTIVE_DEVICE | DM_ACTIVE_CRYPT_CIPHER |
2161                             DM_ACTIVE_CRYPT_KEYSIZE | DM_ACTIVE_CRYPT_KEY, &dmd_source);
2162
2163         if (r < 0)
2164                 return r;
2165
2166         if (exists && ((r = dm_query_device(cd, target, 0, &dmd_target)) < 0))
2167                 goto out;
2168
2169         dmd_source.flags |= flags;
2170         dmd_source.uuid = crypt_get_uuid(cd);
2171
2172         if (exists) {
2173                 if (dmd_target.size != dmd_source.size) {
2174                         log_err(cd, _("Source and target device sizes don't match. Source %" PRIu64 ", target: %" PRIu64 "."),
2175                                 dmd_source.size, dmd_target.size);
2176                         r = -EINVAL;
2177                         goto out;
2178                 }
2179                 r = dm_reload_device(cd, target, &dmd_source, 0, 0);
2180                 if (!r) {
2181                         log_dbg(cd, "Resuming device %s", target);
2182                         r = dm_resume_device(cd, target, dmflags | act2dmflags(dmd_source.flags));
2183                 }
2184         } else
2185                 r = dm_create_device(cd, target, CRYPT_SUBDEV, &dmd_source);
2186 out:
2187         dm_targets_free(cd, &dmd_source);
2188         dm_targets_free(cd, &dmd_target);
2189
2190         return r;
2191 }
2192
2193 static int reencrypt_swap_backing_device(struct crypt_device *cd, const char *name,
2194                               const char *new_backend_name)
2195 {
2196         int r;
2197         struct device *overlay_dev = NULL;
2198         char overlay_path[PATH_MAX] = { 0 };
2199         struct crypt_dm_active_device dmd = {};
2200
2201         log_dbg(cd, "Redirecting %s mapping to new backing device: %s.", name, new_backend_name);
2202
2203         r = snprintf(overlay_path, PATH_MAX, "%s/%s", dm_get_dir(), new_backend_name);
2204         if (r < 0 || r >= PATH_MAX) {
2205                 r = -EINVAL;
2206                 goto out;
2207         }
2208
2209         r = device_alloc(cd, &overlay_dev, overlay_path);
2210         if (r)
2211                 goto out;
2212
2213         r = device_block_adjust(cd, overlay_dev, DEV_OK,
2214                                 0, &dmd.size, &dmd.flags);
2215         if (r)
2216                 goto out;
2217
2218         r = dm_linear_target_set(&dmd.segment, 0, dmd.size, overlay_dev, 0);
2219         if (r)
2220                 goto out;
2221
2222         r = dm_reload_device(cd, name, &dmd, 0, 0);
2223         if (!r) {
2224                 log_dbg(cd, "Resuming device %s", name);
2225                 r = dm_resume_device(cd, name, DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH);
2226         }
2227
2228 out:
2229         dm_targets_free(cd, &dmd);
2230         device_free(cd, overlay_dev);
2231
2232         return r;
2233 }
2234
2235 static int reencrypt_activate_hotzone_device(struct crypt_device *cd, const char *name, uint64_t device_size, uint32_t flags)
2236 {
2237         int r;
2238         uint64_t new_offset = reencrypt_get_data_offset_new(crypt_get_hdr(cd, CRYPT_LUKS2)) >> SECTOR_SHIFT;
2239
2240         struct crypt_dm_active_device dmd = {
2241                 .flags = flags,
2242                 .uuid = crypt_get_uuid(cd),
2243                 .size = device_size >> SECTOR_SHIFT
2244         };
2245
2246         log_dbg(cd, "Activating hotzone device %s.", name);
2247
2248         r = device_block_adjust(cd, crypt_data_device(cd), DEV_OK,
2249                                 new_offset, &dmd.size, &dmd.flags);
2250         if (r)
2251                 goto out;
2252
2253         r = dm_linear_target_set(&dmd.segment, 0, dmd.size, crypt_data_device(cd), new_offset);
2254         if (r)
2255                 goto out;
2256
2257         r = dm_create_device(cd, name, CRYPT_SUBDEV, &dmd);
2258 out:
2259         dm_targets_free(cd, &dmd);
2260
2261         return r;
2262 }
2263
2264 static int reencrypt_init_device_stack(struct crypt_device *cd,
2265                                      const struct luks2_reencrypt *rh)
2266 {
2267         int r;
2268
2269         /* Activate hotzone device 1:1 linear mapping to data_device */
2270         r = reencrypt_activate_hotzone_device(cd, rh->hotzone_name, rh->device_size, CRYPT_ACTIVATE_PRIVATE);
2271         if (r) {
2272                 log_err(cd, _("Failed to activate hotzone device %s."), rh->hotzone_name);
2273                 return r;
2274         }
2275
2276         /*
2277          * Activate overlay device with exactly same table as original 'name' mapping.
2278          * Note that within this step the 'name' device may already include a table
2279          * constructed from more than single dm-crypt segment. Therefore transfer
2280          * mapping as is.
2281          *
2282          * If we're about to resume reencryption orig mapping has to be already validated for
2283          * abrupt shutdown and rchunk_offset has to point on next chunk to reencrypt!
2284          *
2285          * TODO: in crypt_activate_by*
2286          */
2287         r = reencrypt_replace_device(cd, rh->overlay_name, rh->device_name, CRYPT_ACTIVATE_PRIVATE);
2288         if (r) {
2289                 log_err(cd, _("Failed to activate overlay device %s with actual origin table."), rh->overlay_name);
2290                 goto err;
2291         }
2292
2293         /* swap origin mapping to overlay device */
2294         r = reencrypt_swap_backing_device(cd, rh->device_name, rh->overlay_name);
2295         if (r) {
2296                 log_err(cd, _("Failed to load new mapping for device %s."), rh->device_name);
2297                 goto err;
2298         }
2299
2300         /*
2301          * Now the 'name' (unlocked luks) device is mapped via dm-linear to an overlay dev.
2302          * The overlay device has a original live table of 'name' device in-before the swap.
2303          */
2304
2305         return 0;
2306 err:
2307         /* TODO: force error helper devices on error path */
2308         dm_remove_device(cd, rh->overlay_name, 0);
2309         dm_remove_device(cd, rh->hotzone_name, 0);
2310
2311         return r;
2312 }
2313
2314 /* TODO:
2315  *      1) audit error path. any error in this routine is fatal and should be unlikely.
2316  *         usually it would hint some collision with another userspace process touching
2317  *         dm devices directly.
2318  */
2319 static int reenc_refresh_helper_devices(struct crypt_device *cd, const char *overlay, const char *hotzone)
2320 {
2321         int r;
2322
2323         /*
2324          * we have to explicitly suspend the overlay device before suspending
2325          * the hotzone one. Resuming overlay device (aka switching tables) only
2326          * after suspending the hotzone may lead to deadlock.
2327          *
2328          * In other words: always suspend the stack from top to bottom!
2329          */
2330         r = dm_suspend_device(cd, overlay, DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH);
2331         if (r) {
2332                 log_err(cd, _("Failed to suspend device %s."), overlay);
2333                 return r;
2334         }
2335
2336         /* suspend HZ device */
2337         r = dm_suspend_device(cd, hotzone, DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH);
2338         if (r) {
2339                 log_err(cd, _("Failed to suspend device %s."), hotzone);
2340                 return r;
2341         }
2342
2343         /* resume overlay device: inactive table (with hotozne) -> live */
2344         r = dm_resume_device(cd, overlay, DM_RESUME_PRIVATE);
2345         if (r)
2346                 log_err(cd, _("Failed to resume device %s."), overlay);
2347
2348         return r;
2349 }
2350
2351 static int reencrypt_refresh_overlay_devices(struct crypt_device *cd,
2352                 struct luks2_hdr *hdr,
2353                 const char *overlay,
2354                 const char *hotzone,
2355                 struct volume_key *vks,
2356                 uint64_t device_size,
2357                 uint32_t flags)
2358 {
2359         int r = reencrypt_load_overlay_device(cd, hdr, overlay, hotzone, vks, device_size, flags);
2360         if (r) {
2361                 log_err(cd, _("Failed to reload device %s."), overlay);
2362                 return REENC_ERR;
2363         }
2364
2365         r = reenc_refresh_helper_devices(cd, overlay, hotzone);
2366         if (r) {
2367                 log_err(cd, _("Failed to refresh reencryption devices stack."));
2368                 return REENC_ROLLBACK;
2369         }
2370
2371         return REENC_OK;
2372 }
2373
2374 static int reencrypt_move_data(struct crypt_device *cd,
2375         int devfd,
2376         uint64_t data_shift,
2377         crypt_reencrypt_mode_info mode)
2378 {
2379         void *buffer;
2380         int r;
2381         ssize_t ret;
2382         uint64_t buffer_len, offset,
2383                  read_offset = (mode == CRYPT_REENCRYPT_ENCRYPT ? 0 : data_shift);
2384         struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
2385
2386         offset = json_segment_get_offset(LUKS2_get_segment_jobj(hdr, 0), 0);
2387         buffer_len = json_segment_get_size(LUKS2_get_segment_jobj(hdr, 0), 0);
2388         if (!buffer_len || buffer_len > data_shift)
2389                 return -EINVAL;
2390
2391         if (posix_memalign(&buffer, device_alignment(crypt_data_device(cd)), buffer_len))
2392                 return -ENOMEM;
2393
2394         ret = read_lseek_blockwise(devfd,
2395                         device_block_size(cd, crypt_data_device(cd)),
2396                         device_alignment(crypt_data_device(cd)),
2397                         buffer, buffer_len, read_offset);
2398         if (ret < 0 || (uint64_t)ret != buffer_len) {
2399                 log_dbg(cd, "Failed to read data at offset %" PRIu64 " (size: %zu)",
2400                         read_offset, buffer_len);
2401                 r = -EIO;
2402                 goto out;
2403         }
2404
2405         log_dbg(cd, "Going to write %" PRIu64 " bytes read at offset %" PRIu64 " to new offset %" PRIu64,
2406                 buffer_len, read_offset, offset);
2407         ret = write_lseek_blockwise(devfd,
2408                         device_block_size(cd, crypt_data_device(cd)),
2409                         device_alignment(crypt_data_device(cd)),
2410                         buffer, buffer_len, offset);
2411         if (ret < 0 || (uint64_t)ret != buffer_len) {
2412                 log_dbg(cd, "Failed to write data at offset %" PRIu64 " (size: %zu)",
2413                         offset, buffer_len);
2414                 r = -EIO;
2415                 goto out;
2416         }
2417
2418         r = 0;
2419 out:
2420         crypt_safe_memzero(buffer, buffer_len);
2421         free(buffer);
2422         return r;
2423 }
2424
2425 static int reencrypt_make_backup_segments(struct crypt_device *cd,
2426                 struct luks2_hdr *hdr,
2427                 int keyslot_new,
2428                 const char *cipher,
2429                 uint64_t data_offset,
2430                 const struct crypt_params_reencrypt *params)
2431 {
2432         int r, segment, moved_segment = -1, digest_old = -1, digest_new = -1;
2433         json_object *jobj_tmp, *jobj_segment_new = NULL, *jobj_segment_old = NULL, *jobj_segment_bcp = NULL;
2434         uint32_t sector_size = params->luks2 ? params->luks2->sector_size : SECTOR_SIZE;
2435         uint64_t segment_offset, tmp, data_shift = params->data_shift << SECTOR_SHIFT,
2436                  device_size = params->device_size << SECTOR_SHIFT;
2437
2438         if (params->mode != CRYPT_REENCRYPT_DECRYPT) {
2439                 digest_new = LUKS2_digest_by_keyslot(hdr, keyslot_new);
2440                 if (digest_new < 0)
2441                         return -EINVAL;
2442         }
2443
2444         if (params->mode != CRYPT_REENCRYPT_ENCRYPT) {
2445                 digest_old = LUKS2_digest_by_segment(hdr, CRYPT_DEFAULT_SEGMENT);
2446                 if (digest_old < 0)
2447                         return -EINVAL;
2448         }
2449
2450         segment = LUKS2_segment_first_unused_id(hdr);
2451         if (segment < 0)
2452                 return -EINVAL;
2453
2454         if (params->flags & CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT) {
2455                 if (json_object_copy(LUKS2_get_segment_jobj(hdr, 0), &jobj_segment_bcp)) {
2456                         r = -EINVAL;
2457                         goto err;
2458                 }
2459                 r = LUKS2_segment_set_flag(jobj_segment_bcp, "backup-moved-segment");
2460                 if (r)
2461                         goto err;
2462                 moved_segment = segment++;
2463                 json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), moved_segment, jobj_segment_bcp);
2464                 if (!strcmp(json_segment_type(jobj_segment_bcp), "crypt"))
2465                         LUKS2_digest_segment_assign(cd, hdr, moved_segment, digest_old, 1, 0);
2466         }
2467
2468         /* FIXME: Add detection for case (digest old == digest new && old segment == new segment) */
2469         if (digest_old >= 0) {
2470                 if (params->flags & CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT) {
2471                         jobj_tmp = LUKS2_get_segment_jobj(hdr, 0);
2472                         if (!jobj_tmp) {
2473                                 r = -EINVAL;
2474                                 goto err;
2475                         }
2476
2477                         jobj_segment_old = json_segment_create_crypt(data_offset,
2478                                                 json_segment_get_iv_offset(jobj_tmp),
2479                                                 device_size ? &device_size : NULL,
2480                                                 json_segment_get_cipher(jobj_tmp),
2481                                                 json_segment_get_sector_size(jobj_tmp),
2482                                                 0);
2483                 } else {
2484                         if (json_object_copy(LUKS2_get_segment_jobj(hdr, CRYPT_DEFAULT_SEGMENT), &jobj_segment_old)) {
2485                                 r = -EINVAL;
2486                                 goto err;
2487                         }
2488                 }
2489         } else if (params->mode == CRYPT_REENCRYPT_ENCRYPT) {
2490                 r = LUKS2_get_data_size(hdr, &tmp, NULL);
2491                 if (r)
2492                         goto err;
2493
2494                 if (params->flags & CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT)
2495                         jobj_segment_old = json_segment_create_linear(0, tmp ? &tmp : NULL, 0);
2496                 else
2497                         jobj_segment_old = json_segment_create_linear(data_offset, tmp ? &tmp : NULL, 0);
2498         }
2499
2500         if (!jobj_segment_old) {
2501                 r = -EINVAL;
2502                 goto err;
2503         }
2504
2505         r = LUKS2_segment_set_flag(jobj_segment_old, "backup-previous");
2506         if (r)
2507                 goto err;
2508         json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), segment, jobj_segment_old);
2509         jobj_segment_old = NULL;
2510         if (digest_old >= 0)
2511                 LUKS2_digest_segment_assign(cd, hdr, segment, digest_old, 1, 0);
2512         segment++;
2513
2514         if (digest_new >= 0) {
2515                 segment_offset = data_offset;
2516                 if (params->mode != CRYPT_REENCRYPT_ENCRYPT &&
2517                     modify_offset(&segment_offset, data_shift, params->direction)) {
2518                         r = -EINVAL;
2519                         goto err;
2520                 }
2521                 jobj_segment_new = json_segment_create_crypt(segment_offset,
2522                                                         crypt_get_iv_offset(cd),
2523                                                         NULL, cipher, sector_size, 0);
2524         } else if (params->mode == CRYPT_REENCRYPT_DECRYPT) {
2525                 segment_offset = data_offset;
2526                 if (modify_offset(&segment_offset, data_shift, params->direction)) {
2527                         r = -EINVAL;
2528                         goto err;
2529                 }
2530                 jobj_segment_new = json_segment_create_linear(segment_offset, NULL, 0);
2531         }
2532
2533         if (!jobj_segment_new) {
2534                 r = -EINVAL;
2535                 goto err;
2536         }
2537
2538         r = LUKS2_segment_set_flag(jobj_segment_new, "backup-final");
2539         if (r)
2540                 goto err;
2541         json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), segment, jobj_segment_new);
2542         jobj_segment_new = NULL;
2543         if (digest_new >= 0)
2544                 LUKS2_digest_segment_assign(cd, hdr, segment, digest_new, 1, 0);
2545
2546         /* FIXME: also check occupied space by keyslot in shrunk area */
2547         if (params->direction == CRYPT_REENCRYPT_FORWARD && data_shift &&
2548             crypt_metadata_device(cd) == crypt_data_device(cd) &&
2549             LUKS2_set_keyslots_size(hdr, json_segment_get_offset(reencrypt_segment_new(hdr), 0))) {
2550                 log_err(cd, _("Failed to set new keyslots area size."));
2551                 r = -EINVAL;
2552                 goto err;
2553         }
2554
2555         return 0;
2556 err:
2557         json_object_put(jobj_segment_new);
2558         json_object_put(jobj_segment_old);
2559         return r;
2560 }
2561
/*
 * Verify that the volume key matching 'digest' is present in 'vks'
 * and that it actually verifies against that digest.
 * Returns 0, -ENOENT (key missing) or -EINVAL (digest mismatch).
 */
static int reencrypt_verify_single_key(struct crypt_device *cd, int digest, struct volume_key *vks)
{
	struct volume_key *key = crypt_volume_key_by_id(vks, digest);

	if (!key)
		return -ENOENT;

	return (LUKS2_digest_verify_by_digest(cd, digest, key) == digest) ? 0 : -EINVAL;
}
2575
/* Verify new and old volume keys (skipping digests that are unset, < 0). */
static int reencrypt_verify_keys(struct crypt_device *cd,
	int digest_old,
	int digest_new,
	struct volume_key *vks)
{
	int r;

	if (digest_new >= 0) {
		r = reencrypt_verify_single_key(cd, digest_new, vks);
		if (r)
			return r;
	}

	if (digest_old >= 0) {
		r = reencrypt_verify_single_key(cd, digest_old, vks);
		if (r)
			return r;
	}

	return 0;
}
2591
/*
 * Upload the volume key matching 'digest' into the kernel keyring.
 * Note: 'hdr' is not used by the body; kept for signature symmetry
 * with the other reencrypt_upload_* helpers.
 */
static int reencrypt_upload_single_key(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	int digest,
	struct volume_key *vks)
{
	struct volume_key *key = crypt_volume_key_by_id(vks, digest);

	if (!key)
		return -EINVAL;

	return LUKS2_volume_key_load_in_keyring_by_digest(cd, key, digest);
}
2605
/*
 * Upload new and old volume keys into the kernel keyring (new key first).
 * Keys for null ciphers and unset digests are skipped; no-op when the
 * keyring is not used for volume keys.
 */
static int reencrypt_upload_keys(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	int digest_old,
	int digest_new,
	struct volume_key *vks)
{
	int r;

	if (!crypt_use_keyring_for_vk(cd))
		return 0;

	if (digest_new >= 0 && !crypt_is_cipher_null(reencrypt_segment_cipher_new(hdr))) {
		r = reencrypt_upload_single_key(cd, hdr, digest_new, vks);
		if (r)
			return r;
	}

	if (digest_old >= 0 && !crypt_is_cipher_null(reencrypt_segment_cipher_old(hdr))) {
		r = reencrypt_upload_single_key(cd, hdr, digest_old, vks);
		if (r) {
			/* do not leave the already uploaded new key behind */
			crypt_drop_keyring_key(cd, vks);
			return r;
		}
	}

	return 0;
}
2629
/* Verify both volume keys against digests, then upload them to the keyring. */
static int reencrypt_verify_and_upload_keys(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	int digest_old,
	int digest_new,
	struct volume_key *vks)
{
	int r = reencrypt_verify_keys(cd, digest_old, digest_new, vks);

	if (!r)
		r = reencrypt_upload_keys(cd, hdr, digest_old, digest_new, vks);

	return r;
}
2648
2649 static int reencrypt_verify_checksum_params(struct crypt_device *cd,
2650                 const struct crypt_params_reencrypt *params)
2651 {
2652         size_t len;
2653         struct crypt_hash *ch;
2654
2655         assert(params);
2656
2657         if (!params->hash)
2658                 return -EINVAL;
2659
2660         len = strlen(params->hash);
2661         if (!len || len > (LUKS2_CHECKSUM_ALG_L - 1))
2662                 return -EINVAL;
2663
2664         if (crypt_hash_size(params->hash) <= 0)
2665                 return -EINVAL;
2666
2667         if (crypt_hash_init(&ch, params->hash)) {
2668                 log_err(cd, _("Hash algorithm %s is not available."), params->hash);
2669                 return -EINVAL;
2670         }
2671         /* We just check for alg availability */
2672         crypt_hash_destroy(ch);
2673
2674         return 0;
2675 }
2676
2677 static int reencrypt_verify_datashift_params(struct crypt_device *cd,
2678                 const struct crypt_params_reencrypt *params,
2679                 uint32_t sector_size)
2680 {
2681         assert(params);
2682
2683         if (!params->data_shift)
2684                 return -EINVAL;
2685         if (MISALIGNED(params->data_shift, sector_size >> SECTOR_SHIFT)) {
2686                 log_err(cd, _("Data shift value is not aligned to encryption sector size (%" PRIu32 " bytes)."),
2687                         sector_size);
2688                 return -EINVAL;
2689         }
2690
2691         return 0;
2692 }
2693
2694 static int reencrypt_verify_resilience_params(struct crypt_device *cd,
2695                 const struct crypt_params_reencrypt *params,
2696                 uint32_t sector_size, bool move_first_segment)
2697 {
2698         /* no change requested */
2699         if (!params || !params->resilience)
2700                 return 0;
2701
2702         if (!strcmp(params->resilience, "journal"))
2703                 return (params->data_shift || move_first_segment) ? -EINVAL : 0;
2704         else if (!strcmp(params->resilience, "none"))
2705                 return (params->data_shift || move_first_segment) ? -EINVAL : 0;
2706         else if (!strcmp(params->resilience, "datashift"))
2707                 return reencrypt_verify_datashift_params(cd, params, sector_size);
2708         else if (!strcmp(params->resilience, "checksum")) {
2709                 if (params->data_shift || move_first_segment)
2710                         return -EINVAL;
2711                 return reencrypt_verify_checksum_params(cd, params);
2712         } else if (!strcmp(params->resilience, "datashift-checksum")) {
2713                 if (!move_first_segment ||
2714                      reencrypt_verify_datashift_params(cd, params, sector_size))
2715                         return -EINVAL;
2716                 return reencrypt_verify_checksum_params(cd, params);
2717         } else if (!strcmp(params->resilience, "datashift-journal")) {
2718                 if (!move_first_segment)
2719                         return -EINVAL;
2720                 return reencrypt_verify_datashift_params(cd, params, sector_size);
2721         }
2722
2723         log_err(cd, _("Unsupported resilience mode %s"), params->resilience);
2724         return -EINVAL;
2725 }
2726
/*
 * Initialize LUKS2 decryption with data shift (moved first segment).
 *
 * Sets up in-memory backup segments, allocates the reencryption keyslot,
 * moves the first data chunk into place and finally commits the
 * online-reencrypt requirement flag to the header (the single metadata
 * write of this routine). In online mode ('name' set) the active dm table
 * is additionally validated against metadata and reloaded.
 *
 * Returns the reencryption keyslot id (>= 0) on success, negative errno
 * on failure; on failure LUKS2 metadata is rolled back.
 */
static int reencrypt_decrypt_with_datashift_init(struct crypt_device *cd,
		const char *name,
		struct luks2_hdr *hdr,
		int reencrypt_keyslot,
		uint32_t sector_size,
		uint64_t data_size,
		uint64_t data_offset,
		const char *passphrase,
		size_t passphrase_size,
		int keyslot_old,
		const struct crypt_params_reencrypt *params,
		struct volume_key **vks)
{
	bool clear_table = false;
	int r, devfd = -1;
	uint64_t data_shift, max_moved_segment_length, moved_segment_length;
	struct reenc_protection check_rp = {};
	struct crypt_dm_active_device dmd_target, dmd_source = {
		.uuid = crypt_get_uuid(cd),
		.flags = CRYPT_ACTIVATE_SHARED /* turn off exclusive open checks */
	};
	json_object *jobj_segments_old;

	assert(hdr);
	assert(params);
	assert(params->resilience);
	assert(params->data_shift);
	assert(vks);

	if (!data_offset)
		return -EINVAL;

	/* the moved segment must fit inside the data shift area */
	if (params->max_hotzone_size > params->data_shift) {
		log_err(cd, _("Moved segment size can not be greater than data shift value."));
		return -EINVAL;
	}

	log_dbg(cd, "Initializing decryption with datashift.");

	data_shift = params->data_shift << SECTOR_SHIFT;

	/*
	 * In offline mode we must perform data move with exclusively opened data
	 * device in order to exclude LUKS2 decryption process and filesystem mount.
	 */
	if (name)
		devfd = device_open(cd, crypt_data_device(cd), O_RDWR);
	else
		devfd = device_open_excl(cd, crypt_data_device(cd), O_RDWR);
	if (devfd < 0)
		return -EINVAL;

	/* in-memory only */
	moved_segment_length = params->max_hotzone_size << SECTOR_SHIFT;
	if (!moved_segment_length)
		moved_segment_length = data_shift < LUKS2_DEFAULT_NONE_REENCRYPTION_LENGTH ?
				       data_shift : LUKS2_DEFAULT_NONE_REENCRYPTION_LENGTH;

	/* never move more than the device holds */
	if (moved_segment_length > data_size)
		moved_segment_length = data_size;

	r = reencrypt_set_decrypt_shift_segments(cd, hdr, data_size,
						 moved_segment_length,
						 params->direction);
	if (r)
		goto out;

	r = reencrypt_make_backup_segments(cd, hdr, CRYPT_ANY_SLOT, NULL, data_offset, params);
	if (r) {
		log_dbg(cd, "Failed to create reencryption backup device segments.");
		goto out;
	}

	r = reencrypt_verify_resilience_params(cd, params, sector_size, true);
	if (r < 0) {
		log_err(cd, _("Invalid reencryption resilience parameters."));
		goto out;
	}

	r = LUKS2_keyslot_reencrypt_allocate(cd, hdr, reencrypt_keyslot,
					   params, reencrypt_get_alignment(cd, hdr));
	if (r < 0)
		goto out;

	/* reload the stored protection to learn the real moved-segment limit */
	r = LUKS2_keyslot_reencrypt_load(cd, hdr, reencrypt_keyslot, &check_rp, false);
	if (r < 0)
		goto out;

	r = LUKS2_reencrypt_max_hotzone_size(cd, hdr, &check_rp,
					     reencrypt_keyslot,
					     &max_moved_segment_length);
	if (r < 0)
		goto out;

	LUKS2_reencrypt_protection_erase(&check_rp);

	if (moved_segment_length > max_moved_segment_length) {
		log_err(cd, _("Moved segment too large. Requested size %" PRIu64 ", available space for: %" PRIu64 "."),
			moved_segment_length, max_moved_segment_length);
		r = -EINVAL;
		goto out;
	}

	r = LUKS2_keyslot_open_all_segments(cd, keyslot_old, CRYPT_ANY_SLOT,
					    passphrase, passphrase_size, vks);
	if (r < 0)
		goto out;

	r = LUKS2_keyslot_reencrypt_digest_create(cd, hdr, LUKS2_DECRYPT_DATASHIFT_REQ_VERSION, *vks);
	if (r < 0)
		goto out;

	/* online mode: validate and reload the active dm table */
	if (name) {
		r = reencrypt_verify_and_upload_keys(cd, hdr,
						     LUKS2_reencrypt_digest_old(hdr),
						     LUKS2_reencrypt_digest_new(hdr),
						     *vks);
		if (r)
			goto out;

		r = dm_query_device(cd, name, DM_ACTIVE_UUID | DM_ACTIVE_DEVICE |
				    DM_ACTIVE_CRYPT_KEYSIZE | DM_ACTIVE_CRYPT_KEY |
				    DM_ACTIVE_CRYPT_CIPHER, &dmd_target);
		if (r < 0)
			goto out;

		/* compare the live table against the old-segments mapping from metadata */
		jobj_segments_old = reencrypt_segments_old(hdr);
		if (!jobj_segments_old) {
			r = -EINVAL;
			goto out;
		}
		r = LUKS2_assembly_multisegment_dmd(cd, hdr, *vks, jobj_segments_old, &dmd_source);
		if (!r) {
			r = crypt_compare_dm_devices(cd, &dmd_source, &dmd_target);
			if (r)
				log_err(cd, _("Mismatching parameters on device %s."), name);
		}
		json_object_put(jobj_segments_old);

		dm_targets_free(cd, &dmd_source);
		dm_targets_free(cd, &dmd_target);
		free(CONST_CAST(void*)dmd_target.uuid);

		if (r)
			goto out;

		/* load the new (inactive) table built from current segments */
		dmd_source.size = dmd_target.size;
		r = LUKS2_assembly_multisegment_dmd(cd, hdr, *vks, LUKS2_get_segments_jobj(hdr), &dmd_source);
		if (!r) {
			r = dm_reload_device(cd, name, &dmd_source, dmd_target.flags, 0);
			if (r)
				log_err(cd, _("Failed to reload device %s."), name);
			else
				clear_table = true; /* inactive table loaded; must be cleared or resumed below */
		}

		dm_targets_free(cd, &dmd_source);

		if (r)
			goto out;
	}

	/* suspend the device before moving its data underneath it */
	if (name) {
		r = dm_suspend_device(cd, name, DM_SUSPEND_SKIP_LOCKFS);
		if (r) {
			log_err(cd, _("Failed to suspend device %s."), name);
			goto out;
		}
	}

	if (reencrypt_move_data(cd, devfd, data_shift, params->mode)) {
		r = -EIO;
		goto out;
	}

	/* This must be first and only write in LUKS2 metadata during _reencrypt_init */
	r = reencrypt_update_flag(cd, LUKS2_DECRYPT_DATASHIFT_REQ_VERSION, true, true);
	if (r) {
		log_dbg(cd, "Failed to set online-reencryption requirement.");
		r = -EINVAL;
	} else
		r = reencrypt_keyslot;
out:
	/* on failure drop the loaded inactive table; on success resume with it */
	if (r < 0 && clear_table && dm_clear_device(cd, name))
		log_err(cd, _("Failed to clear table."));
	else if (clear_table && dm_resume_device(cd, name, DM_SUSPEND_SKIP_LOCKFS))
		log_err(cd, _("Failed to resume device %s."), name);

	device_release_excl(cd, crypt_data_device(cd));
	/* drop all in-memory metadata changes made above on failure */
	if (r < 0 && LUKS2_hdr_rollback(cd, hdr) < 0)
		log_dbg(cd, "Failed to rollback LUKS2 metadata after failure.");

	return r;
}
2921
2922 /* This function must be called with metadata lock held */
2923 static int reencrypt_init(struct crypt_device *cd,
2924                 const char *name,
2925                 struct luks2_hdr *hdr,
2926                 const char *passphrase,
2927                 size_t passphrase_size,
2928                 int keyslot_old,
2929                 int keyslot_new,
2930                 const char *cipher,
2931                 const char *cipher_mode,
2932                 const struct crypt_params_reencrypt *params,
2933                 struct volume_key **vks)
2934 {
2935         bool move_first_segment;
2936         char _cipher[128];
2937         uint32_t check_sector_size, new_sector_size, old_sector_size;
2938         int r, reencrypt_keyslot, devfd = -1;
2939         uint64_t data_offset, data_size = 0;
2940         struct crypt_dm_active_device dmd_target, dmd_source = {
2941                 .uuid = crypt_get_uuid(cd),
2942                 .flags = CRYPT_ACTIVATE_SHARED /* turn off exclusive open checks */
2943         };
2944
2945         assert(cd);
2946         assert(hdr);
2947
2948         if (!params || !params->resilience || params->mode > CRYPT_REENCRYPT_DECRYPT)
2949                 return -EINVAL;
2950
2951         if (params->mode != CRYPT_REENCRYPT_DECRYPT &&
2952             (!params->luks2 || !(cipher && cipher_mode) || keyslot_new < 0))
2953                 return -EINVAL;
2954
2955         log_dbg(cd, "Initializing reencryption (mode: %s) in LUKS2 metadata.",
2956                     crypt_reencrypt_mode_to_str(params->mode));
2957
2958         move_first_segment = (params->flags & CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT);
2959
2960         old_sector_size = LUKS2_get_sector_size(hdr);
2961
2962         /* implicit sector size 512 for decryption */
2963         new_sector_size = params->luks2 ? params->luks2->sector_size : SECTOR_SIZE;
2964         if (new_sector_size < SECTOR_SIZE || new_sector_size > MAX_SECTOR_SIZE ||
2965             NOTPOW2(new_sector_size)) {
2966                 log_err(cd, _("Unsupported encryption sector size."));
2967                 return -EINVAL;
2968         }
2969         /* check the larger encryption sector size only */
2970         check_sector_size = new_sector_size > old_sector_size ? new_sector_size : old_sector_size;
2971
2972         if (!cipher_mode || *cipher_mode == '\0')
2973                 r = snprintf(_cipher, sizeof(_cipher), "%s", cipher);
2974         else
2975                 r = snprintf(_cipher, sizeof(_cipher), "%s-%s", cipher, cipher_mode);
2976         if (r < 0 || (size_t)r >= sizeof(_cipher))
2977                 return -EINVAL;
2978
2979         data_offset = LUKS2_get_data_offset(hdr) << SECTOR_SHIFT;
2980
2981         r = device_check_access(cd, crypt_data_device(cd), DEV_OK);
2982         if (r)
2983                 return r;
2984
2985         r = device_check_size(cd, crypt_data_device(cd), data_offset, 1);
2986         if (r)
2987                 return r;
2988
2989         r = device_size(crypt_data_device(cd), &data_size);
2990         if (r)
2991                 return r;
2992
2993         data_size -= data_offset;
2994
2995         if (params->device_size) {
2996                 if ((params->device_size << SECTOR_SHIFT) > data_size) {
2997                         log_err(cd, _("Reduced data size is larger than real device size."));
2998                         return -EINVAL;
2999                 } else
3000                         data_size = params->device_size << SECTOR_SHIFT;
3001         }
3002
3003         if (MISALIGNED(data_size, check_sector_size)) {
3004                 log_err(cd, _("Data device is not aligned to encryption sector size (%" PRIu32 " bytes)."), check_sector_size);
3005                 return -EINVAL;
3006         }
3007
3008         reencrypt_keyslot = LUKS2_keyslot_find_empty(cd, hdr, 0);
3009         if (reencrypt_keyslot < 0) {
3010                 log_err(cd, _("All key slots full."));
3011                 return -EINVAL;
3012         }
3013
3014         if (params->mode == CRYPT_REENCRYPT_DECRYPT && (params->data_shift > 0) && move_first_segment)
3015                 return reencrypt_decrypt_with_datashift_init(cd, name, hdr,
3016                                                              reencrypt_keyslot,
3017                                                              check_sector_size,
3018                                                              data_size,
3019                                                              data_offset,
3020                                                              passphrase,
3021                                                              passphrase_size,
3022                                                              keyslot_old,
3023                                                              params,
3024                                                              vks);
3025
3026
3027         /*
3028          * We must perform data move with exclusive open data device
3029          * to exclude another cryptsetup process to colide with
3030          * encryption initialization (or mount)
3031          */
3032         if (move_first_segment) {
3033                 if (data_size < (params->data_shift << SECTOR_SHIFT)) {
3034                         log_err(cd, _("Device %s is too small."), device_path(crypt_data_device(cd)));
3035                         return -EINVAL;
3036                 }
3037                 if (params->data_shift < LUKS2_get_data_offset(hdr)) {
3038                         log_err(cd, _("Data shift (%" PRIu64 " sectors) is less than future data offset (%" PRIu64 " sectors)."),
3039                                 params->data_shift, LUKS2_get_data_offset(hdr));
3040                         return -EINVAL;
3041                 }
3042                 devfd = device_open_excl(cd, crypt_data_device(cd), O_RDWR);
3043                 if (devfd < 0) {
3044                         if (devfd == -EBUSY)
3045                                 log_err(cd,_("Failed to open %s in exclusive mode (already mapped or mounted)."),
3046                                         device_path(crypt_data_device(cd)));
3047                         return -EINVAL;
3048                 }
3049         }
3050
3051         if (params->mode == CRYPT_REENCRYPT_ENCRYPT) {
3052                 /* in-memory only */
3053                 r = reencrypt_set_encrypt_segments(cd, hdr, data_size,
3054                                                    params->data_shift << SECTOR_SHIFT,
3055                                                    move_first_segment,
3056                                                    params->direction);
3057                 if (r)
3058                         goto out;
3059         }
3060
3061         r = reencrypt_make_backup_segments(cd, hdr, keyslot_new, _cipher, data_offset, params);
3062         if (r) {
3063                 log_dbg(cd, "Failed to create reencryption backup device segments.");
3064                 goto out;
3065         }
3066
3067         r = reencrypt_verify_resilience_params(cd, params, check_sector_size, move_first_segment);
3068         if (r < 0)
3069                 goto out;
3070
3071         r = LUKS2_keyslot_reencrypt_allocate(cd, hdr, reencrypt_keyslot, params,
3072                         reencrypt_get_alignment(cd, hdr));
3073         if (r < 0)
3074                 goto out;
3075
3076         r = LUKS2_keyslot_open_all_segments(cd, keyslot_old, keyslot_new, passphrase, passphrase_size, vks);
3077         if (r < 0)
3078                 goto out;
3079
3080         r = LUKS2_keyslot_reencrypt_digest_create(cd, hdr, LUKS2_REENCRYPT_REQ_VERSION, *vks);
3081         if (r < 0)
3082                 goto out;
3083
3084         if (name && params->mode != CRYPT_REENCRYPT_ENCRYPT) {
3085                 r = reencrypt_verify_and_upload_keys(cd, hdr, LUKS2_reencrypt_digest_old(hdr), LUKS2_reencrypt_digest_new(hdr), *vks);
3086                 if (r)
3087                         goto out;
3088
3089                 r = dm_query_device(cd, name, DM_ACTIVE_UUID | DM_ACTIVE_DEVICE |
3090                                     DM_ACTIVE_CRYPT_KEYSIZE | DM_ACTIVE_CRYPT_KEY |
3091                                     DM_ACTIVE_CRYPT_CIPHER, &dmd_target);
3092                 if (r < 0)
3093                         goto out;
3094
3095                 r = LUKS2_assembly_multisegment_dmd(cd, hdr, *vks, LUKS2_get_segments_jobj(hdr), &dmd_source);
3096                 if (!r) {
3097                         r = crypt_compare_dm_devices(cd, &dmd_source, &dmd_target);
3098                         if (r)
3099                                 log_err(cd, _("Mismatching parameters on device %s."), name);
3100                 }
3101
3102                 dm_targets_free(cd, &dmd_source);
3103                 dm_targets_free(cd, &dmd_target);
3104                 free(CONST_CAST(void*)dmd_target.uuid);
3105
3106                 if (r)
3107                         goto out;
3108         }
3109
3110         if (move_first_segment && reencrypt_move_data(cd, devfd, params->data_shift << SECTOR_SHIFT, params->mode)) {
3111                 r = -EIO;
3112                 goto out;
3113         }
3114
3115         /* This must be first and only write in LUKS2 metadata during _reencrypt_init */
3116         r = reencrypt_update_flag(cd, LUKS2_REENCRYPT_REQ_VERSION, true, true);
3117         if (r) {
3118                 log_dbg(cd, "Failed to set online-reencryption requirement.");
3119                 r = -EINVAL;
3120         } else
3121                 r = reencrypt_keyslot;
3122 out:
3123         device_release_excl(cd, crypt_data_device(cd));
3124         if (r < 0 && LUKS2_hdr_rollback(cd, hdr) < 0)
3125                 log_dbg(cd, "Failed to rollback LUKS2 metadata after failure.");
3126
3127         return r;
3128 }
3129
3130 static int reencrypt_hotzone_protect_final(struct crypt_device *cd,
3131         struct luks2_hdr *hdr, int reencrypt_keyslot,
3132         const struct reenc_protection *rp,
3133         const void *buffer, size_t buffer_len)
3134 {
3135         const void *pbuffer;
3136         size_t data_offset, len;
3137         int r;
3138
3139         assert(hdr);
3140         assert(rp);
3141
3142         if (rp->type == REENC_PROTECTION_NONE)
3143                 return 0;
3144
3145         if (rp->type == REENC_PROTECTION_CHECKSUM) {
3146                 log_dbg(cd, "Checksums hotzone resilience.");
3147
3148                 for (data_offset = 0, len = 0; data_offset < buffer_len; data_offset += rp->p.csum.block_size, len += rp->p.csum.hash_size) {
3149                         if (crypt_hash_write(rp->p.csum.ch, (const char *)buffer + data_offset, rp->p.csum.block_size)) {
3150                                 log_dbg(cd, "Failed to hash sector at offset %zu.", data_offset);
3151                                 return -EINVAL;
3152                         }
3153                         if (crypt_hash_final(rp->p.csum.ch, (char *)rp->p.csum.checksums + len, rp->p.csum.hash_size)) {
3154                                 log_dbg(cd, "Failed to finalize hash.");
3155                                 return -EINVAL;
3156                         }
3157                 }
3158                 pbuffer = rp->p.csum.checksums;
3159         } else if (rp->type == REENC_PROTECTION_JOURNAL) {
3160                 log_dbg(cd, "Journal hotzone resilience.");
3161                 len = buffer_len;
3162                 pbuffer = buffer;
3163         } else if (rp->type == REENC_PROTECTION_DATASHIFT) {
3164                 log_dbg(cd, "Data shift hotzone resilience.");
3165                 return LUKS2_hdr_write(cd, hdr);
3166         } else
3167                 return -EINVAL;
3168
3169         log_dbg(cd, "Going to store %zu bytes in reencrypt keyslot.", len);
3170
3171         r = LUKS2_keyslot_reencrypt_store(cd, hdr, reencrypt_keyslot, pbuffer, len);
3172
3173         return r > 0 ? 0 : r;
3174 }
3175
/*
 * Advance the reencryption context to the next hotzone after rh->read
 * bytes were successfully processed in the current one.
 *
 * Updates rh->offset and rh->length according to the reencryption
 * direction and resilience mode, and accumulates rh->progress.
 *
 * Returns 0 on success, -EINVAL on a failed read (rh->read < 0), an
 * unknown direction, or when the computed offset falls beyond the
 * device size.
 */
static int reencrypt_context_update(struct crypt_device *cd,
	struct luks2_reencrypt *rh)
{
	if (rh->read < 0)
		return -EINVAL;

	if (rh->direction == CRYPT_REENCRYPT_BACKWARD) {
		/* Encryption with datashift moves in fixed data-shift sized steps. */
		if (rh->rp.type == REENC_PROTECTION_DATASHIFT && rh->mode == CRYPT_REENCRYPT_ENCRYPT) {
			if (rh->offset)
				rh->offset -= data_shift_value(&rh->rp);
			/* shrink the last-but-one window so the final one lands exactly at 0 */
			if (rh->offset && (rh->offset < data_shift_value(&rh->rp))) {
				rh->length = rh->offset;
				rh->offset = data_shift_value(&rh->rp);
			}
			if (!rh->offset)
				rh->length = data_shift_value(&rh->rp);
		} else {
			/* plain backward move: clamp window to remaining space, then step back */
			if (rh->offset < rh->length)
				rh->length = rh->offset;
			rh->offset -= rh->length;
		}
	} else if (rh->direction == CRYPT_REENCRYPT_FORWARD) {
		rh->offset += (uint64_t)rh->read;
		/* Decryption with datashift: after the main area is done, wrap
		 * around to process the moved first segment at offset 0. */
		if (rh->device_size == rh->offset &&
		    rh->jobj_segment_moved &&
		    rh->mode == CRYPT_REENCRYPT_DECRYPT &&
		    rh->rp.type == REENC_PROTECTION_DATASHIFT) {
			rh->offset = 0;
			rh->length = json_segment_get_size(rh->jobj_segment_moved, 0);
		}
		/* it fails in-case of device_size < rh->offset later */
		else if (rh->device_size - rh->offset < rh->length)
			rh->length = rh->device_size - rh->offset;
	} else
		return -EINVAL;

	if (rh->device_size < rh->offset) {
		log_dbg(cd, "Calculated reencryption offset %" PRIu64 " is beyond device size %" PRIu64 ".", rh->offset, rh->device_size);
		return -EINVAL;
	}

	rh->progress += (uint64_t)rh->read;

	return 0;
}
3221
3222 static int reencrypt_load(struct crypt_device *cd, struct luks2_hdr *hdr,
3223                 uint64_t device_size,
3224                 uint64_t max_hotzone_size,
3225                 uint64_t required_device_size,
3226                 struct volume_key *vks,
3227                 struct luks2_reencrypt **rh)
3228 {
3229         int r;
3230         struct luks2_reencrypt *tmp = NULL;
3231         crypt_reencrypt_info ri = LUKS2_reencrypt_status(hdr);
3232
3233         if (ri == CRYPT_REENCRYPT_NONE) {
3234                 log_err(cd, _("Device not marked for LUKS2 reencryption."));
3235                 return -EINVAL;
3236         } else if (ri == CRYPT_REENCRYPT_INVALID)
3237                 return -EINVAL;
3238
3239         r = LUKS2_reencrypt_digest_verify(cd, hdr, vks);
3240         if (r < 0)
3241                 return r;
3242
3243         if (ri == CRYPT_REENCRYPT_CLEAN)
3244                 r = reencrypt_load_clean(cd, hdr, device_size, max_hotzone_size, required_device_size, &tmp);
3245         else if (ri == CRYPT_REENCRYPT_CRASH)
3246                 r = reencrypt_load_crashed(cd, hdr, device_size, &tmp);
3247         else
3248                 r = -EINVAL;
3249
3250         if (r < 0 || !tmp) {
3251                 log_err(cd, _("Failed to load LUKS2 reencryption context."));
3252                 return r;
3253         }
3254
3255         *rh = tmp;
3256
3257         return 0;
3258 }
3259 #endif
3260 static int reencrypt_lock_internal(struct crypt_device *cd, const char *uuid, struct crypt_lock_handle **reencrypt_lock)
3261 {
3262         int r;
3263         char *lock_resource;
3264
3265         if (!crypt_metadata_locking_enabled()) {
3266                 *reencrypt_lock = NULL;
3267                 return 0;
3268         }
3269
3270         r = asprintf(&lock_resource, "LUKS2-reencryption-%s", uuid);
3271         if (r < 0)
3272                 return -ENOMEM;
3273         if (r < 20) {
3274                 free(lock_resource);
3275                 return -EINVAL;
3276         }
3277
3278         r = crypt_write_lock(cd, lock_resource, false, reencrypt_lock);
3279
3280         free(lock_resource);
3281
3282         return r;
3283 }
3284
3285 /* internal only */
3286 int LUKS2_reencrypt_lock_by_dm_uuid(struct crypt_device *cd, const char *dm_uuid,
3287         struct crypt_lock_handle **reencrypt_lock)
3288 {
3289         int r;
3290         char hdr_uuid[37];
3291         const char *uuid = crypt_get_uuid(cd);
3292
3293         if (!dm_uuid)
3294                 return -EINVAL;
3295
3296         if (!uuid) {
3297                 r = snprintf(hdr_uuid, sizeof(hdr_uuid), "%.8s-%.4s-%.4s-%.4s-%.12s",
3298                          dm_uuid + 6, dm_uuid + 14, dm_uuid + 18, dm_uuid + 22, dm_uuid + 26);
3299                 if (r < 0 || (size_t)r != (sizeof(hdr_uuid) - 1))
3300                         return -EINVAL;
3301         } else if (crypt_uuid_cmp(dm_uuid, uuid))
3302                 return -EINVAL;
3303
3304         return reencrypt_lock_internal(cd, uuid, reencrypt_lock);
3305 }
3306
3307 /* internal only */
3308 int LUKS2_reencrypt_lock(struct crypt_device *cd, struct crypt_lock_handle **reencrypt_lock)
3309 {
3310         if (!cd || !crypt_get_type(cd) || strcmp(crypt_get_type(cd), CRYPT_LUKS2))
3311                 return -EINVAL;
3312
3313         return reencrypt_lock_internal(cd, crypt_get_uuid(cd), reencrypt_lock);
3314 }
3315
/* internal only */
/*
 * Release a lock obtained from LUKS2_reencrypt_lock*().
 * NOTE(review): the handle may be NULL when metadata locking is disabled
 * (see reencrypt_lock_internal) — presumably crypt_unlock_internal
 * tolerates that; confirm in utils_device_locking.c.
 */
void LUKS2_reencrypt_unlock(struct crypt_device *cd, struct crypt_lock_handle *reencrypt_lock)
{
        crypt_unlock_internal(cd, reencrypt_lock);
}
3321 #if USE_LUKS2_REENCRYPTION
3322 static int reencrypt_lock_and_verify(struct crypt_device *cd, struct luks2_hdr *hdr,
3323                 struct crypt_lock_handle **reencrypt_lock)
3324 {
3325         int r;
3326         crypt_reencrypt_info ri;
3327         struct crypt_lock_handle *h;
3328
3329         ri = LUKS2_reencrypt_status(hdr);
3330         if (ri == CRYPT_REENCRYPT_INVALID) {
3331                 log_err(cd, _("Failed to get reencryption state."));
3332                 return -EINVAL;
3333         }
3334         if (ri < CRYPT_REENCRYPT_CLEAN) {
3335                 log_err(cd, _("Device is not in reencryption."));
3336                 return -EINVAL;
3337         }
3338
3339         r = LUKS2_reencrypt_lock(cd, &h);
3340         if (r < 0) {
3341                 if (r == -EBUSY)
3342                         log_err(cd, _("Reencryption process is already running."));
3343                 else
3344                         log_err(cd, _("Failed to acquire reencryption lock."));
3345                 return r;
3346         }
3347
3348         /* With reencryption lock held, reload device context and verify metadata state */
3349         r = crypt_load(cd, CRYPT_LUKS2, NULL);
3350         if (r) {
3351                 LUKS2_reencrypt_unlock(cd, h);
3352                 return r;
3353         }
3354
3355         ri = LUKS2_reencrypt_status(hdr);
3356         if (ri == CRYPT_REENCRYPT_CLEAN) {
3357                 *reencrypt_lock = h;
3358                 return 0;
3359         }
3360
3361         LUKS2_reencrypt_unlock(cd, h);
3362         log_err(cd, _("Cannot proceed with reencryption. Run reencryption recovery first."));
3363         return -EINVAL;
3364 }
3365
/*
 * Load an initialized LUKS2 reencryption context using a passphrase and
 * attach it to the device context.
 *
 * Steps (order matters — the reencryption lock must be held before any
 * metadata verification or update):
 *   - verify resilience parameters against the larger sector size,
 *   - take the reencryption lock and confirm metadata is CLEAN,
 *   - unlock volume keys for all segments (if not yet available),
 *   - for online reencryption (name != NULL) verify the active dm device
 *     matches the metadata,
 *   - validate any requested device size,
 *   - update the reencrypt keyslot resilience metadata if needed,
 *   - build the reencryption context and storage wrappers,
 *   - for offline mode, ensure exclusive access to the data device.
 *
 * On success the context takes ownership of *vks and the lock; returns 0.
 * On failure the lock is released and the partially built context freed.
 */
static int reencrypt_load_by_passphrase(struct crypt_device *cd,
		const char *name,
		const char *passphrase,
		size_t passphrase_size,
		int keyslot_old,
		int keyslot_new,
		struct volume_key **vks,
		const struct crypt_params_reencrypt *params)
{
	int r, reencrypt_slot;
	struct luks2_hdr *hdr;
	struct crypt_lock_handle *reencrypt_lock;
	struct luks2_reencrypt *rh;
	const struct volume_key *vk;
	size_t alignment;
	uint32_t old_sector_size, new_sector_size, sector_size;
	struct crypt_dm_active_device dmd_target, dmd_source = {
		.uuid = crypt_get_uuid(cd),
		.flags = CRYPT_ACTIVATE_SHARED /* turn off exclusive open checks */
	};
	uint64_t minimal_size, device_size, mapping_size = 0, required_size = 0,
		 max_hotzone_size = 0;
	bool dynamic;
	uint32_t flags = 0;

	assert(cd);

	hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
	if (!hdr)
		return -EINVAL;

	log_dbg(cd, "Loading LUKS2 reencryption context.");

	old_sector_size = reencrypt_get_sector_size_old(hdr);
	new_sector_size = reencrypt_get_sector_size_new(hdr);
	/* resilience must be validated against the larger of the two sector sizes */
	sector_size = new_sector_size > old_sector_size ? new_sector_size : old_sector_size;

	r = reencrypt_verify_resilience_params(cd, params, sector_size,
					       LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment") >= 0);
	if (r < 0)
		return r;

	if (params) {
		required_size = params->device_size;
		max_hotzone_size = params->max_hotzone_size;
	}

	/* drop any stale context attached to this device handle */
	rh = crypt_get_luks2_reencrypt(cd);
	if (rh) {
		LUKS2_reencrypt_free(cd, rh);
		crypt_set_luks2_reencrypt(cd, NULL);
		rh = NULL;
	}

	r = reencrypt_lock_and_verify(cd, hdr, &reencrypt_lock);
	if (r)
		return r;

	reencrypt_slot = LUKS2_find_keyslot(hdr, "reencrypt");
	if (reencrypt_slot < 0) {
		r = -EINVAL;
		goto err;
	}

	/* From now on we hold reencryption lock */

	if (LUKS2_get_data_size(hdr, &minimal_size, &dynamic)) {
		r = -EINVAL;
		goto err;
	}

	/* some configurations provides fixed device size */
	r = LUKS2_reencrypt_check_device_size(cd, hdr, minimal_size, &device_size, false, dynamic);
	if (r) {
		r = -EINVAL;
		goto err;
	}

	minimal_size >>= SECTOR_SHIFT;

	/* keys may already be present from reencrypt_init; unlock only if missing */
	r = reencrypt_verify_keys(cd, LUKS2_reencrypt_digest_old(hdr), LUKS2_reencrypt_digest_new(hdr), *vks);
	if (r == -ENOENT) {
		log_dbg(cd, "Keys are not ready. Unlocking all volume keys.");
		r = LUKS2_keyslot_open_all_segments(cd, keyslot_old, keyslot_new, passphrase, passphrase_size, vks);
	}

	if (r < 0)
		goto err;

	/* online reencryption: verify the active dm device matches metadata */
	if (name) {
		r = reencrypt_upload_keys(cd, hdr, LUKS2_reencrypt_digest_old(hdr), LUKS2_reencrypt_digest_new(hdr), *vks);
		if (r < 0)
			goto err;

		r = dm_query_device(cd, name, DM_ACTIVE_UUID | DM_ACTIVE_DEVICE |
				    DM_ACTIVE_CRYPT_KEYSIZE | DM_ACTIVE_CRYPT_KEY |
				    DM_ACTIVE_CRYPT_CIPHER, &dmd_target);
		if (r < 0)
			goto err;
		flags = dmd_target.flags;

		/*
		 * By default reencryption code aims to retain flags from existing dm device.
		 * The keyring activation flag can not be inherited if original cipher is null.
		 *
		 * In this case override the flag based on decision made in reencrypt_upload_keys
		 * above. The code checks if new VK is eligible for keyring.
		 */
		vk = crypt_volume_key_by_id(*vks, LUKS2_reencrypt_digest_new(hdr));
		if (vk && vk->key_description && crypt_is_cipher_null(reencrypt_segment_cipher_old(hdr))) {
			flags |= CRYPT_ACTIVATE_KEYRING_KEY;
			dmd_source.flags |= CRYPT_ACTIVATE_KEYRING_KEY;
		}

		r = LUKS2_assembly_multisegment_dmd(cd, hdr, *vks, LUKS2_get_segments_jobj(hdr), &dmd_source);
		if (!r) {
			r = crypt_compare_dm_devices(cd, &dmd_source, &dmd_target);
			if (r)
				log_err(cd, _("Mismatching parameters on device %s."), name);
		}

		dm_targets_free(cd, &dmd_source);
		dm_targets_free(cd, &dmd_target);
		free(CONST_CAST(void*)dmd_target.uuid);
		if (r)
			goto err;
		mapping_size = dmd_target.size;
	}

	r = -EINVAL;
	if (required_size && mapping_size && (required_size != mapping_size)) {
		log_err(cd, _("Active device size and requested reencryption size don't match."));
		goto err;
	}

	if (mapping_size)
		required_size = mapping_size;

	/* sanity-check requested size against metadata limits and sector alignment */
	if (required_size) {
		/* TODO: Add support for changing fixed minimal size in reencryption mda where possible */
		if ((minimal_size && (required_size < minimal_size)) ||
		    (required_size > (device_size >> SECTOR_SHIFT)) ||
		    (!dynamic && (required_size != minimal_size)) ||
		    (old_sector_size > 0 && MISALIGNED(required_size, old_sector_size >> SECTOR_SHIFT)) ||
		    (new_sector_size > 0 && MISALIGNED(required_size, new_sector_size >> SECTOR_SHIFT))) {
			log_err(cd, _("Illegal device size requested in reencryption parameters."));
			goto err;
		}
	}

	alignment = reencrypt_get_alignment(cd, hdr);

	r = LUKS2_keyslot_reencrypt_update_needed(cd, hdr, reencrypt_slot, params, alignment);
	if (r > 0) /* metadata update needed */
		r = LUKS2_keyslot_reencrypt_update(cd, hdr, reencrypt_slot, params, alignment, *vks);
	if (r < 0)
		goto err;

	r = reencrypt_load(cd, hdr, device_size, max_hotzone_size, required_size, *vks, &rh);
	if (r < 0 || !rh)
		goto err;

	if (name && (r = reencrypt_context_set_names(rh, name)))
		goto err;

	/* Reassure device is not mounted and there's no dm mapping active */
	if (!name && (device_open_excl(cd, crypt_data_device(cd), O_RDONLY) < 0)) {
		log_err(cd,_("Failed to open %s in exclusive mode (already mapped or mounted)."), device_path(crypt_data_device(cd)));
		r = -EBUSY;
		goto err;
	}
	device_release_excl(cd, crypt_data_device(cd));

	/* There's a race for dm device activation not managed by cryptsetup.
	 *
	 * 1) excl close
	 * 2) rogue dm device activation
	 * 3) one or more dm-crypt based wrapper activation
	 * 4) next excl open gets skipped due to 3) device from 2) remains undetected.
	 */
	r = reencrypt_init_storage_wrappers(cd, hdr, rh, *vks);
	if (r)
		goto err;

	/* If one of wrappers is based on dmcrypt fallback it already blocked mount */
	if (!name && crypt_storage_wrapper_get_type(rh->cw1) != DMCRYPT &&
	    crypt_storage_wrapper_get_type(rh->cw2) != DMCRYPT) {
		if (device_open_excl(cd, crypt_data_device(cd), O_RDONLY) < 0) {
			log_err(cd,_("Failed to open %s in exclusive mode (already mapped or mounted)."), device_path(crypt_data_device(cd)));
			r = -EBUSY;
			goto err;
		}
	}

	rh->flags = flags;

	/* context now owns the volume keys and the reencryption lock */
	MOVE_REF(rh->vks, *vks);
	MOVE_REF(rh->reenc_lock, reencrypt_lock);

	crypt_set_luks2_reencrypt(cd, rh);

	return 0;
err:
	LUKS2_reencrypt_unlock(cd, reencrypt_lock);
	LUKS2_reencrypt_free(cd, rh);
	return r;
}
3573
3574 static int reencrypt_recovery_by_passphrase(struct crypt_device *cd,
3575         struct luks2_hdr *hdr,
3576         int keyslot_old,
3577         int keyslot_new,
3578         const char *passphrase,
3579         size_t passphrase_size)
3580 {
3581         int r;
3582         crypt_reencrypt_info ri;
3583         struct crypt_lock_handle *reencrypt_lock;
3584
3585         r = LUKS2_reencrypt_lock(cd, &reencrypt_lock);
3586         if (r) {
3587                 if (r == -EBUSY)
3588                         log_err(cd, _("Reencryption in-progress. Cannot perform recovery."));
3589                 else
3590                         log_err(cd, _("Failed to get reencryption lock."));
3591                 return r;
3592         }
3593
3594         if ((r = crypt_load(cd, CRYPT_LUKS2, NULL))) {
3595                 LUKS2_reencrypt_unlock(cd, reencrypt_lock);
3596                 return r;
3597         }
3598
3599         ri = LUKS2_reencrypt_status(hdr);
3600         if (ri == CRYPT_REENCRYPT_INVALID) {
3601                 LUKS2_reencrypt_unlock(cd, reencrypt_lock);
3602                 return -EINVAL;
3603         }
3604
3605         if (ri == CRYPT_REENCRYPT_CRASH) {
3606                 r = LUKS2_reencrypt_locked_recovery_by_passphrase(cd, keyslot_old, keyslot_new,
3607                                 passphrase, passphrase_size, NULL);
3608                 if (r < 0)
3609                         log_err(cd, _("LUKS2 reencryption recovery failed."));
3610         } else {
3611                 log_dbg(cd, "No LUKS2 reencryption recovery needed.");
3612                 r = 0;
3613         }
3614
3615         LUKS2_reencrypt_unlock(cd, reencrypt_lock);
3616         return r;
3617 }
3618
/*
 * Repair reencryption metadata: recreate the reencryption digest and
 * re-commit the online-reencrypt requirement flag.
 *
 * Picks the requirement version from the resilience mode (datashift
 * decryption with a moved segment uses the decrypt-datashift version),
 * unlocks all segment keys with the passphrase, rebuilds the digest and
 * rewrites the requirement flag under the reencryption lock.
 *
 * Returns 0 on success (including the no-op CRYPT_REENCRYPT_NONE state
 * after reload), negative errno otherwise.
 */
static int reencrypt_repair_by_passphrase(
		struct crypt_device *cd,
		struct luks2_hdr *hdr,
		int keyslot_old,
		int keyslot_new,
		const char *passphrase,
		size_t passphrase_size)
{
	int r;
	struct crypt_lock_handle *reencrypt_lock;
	struct luks2_reencrypt *rh;
	crypt_reencrypt_info ri;
	uint8_t requirement_version;
	const char *resilience;
	struct volume_key *vks = NULL;

	log_dbg(cd, "Loading LUKS2 reencryption context for metadata repair.");

	/* drop any stale context attached to this device handle */
	rh = crypt_get_luks2_reencrypt(cd);
	if (rh) {
		LUKS2_reencrypt_free(cd, rh);
		crypt_set_luks2_reencrypt(cd, NULL);
		rh = NULL;
	}

	ri = LUKS2_reencrypt_status(hdr);
	if (ri == CRYPT_REENCRYPT_INVALID)
		return -EINVAL;

	if (ri < CRYPT_REENCRYPT_CLEAN) {
		log_err(cd, _("Device is not in reencryption."));
		return -EINVAL;
	}

	r = LUKS2_reencrypt_lock(cd, &reencrypt_lock);
	if (r < 0) {
		if (r == -EBUSY)
			log_err(cd, _("Reencryption process is already running."));
		else
			log_err(cd, _("Failed to acquire reencryption lock."));
		return r;
	}

	/* With reencryption lock held, reload device context and verify metadata state */
	r = crypt_load(cd, CRYPT_LUKS2, NULL);
	if (r)
		goto out;

	ri = LUKS2_reencrypt_status(hdr);
	if (ri == CRYPT_REENCRYPT_INVALID) {
		r = -EINVAL;
		goto out;
	}
	/* state may have changed after reload; nothing to repair */
	if (ri == CRYPT_REENCRYPT_NONE) {
		r = 0;
		goto out;
	}

	resilience = reencrypt_resilience_type(hdr);
	if (!resilience) {
		r = -EINVAL;
		goto out;
	}

	/* decrypt + datashift with a moved first segment uses its own requirement version */
	if (reencrypt_mode(hdr) == CRYPT_REENCRYPT_DECRYPT &&
	    !strncmp(resilience, "datashift-", 10) &&
	    LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment") >= 0)
		requirement_version = LUKS2_DECRYPT_DATASHIFT_REQ_VERSION;
	else
		requirement_version = LUKS2_REENCRYPT_REQ_VERSION;

	r = LUKS2_keyslot_open_all_segments(cd, keyslot_old, keyslot_new, passphrase, passphrase_size, &vks);
	if (r < 0)
		goto out;

	r = LUKS2_keyslot_reencrypt_digest_create(cd, hdr, requirement_version, vks);
	/* keys no longer needed; the second free at 'out' is a harmless no-op */
	crypt_free_volume_key(vks);
	vks = NULL;
	if (r < 0)
		goto out;

	/* replaces old online-reencrypt flag with updated version and commits metadata */
	r = reencrypt_update_flag(cd, requirement_version, true, true);
out:
	LUKS2_reencrypt_unlock(cd, reencrypt_lock);
	crypt_free_volume_key(vks);
	return r;

}
3708 #endif
/*
 * Entry point dispatching LUKS2 reencryption by passphrase.
 *
 * Depending on params->flags this either repairs metadata, runs crash
 * recovery, initializes new reencryption metadata, resumes an existing
 * one, or both (initialize then load the runtime context).
 *
 * Returns the reencrypt keyslot number on success, negative errno on
 * failure, -ENOTSUP when built without LUKS2 reencryption support.
 */
static int reencrypt_init_by_passphrase(struct crypt_device *cd,
	const char *name,
	const char *passphrase,
	size_t passphrase_size,
	int keyslot_old,
	int keyslot_new,
	const char *cipher,
	const char *cipher_mode,
	const struct crypt_params_reencrypt *params)
{
#if USE_LUKS2_REENCRYPTION
	int r;
	crypt_reencrypt_info ri;
	struct volume_key *vks = NULL;
	uint32_t flags = params ? params->flags : 0;
	struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);

	/* short-circuit in reencryption metadata update and finish immediately. */
	if (flags & CRYPT_REENCRYPT_REPAIR_NEEDED)
		return reencrypt_repair_by_passphrase(cd, hdr, keyslot_old, keyslot_new, passphrase, passphrase_size);

	/* short-circuit in recovery and finish immediately. */
	if (flags & CRYPT_REENCRYPT_RECOVERY)
		return reencrypt_recovery_by_passphrase(cd, hdr, keyslot_old, keyslot_new, passphrase, passphrase_size);

	/* validate the requested cipher against the new keyslot's key size */
	if (cipher && !crypt_cipher_wrapped_key(cipher, cipher_mode)) {
		r = crypt_keyslot_get_key_size(cd, keyslot_new);
		if (r < 0)
			return r;
		r = LUKS2_check_cipher(cd, r, cipher, cipher_mode);
		if (r < 0) {
			log_err(cd, _("Unable to use cipher specification %s-%s for LUKS2."), cipher, cipher_mode);
			return r;
		}
	}

	r = LUKS2_device_write_lock(cd, hdr, crypt_metadata_device(cd));
	if (r)
		return r;

	ri = LUKS2_reencrypt_status(hdr);
	if (ri == CRYPT_REENCRYPT_INVALID) {
		device_write_unlock(cd, crypt_metadata_device(cd));
		return -EINVAL;
	}

	/* initialize-only must not run on a device already in reencryption */
	if ((ri > CRYPT_REENCRYPT_NONE) && (flags & CRYPT_REENCRYPT_INITIALIZE_ONLY)) {
		device_write_unlock(cd, crypt_metadata_device(cd));
		log_err(cd, _("LUKS2 reencryption already initialized in metadata."));
		return -EBUSY;
	}

	/* fresh initialization under the metadata write lock */
	if (ri == CRYPT_REENCRYPT_NONE && !(flags & CRYPT_REENCRYPT_RESUME_ONLY)) {
		r = reencrypt_init(cd, name, hdr, passphrase, passphrase_size, keyslot_old, keyslot_new, cipher, cipher_mode, params, &vks);
		if (r < 0)
			log_err(cd, _("Failed to initialize LUKS2 reencryption in metadata."));
	} else if (ri > CRYPT_REENCRYPT_NONE) {
		log_dbg(cd, "LUKS2 reencryption already initialized.");
		r = 0;
	}

	device_write_unlock(cd, crypt_metadata_device(cd));

	if (r < 0 || (flags & CRYPT_REENCRYPT_INITIALIZE_ONLY))
		goto out;

	/* load runtime context (takes the reencryption lock internally) */
	r = reencrypt_load_by_passphrase(cd, name, passphrase, passphrase_size, keyslot_old, keyslot_new, &vks, params);
out:
	if (r < 0)
		crypt_drop_keyring_key(cd, vks);
	crypt_free_volume_key(vks);
	return r < 0 ? r : LUKS2_find_keyslot(hdr, "reencrypt");
#else
	log_err(cd, _("This operation is not supported for this device type."));
	return -ENOTSUP;
#endif
}
3786
3787 int crypt_reencrypt_init_by_keyring(struct crypt_device *cd,
3788         const char *name,
3789         const char *passphrase_description,
3790         int keyslot_old,
3791         int keyslot_new,
3792         const char *cipher,
3793         const char *cipher_mode,
3794         const struct crypt_params_reencrypt *params)
3795 {
3796         int r;
3797         char *passphrase;
3798         size_t passphrase_size;
3799
3800         if (onlyLUKS2mask(cd, CRYPT_REQUIREMENT_ONLINE_REENCRYPT) || !passphrase_description)
3801                 return -EINVAL;
3802         if (params && (params->flags & CRYPT_REENCRYPT_INITIALIZE_ONLY) && (params->flags & CRYPT_REENCRYPT_RESUME_ONLY))
3803                 return -EINVAL;
3804
3805         r = keyring_get_passphrase(passphrase_description, &passphrase, &passphrase_size);
3806         if (r < 0) {
3807                 log_err(cd, _("Failed to read passphrase from keyring (error %d)."), r);
3808                 return -EINVAL;
3809         }
3810
3811         r = reencrypt_init_by_passphrase(cd, name, passphrase, passphrase_size, keyslot_old, keyslot_new, cipher, cipher_mode, params);
3812
3813         crypt_safe_memzero(passphrase, passphrase_size);
3814         free(passphrase);
3815
3816         return r;
3817 }
3818
3819 int crypt_reencrypt_init_by_passphrase(struct crypt_device *cd,
3820         const char *name,
3821         const char *passphrase,
3822         size_t passphrase_size,
3823         int keyslot_old,
3824         int keyslot_new,
3825         const char *cipher,
3826         const char *cipher_mode,
3827         const struct crypt_params_reencrypt *params)
3828 {
3829         if (onlyLUKS2mask(cd, CRYPT_REQUIREMENT_ONLINE_REENCRYPT) || !passphrase)
3830                 return -EINVAL;
3831         if (params && (params->flags & CRYPT_REENCRYPT_INITIALIZE_ONLY) && (params->flags & CRYPT_REENCRYPT_RESUME_ONLY))
3832                 return -EINVAL;
3833
3834         return reencrypt_init_by_passphrase(cd, name, passphrase, passphrase_size, keyslot_old, keyslot_new, cipher, cipher_mode, params);
3835 }
3836
3837 #if USE_LUKS2_REENCRYPTION
/*
 * Process a single reencryption hotzone: read data through the old
 * segment wrapper, persist resilience (protection) metadata, then write
 * the data back through the new segment wrapper and commit the updated
 * segment layout.
 *
 * Returns REENC_OK on success, REENC_ERR / REENC_ROLLBACK for
 * recoverable failures and REENC_FATAL once on-disk data may be
 * inconsistent with metadata.
 */
static reenc_status_t reencrypt_step(struct crypt_device *cd,
		struct luks2_hdr *hdr,
		struct luks2_reencrypt *rh,
		uint64_t device_size,
		bool online)
{
	int r;
	struct reenc_protection *rp;

	assert(hdr);
	assert(rh);

	rp = &rh->rp;

	/* in memory only */
	r = reencrypt_make_segments(cd, hdr, rh, device_size);
	if (r)
		return REENC_ERR;

	/* Switch metadata to the hotzone segment layout (flags 1, 0 —
	 * presumably "hot" variant without commit; see reencrypt_assign_segments). */
	r = reencrypt_assign_segments(cd, hdr, rh, 1, 0);
	if (r) {
		log_err(cd, _("Failed to set device segments for next reencryption hotzone."));
		return REENC_ERR;
	}

	log_dbg(cd, "Reencrypting chunk starting at offset: %" PRIu64 ", size :%" PRIu64 ".", rh->offset, rh->length);
	log_dbg(cd, "data_offset: %" PRIu64, crypt_get_data_offset(cd) << SECTOR_SHIFT);

	/* Datashift with a moved first segment: once the window reaches offset 0,
	 * the old data must be read from the moved segment location, so the old
	 * storage wrapper is re-created with the moved data offset. */
	if (!rh->offset && rp->type == REENC_PROTECTION_DATASHIFT && rh->jobj_segment_moved) {
		crypt_storage_wrapper_destroy(rh->cw1);
		log_dbg(cd, "Reinitializing old segment storage wrapper for moved segment.");
		r = crypt_storage_wrapper_init(cd, &rh->cw1, crypt_data_device(cd),
				LUKS2_reencrypt_get_data_offset_moved(hdr),
				crypt_get_iv_offset(cd),
				reencrypt_get_sector_size_old(hdr),
				reencrypt_segment_cipher_old(hdr),
				crypt_volume_key_by_id(rh->vks, rh->digest_old),
				rh->wflags1);
		if (r) {
			log_err(cd, _("Failed to initialize old segment storage wrapper."));
			return REENC_ROLLBACK;
		}

		/* The moved segment may use its own resilience settings. */
		if (rh->rp_moved_segment.type != REENC_PROTECTION_NOT_SET) {
			log_dbg(cd, "Switching to moved segment resilience type.");
			rp = &rh->rp_moved_segment;
		}
	}

	r = reencrypt_hotzone_protect_ready(cd, rp);
	if (r) {
		log_err(cd, _("Failed to initialize hotzone protection."));
		return REENC_ROLLBACK;
	}

	if (online) {
		r = reencrypt_refresh_overlay_devices(cd, hdr, rh->overlay_name, rh->hotzone_name, rh->vks, rh->device_size, rh->flags);
		/* Teardown overlay devices with dm-error. None bio shall pass! */
		if (r != REENC_OK)
			return r;
	}

	/* Read the hotzone plaintext/ciphertext via the old segment wrapper. */
	rh->read = crypt_storage_wrapper_read(rh->cw1, rh->offset, rh->reenc_buffer, rh->length);
	if (rh->read < 0) {
		/* severity normal */
		log_err(cd, _("Failed to read hotzone area starting at %" PRIu64 "."), rh->offset);
		return REENC_ROLLBACK;
	}

	/* metadata commit point */
	r = reencrypt_hotzone_protect_final(cd, hdr, rh->reenc_keyslot, rp, rh->reenc_buffer, rh->read);
	if (r < 0) {
		/* severity normal */
		log_err(cd, _("Failed to write reencryption resilience metadata."));
		return REENC_ROLLBACK;
	}

	/* Decrypt buffer in place with the old key ... */
	r = crypt_storage_wrapper_decrypt(rh->cw1, rh->offset, rh->reenc_buffer, rh->read);
	if (r) {
		/* severity normal */
		log_err(cd, _("Decryption failed."));
		return REENC_ROLLBACK;
	}
	/* ... and write it back encrypted with the new key. A short write here
	 * is fatal: resilience metadata already claims this window. */
	if (rh->read != crypt_storage_wrapper_encrypt_write(rh->cw2, rh->offset, rh->reenc_buffer, rh->read)) {
		/* severity fatal */
		log_err(cd, _("Failed to write hotzone area starting at %" PRIu64 "."), rh->offset);
		return REENC_FATAL;
	}

	if (rp->type != REENC_PROTECTION_NONE && crypt_storage_wrapper_datasync(rh->cw2)) {
		log_err(cd, _("Failed to sync data."));
		return REENC_FATAL;
	}

	/* metadata commit safe point */
	r = reencrypt_assign_segments(cd, hdr, rh, 0, rp->type != REENC_PROTECTION_NONE);
	if (r) {
		/* severity fatal */
		log_err(cd, _("Failed to update metadata after current reencryption hotzone completed."));
		return REENC_FATAL;
	}

	if (online) {
		/* severity normal */
		log_dbg(cd, "Resuming device %s", rh->hotzone_name);
		r = dm_resume_device(cd, rh->hotzone_name, DM_RESUME_PRIVATE);
		if (r) {
			log_err(cd, _("Failed to resume device %s."), rh->hotzone_name);
			return REENC_ERR;
		}
	}

	return REENC_OK;
}
3952
3953 static int reencrypt_erase_backup_segments(struct crypt_device *cd,
3954                 struct luks2_hdr *hdr)
3955 {
3956         int segment = LUKS2_get_segment_id_by_flag(hdr, "backup-previous");
3957         if (segment >= 0) {
3958                 if (LUKS2_digest_segment_assign(cd, hdr, segment, CRYPT_ANY_DIGEST, 0, 0))
3959                         return -EINVAL;
3960                 json_object_object_del_by_uint(LUKS2_get_segments_jobj(hdr), segment);
3961         }
3962         segment = LUKS2_get_segment_id_by_flag(hdr, "backup-final");
3963         if (segment >= 0) {
3964                 if (LUKS2_digest_segment_assign(cd, hdr, segment, CRYPT_ANY_DIGEST, 0, 0))
3965                         return -EINVAL;
3966                 json_object_object_del_by_uint(LUKS2_get_segments_jobj(hdr), segment);
3967         }
3968         segment = LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment");
3969         if (segment >= 0) {
3970                 if (LUKS2_digest_segment_assign(cd, hdr, segment, CRYPT_ANY_DIGEST, 0, 0))
3971                         return -EINVAL;
3972                 json_object_object_del_by_uint(LUKS2_get_segments_jobj(hdr), segment);
3973         }
3974
3975         return 0;
3976 }
3977
3978 static int reencrypt_wipe_unused_device_area(struct crypt_device *cd, struct luks2_reencrypt *rh)
3979 {
3980         uint64_t offset, length, dev_size;
3981         int r = 0;
3982
3983         assert(cd);
3984         assert(rh);
3985
3986         if (rh->jobj_segment_moved && rh->mode == CRYPT_REENCRYPT_ENCRYPT) {
3987                 offset = json_segment_get_offset(rh->jobj_segment_moved, 0);
3988                 length = json_segment_get_size(rh->jobj_segment_moved, 0);
3989                 log_dbg(cd, "Wiping %" PRIu64 " bytes of backup segment data at offset %" PRIu64,
3990                         length, offset);
3991                 r = crypt_wipe_device(cd, crypt_data_device(cd), CRYPT_WIPE_RANDOM,
3992                                 offset, length, 1024 * 1024, NULL, NULL);
3993         }
3994
3995         if (r < 0)
3996                 return r;
3997
3998         if (rh->rp.type == REENC_PROTECTION_DATASHIFT && rh->direction == CRYPT_REENCRYPT_FORWARD) {
3999                 r = device_size(crypt_data_device(cd), &dev_size);
4000                 if (r < 0)
4001                         return r;
4002
4003                 if (dev_size < data_shift_value(&rh->rp))
4004                         return -EINVAL;
4005
4006                 offset = dev_size - data_shift_value(&rh->rp);
4007                 length = data_shift_value(&rh->rp);
4008                 log_dbg(cd, "Wiping %" PRIu64 " bytes of data at offset %" PRIu64,
4009                         length, offset);
4010                 r = crypt_wipe_device(cd, crypt_data_device(cd), CRYPT_WIPE_RANDOM,
4011                                 offset, length, 1024 * 1024, NULL, NULL);
4012         }
4013
4014         return r;
4015 }
4016
/*
 * Clean teardown after a successful (or cleanly interrupted)
 * reencryption run: persist metadata, restore the online device stack
 * and — when reencryption fully finished — wipe unused areas and remove
 * now-obsolete keyslots and backup segments.
 *
 * Returns 0 on success, -EINVAL on metadata write / keyslot removal failure.
 */
static int reencrypt_teardown_ok(struct crypt_device *cd, struct luks2_hdr *hdr, struct luks2_reencrypt *rh)
{
	int i, r;
	uint32_t dmt_flags;
	bool finished = !(rh->device_size > rh->progress);

	/* Without resilience protection the header was not written per-hotzone;
	 * flush it now. */
	if (rh->rp.type == REENC_PROTECTION_NONE &&
	    LUKS2_hdr_write(cd, hdr)) {
		log_err(cd, _("Failed to write LUKS2 metadata."));
		return -EINVAL;
	}

	if (rh->online) {
		/* Replace the reencryption device stack with a plain mapping. */
		r = LUKS2_reload(cd, rh->device_name, rh->vks, rh->device_size, rh->flags);
		if (r)
			log_err(cd, _("Failed to reload device %s."), rh->device_name);
		if (!r) {
			r = dm_resume_device(cd, rh->device_name, DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH);
			if (r)
				log_err(cd, _("Failed to resume device %s."), rh->device_name);
		}
		dm_remove_device(cd, rh->overlay_name, 0);
		dm_remove_device(cd, rh->hotzone_name, 0);

		/* Finished decryption leaves a plain linear mapping; schedule
		 * deferred removal when the kernel supports it. */
		if (!r && finished && rh->mode == CRYPT_REENCRYPT_DECRYPT &&
		    !dm_flags(cd, DM_LINEAR, &dmt_flags) && (dmt_flags & DM_DEFERRED_SUPPORTED))
		    dm_remove_device(cd, rh->device_name, CRYPT_DEACTIVATE_DEFERRED);
	}

	if (finished) {
		/* Most cleanup failures below are logged but non-fatal. */
		if (reencrypt_wipe_unused_device_area(cd, rh))
			log_err(cd, _("Failed to wipe unused data device area."));
		if (reencrypt_get_data_offset_new(hdr) && LUKS2_set_keyslots_size(hdr, reencrypt_get_data_offset_new(hdr)))
			log_dbg(cd, "Failed to set new keyslots area size.");
		/* Drop keyslots still bound to the old volume key digest. */
		if (rh->digest_old >= 0 && rh->digest_new != rh->digest_old)
			for (i = 0; i < LUKS2_KEYSLOTS_MAX; i++)
				if (LUKS2_digest_by_keyslot(hdr, i) == rh->digest_old && crypt_keyslot_destroy(cd, i))
					log_err(cd, _("Failed to remove unused (unbound) keyslot %d."), i);

		if (reencrypt_erase_backup_segments(cd, hdr))
			log_dbg(cd, "Failed to erase backup segments");

		if (reencrypt_update_flag(cd, 0, false, false))
			log_dbg(cd, "Failed to disable reencryption requirement flag.");

		/* metadata commit point also removing reencryption flag on-disk */
		if (crypt_keyslot_destroy(cd, rh->reenc_keyslot)) {
			log_err(cd, _("Failed to remove reencryption keyslot."));
			return -EINVAL;
		}
	}

	return 0;
}
4071
4072 static void reencrypt_teardown_fatal(struct crypt_device *cd, struct luks2_reencrypt *rh)
4073 {
4074         log_err(cd, _("Fatal error while reencrypting chunk starting at %" PRIu64 ", %" PRIu64 " sectors long."),
4075                 (rh->offset >> SECTOR_SHIFT) + crypt_get_data_offset(cd), rh->length >> SECTOR_SHIFT);
4076
4077         if (rh->online) {
4078                 log_err(cd, _("Online reencryption failed."));
4079                 if (dm_status_suspended(cd, rh->hotzone_name) > 0) {
4080                         log_dbg(cd, "Hotzone device %s suspended, replacing with dm-error.", rh->hotzone_name);
4081                         if (dm_error_device(cd, rh->hotzone_name)) {
4082                                 log_err(cd, _("Failed to replace suspended device %s with dm-error target."), rh->hotzone_name);
4083                                 log_err(cd, _("Do not resume the device unless replaced with error target manually."));
4084                         }
4085                 }
4086         }
4087 }
4088
4089 static int reencrypt_teardown(struct crypt_device *cd, struct luks2_hdr *hdr,
4090                 struct luks2_reencrypt *rh, reenc_status_t rs, bool interrupted,
4091                 int (*progress)(uint64_t size, uint64_t offset, void *usrptr),
4092                 void *usrptr)
4093 {
4094         int r;
4095
4096         switch (rs) {
4097         case REENC_OK:
4098                 if (progress && !interrupted)
4099                         progress(rh->device_size, rh->progress, usrptr);
4100                 r = reencrypt_teardown_ok(cd, hdr, rh);
4101                 break;
4102         case REENC_FATAL:
4103                 reencrypt_teardown_fatal(cd, rh);
4104                 /* fall-through */
4105         default:
4106                 r = -EIO;
4107         }
4108
4109         /* this frees reencryption lock */
4110         LUKS2_reencrypt_free(cd, rh);
4111         crypt_set_luks2_reencrypt(cd, NULL);
4112
4113         return r;
4114 }
4115 #endif
4116
4117 int crypt_reencrypt_run(
4118         struct crypt_device *cd,
4119         int (*progress)(uint64_t size, uint64_t offset, void *usrptr),
4120         void *usrptr)
4121 {
4122 #if USE_LUKS2_REENCRYPTION
4123         int r;
4124         crypt_reencrypt_info ri;
4125         struct luks2_hdr *hdr;
4126         struct luks2_reencrypt *rh;
4127         reenc_status_t rs;
4128         bool quit = false;
4129
4130         if (onlyLUKS2mask(cd, CRYPT_REQUIREMENT_ONLINE_REENCRYPT))
4131                 return -EINVAL;
4132
4133         hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
4134
4135         ri = LUKS2_reencrypt_status(hdr);
4136         if (ri > CRYPT_REENCRYPT_CLEAN) {
4137                 log_err(cd, _("Cannot proceed with reencryption. Unexpected reencryption status."));
4138                 return -EINVAL;
4139         }
4140
4141         rh = crypt_get_luks2_reencrypt(cd);
4142         if (!rh || (!rh->reenc_lock && crypt_metadata_locking_enabled())) {
4143                 log_err(cd, _("Missing or invalid reencrypt context."));
4144                 return -EINVAL;
4145         }
4146
4147         log_dbg(cd, "Resuming LUKS2 reencryption.");
4148
4149         if (rh->online && reencrypt_init_device_stack(cd, rh)) {
4150                 log_err(cd, _("Failed to initialize reencryption device stack."));
4151                 return -EINVAL;
4152         }
4153
4154         log_dbg(cd, "Progress %" PRIu64 ", device_size %" PRIu64, rh->progress, rh->device_size);
4155
4156         rs = REENC_OK;
4157
4158         if (progress && progress(rh->device_size, rh->progress, usrptr))
4159                 quit = true;
4160
4161         while (!quit && (rh->device_size > rh->progress)) {
4162                 rs = reencrypt_step(cd, hdr, rh, rh->device_size, rh->online);
4163                 if (rs != REENC_OK)
4164                         break;
4165
4166                 log_dbg(cd, "Progress %" PRIu64 ", device_size %" PRIu64, rh->progress, rh->device_size);
4167                 if (progress && progress(rh->device_size, rh->progress, usrptr))
4168                         quit = true;
4169
4170                 r = reencrypt_context_update(cd, rh);
4171                 if (r) {
4172                         log_err(cd, _("Failed to update reencryption context."));
4173                         rs = REENC_ERR;
4174                         break;
4175                 }
4176
4177                 log_dbg(cd, "Next reencryption offset will be %" PRIu64 " sectors.", rh->offset);
4178                 log_dbg(cd, "Next reencryption chunk size will be %" PRIu64 " sectors).", rh->length);
4179         }
4180
4181         r = reencrypt_teardown(cd, hdr, rh, rs, quit, progress, usrptr);
4182         return r;
4183 #else
4184         log_err(cd, _("This operation is not supported for this device type."));
4185         return -ENOTSUP;
4186 #endif
4187 }
4188
4189 int crypt_reencrypt(
4190         struct crypt_device *cd,
4191         int (*progress)(uint64_t size, uint64_t offset, void *usrptr))
4192 {
4193         return crypt_reencrypt_run(cd, progress, NULL);
4194 }
4195 #if USE_LUKS2_REENCRYPTION
4196 static int reencrypt_recovery(struct crypt_device *cd,
4197                 struct luks2_hdr *hdr,
4198                 uint64_t device_size,
4199                 struct volume_key *vks)
4200 {
4201         int r;
4202         struct luks2_reencrypt *rh = NULL;
4203
4204         r = reencrypt_load(cd, hdr, device_size, 0, 0, vks, &rh);
4205         if (r < 0) {
4206                 log_err(cd, _("Failed to load LUKS2 reencryption context."));
4207                 return r;
4208         }
4209
4210         r = reencrypt_recover_segment(cd, hdr, rh, vks);
4211         if (r < 0)
4212                 goto out;
4213
4214         if ((r = reencrypt_assign_segments(cd, hdr, rh, 0, 0)))
4215                 goto out;
4216
4217         r = reencrypt_context_update(cd, rh);
4218         if (r) {
4219                 log_err(cd, _("Failed to update reencryption context."));
4220                 goto out;
4221         }
4222
4223         r = reencrypt_teardown_ok(cd, hdr, rh);
4224         if (!r)
4225                 r = LUKS2_hdr_write(cd, hdr);
4226 out:
4227         LUKS2_reencrypt_free(cd, rh);
4228
4229         return r;
4230 }
4231 #endif
4232 /*
4233  * use only for calculation of minimal data device size.
4234  * The real data offset is taken directly from segments!
4235  */
4236 int LUKS2_reencrypt_data_offset(struct luks2_hdr *hdr, bool blockwise)
4237 {
4238         crypt_reencrypt_info ri = LUKS2_reencrypt_status(hdr);
4239         uint64_t data_offset = LUKS2_get_data_offset(hdr);
4240
4241         if (ri == CRYPT_REENCRYPT_CLEAN && reencrypt_direction(hdr) == CRYPT_REENCRYPT_FORWARD)
4242                 data_offset += reencrypt_data_shift(hdr) >> SECTOR_SHIFT;
4243
4244         return blockwise ? data_offset : data_offset << SECTOR_SHIFT;
4245 }
4246
4247 /* internal only */
4248 int LUKS2_reencrypt_check_device_size(struct crypt_device *cd, struct luks2_hdr *hdr,
4249         uint64_t check_size, uint64_t *dev_size, bool activation, bool dynamic)
4250 {
4251         int r;
4252         uint64_t data_offset, real_size = 0;
4253
4254         if (reencrypt_direction(hdr) == CRYPT_REENCRYPT_BACKWARD &&
4255             (LUKS2_get_segment_by_flag(hdr, "backup-moved-segment") || dynamic))
4256                 check_size += reencrypt_data_shift(hdr);
4257
4258         r = device_check_access(cd, crypt_data_device(cd), activation ? DEV_EXCL : DEV_OK);
4259         if (r)
4260                 return r;
4261
4262         data_offset = LUKS2_reencrypt_data_offset(hdr, false);
4263
4264         r = device_check_size(cd, crypt_data_device(cd), data_offset, 1);
4265         if (r)
4266                 return r;
4267
4268         r = device_size(crypt_data_device(cd), &real_size);
4269         if (r)
4270                 return r;
4271
4272         log_dbg(cd, "Required minimal device size: %" PRIu64 " (%" PRIu64 " sectors)"
4273                     ", real device size: %" PRIu64 " (%" PRIu64 " sectors) "
4274                     "calculated device size: %" PRIu64 " (%" PRIu64 " sectors)",
4275                     check_size, check_size >> SECTOR_SHIFT, real_size, real_size >> SECTOR_SHIFT,
4276                     real_size - data_offset, (real_size - data_offset) >> SECTOR_SHIFT);
4277
4278         if (real_size < data_offset || (check_size && real_size < check_size)) {
4279                 log_err(cd, _("Device %s is too small."), device_path(crypt_data_device(cd)));
4280                 return -EINVAL;
4281         }
4282
4283         *dev_size = real_size - data_offset;
4284
4285         return 0;
4286 }
4287 #if USE_LUKS2_REENCRYPTION
/* returns keyslot number on success (>= 0) or negative errno otherwise */
/*
 * Reencryption crash recovery entry point (caller holds metadata lock):
 * unlock keys for all segments, optionally upload them to the kernel
 * keyring, verify the device size and run segment recovery.
 * On success, key material is handed to the caller via *vks (if set).
 */
int LUKS2_reencrypt_locked_recovery_by_passphrase(struct crypt_device *cd,
	int keyslot_old,
	int keyslot_new,
	const char *passphrase,
	size_t passphrase_size,
	struct volume_key **vks)
{
	uint64_t minimal_size, device_size;
	int keyslot, r = -EINVAL;
	struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
	struct volume_key *vk = NULL, *_vks = NULL;

	log_dbg(cd, "Entering reencryption crash recovery.");

	if (LUKS2_get_data_size(hdr, &minimal_size, NULL))
		return r;

	/* Unlock volume keys for both old and new segments. */
	r = LUKS2_keyslot_open_all_segments(cd, keyslot_old, keyslot_new,
			passphrase, passphrase_size, &_vks);
	if (r < 0)
		goto out;
	keyslot = r;

	/* When keyring use is enabled, load every unlocked key into it. */
	if (crypt_use_keyring_for_vk(cd))
		vk = _vks;

	while (vk) {
		r = LUKS2_volume_key_load_in_keyring_by_digest(cd, vk, crypt_volume_key_get_id(vk));
		if (r < 0)
			goto out;
		vk = crypt_volume_key_next(vk);
	}

	if (LUKS2_reencrypt_check_device_size(cd, hdr, minimal_size, &device_size, true, false))
		goto out;

	r = reencrypt_recovery(cd, hdr, device_size, _vks);

	/* Transfer key ownership to the caller only on success. */
	if (!r && vks)
		MOVE_REF(*vks, _vks);
out:
	if (r < 0)
		crypt_drop_keyring_key(cd, _vks);
	crypt_free_volume_key(_vks);

	return r < 0 ? r : keyslot;
}
4336 #endif
4337 crypt_reencrypt_info LUKS2_reencrypt_get_params(struct luks2_hdr *hdr,
4338         struct crypt_params_reencrypt *params)
4339 {
4340         crypt_reencrypt_info ri;
4341         int digest;
4342         uint8_t version;
4343
4344         if (params)
4345                 memset(params, 0, sizeof(*params));
4346
4347         ri = LUKS2_reencrypt_status(hdr);
4348         if (ri == CRYPT_REENCRYPT_NONE || ri == CRYPT_REENCRYPT_INVALID || !params)
4349                 return ri;
4350
4351         digest = LUKS2_digest_by_keyslot(hdr, LUKS2_find_keyslot(hdr, "reencrypt"));
4352         if (digest < 0 && digest != -ENOENT)
4353                 return CRYPT_REENCRYPT_INVALID;
4354
4355         /*
4356          * In case there's an old "online-reencrypt" requirement or reencryption
4357          * keyslot digest is missing inform caller reencryption metadata requires repair.
4358          */
4359         if (!LUKS2_config_get_reencrypt_version(hdr, &version) &&
4360             (version < 2 || digest == -ENOENT)) {
4361                 params->flags |= CRYPT_REENCRYPT_REPAIR_NEEDED;
4362                 return ri;
4363         }
4364
4365         params->mode = reencrypt_mode(hdr);
4366         params->direction = reencrypt_direction(hdr);
4367         params->resilience = reencrypt_resilience_type(hdr);
4368         params->hash = reencrypt_resilience_hash(hdr);
4369         params->data_shift = reencrypt_data_shift(hdr) >> SECTOR_SHIFT;
4370         params->max_hotzone_size = 0;
4371         if (LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment") >= 0)
4372                 params->flags |= CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT;
4373
4374         return ri;
4375 }