Fixed the build error for the riscv64 architecture when building with GCC 13
[platform/upstream/cryptsetup.git] / lib / luks2 / luks2_reencrypt.c
1 /*
2  * LUKS - Linux Unified Key Setup v2, reencryption helpers
3  *
4  * Copyright (C) 2015-2021, Red Hat, Inc. All rights reserved.
5  * Copyright (C) 2015-2021, Ondrej Kozina
6  *
7  * This program is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU General Public License
9  * as published by the Free Software Foundation; either version 2
10  * of the License, or (at your option) any later version.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20  */
21
22 #include "luks2_internal.h"
23 #include "utils_device_locking.h"
24
/*
 * Resilience (crash-protection) state for the current reencryption hot zone.
 * The active protection mode selects which member of the union is valid.
 */
struct reenc_protection {
        enum { REENC_PROTECTION_NONE = 0, /* none should be 0 always */
               REENC_PROTECTION_CHECKSUM,
               REENC_PROTECTION_JOURNAL,
               REENC_PROTECTION_DATASHIFT } type;

        union {
        struct {
        } none;
        struct {
                char hash[LUKS2_CHECKSUM_ALG_L]; /* checksum hash algorithm name (or include luks.h) */
                struct crypt_hash *ch;           /* open hash context for the algorithm above */
                size_t hash_size;                /* size of a single checksum digest */
                /* buffer for checksums */
                void *checksums;
                size_t checksums_len;
        } csum;
        struct {
        } ds;
        } p;
};
46
/*
 * Runtime context for an in-progress LUKS2 reencryption.
 * Holds the current hot-zone window, the backup segment references from the
 * metadata, both storage wrappers (old and new key/cipher) and the data
 * buffer moved between them.
 */
struct luks2_reencrypt {
        /* reencryption window attributes */
        uint64_t offset;        /* current hot-zone offset (bytes, data-device relative) */
        uint64_t progress;      /* bytes already reencrypted */
        uint64_t length;        /* hot-zone length in bytes */
        uint64_t data_shift;    /* data shift for encrypt-with-move (bytes) */
        size_t alignment;       /* required IO alignment (max of sector sizes and block size) */
        uint64_t device_size;   /* usable data device size in bytes */
        bool online;            /* device is active (online reencryption) */
        bool fixed_length;      /* segments carry explicit sizes instead of "dynamic" */
        crypt_reencrypt_direction_info direction;
        crypt_reencrypt_mode_info mode;

        char *device_name;
        char *hotzone_name;
        char *overlay_name;
        uint32_t flags;

        /* reencryption window persistence attributes */
        struct reenc_protection rp;

        int reenc_keyslot;

        /* already running reencryption */
        json_object *jobj_segs_hot;
        struct json_object *jobj_segs_post;

        /* backup segments */
        json_object *jobj_segment_new;
        int digest_new;
        json_object *jobj_segment_old;
        int digest_old;
        json_object *jobj_segment_moved;

        struct volume_key *vks;

        void *reenc_buffer;     /* transfer buffer, rh->length bytes */
        ssize_t read;           /* bytes currently held in reenc_buffer */

        struct crypt_storage_wrapper *cw1;  /* wrapper over the old segment */
        struct crypt_storage_wrapper *cw2;  /* wrapper over the new segment */

        uint32_t wflags1;
        uint32_t wflags2;

        struct crypt_lock_handle *reenc_lock;
};
94 #if USE_LUKS2_REENCRYPTION
95 static int reencrypt_keyslot_update(struct crypt_device *cd,
96         const struct luks2_reencrypt *rh)
97 {
98         int r;
99         json_object *jobj_keyslot, *jobj_area, *jobj_area_type;
100         struct luks2_hdr *hdr;
101
102         if (!(hdr = crypt_get_hdr(cd, CRYPT_LUKS2)))
103                 return -EINVAL;
104
105         jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, rh->reenc_keyslot);
106         if (!jobj_keyslot)
107                 return -EINVAL;
108
109         json_object_object_get_ex(jobj_keyslot, "area", &jobj_area);
110         json_object_object_get_ex(jobj_area, "type", &jobj_area_type);
111
112         if (rh->rp.type == REENC_PROTECTION_CHECKSUM) {
113                 log_dbg(cd, "Updating reencrypt keyslot for checksum protection.");
114                 json_object_object_add(jobj_area, "type", json_object_new_string("checksum"));
115                 json_object_object_add(jobj_area, "hash", json_object_new_string(rh->rp.p.csum.hash));
116                 json_object_object_add(jobj_area, "sector_size", json_object_new_int64(rh->alignment));
117         } else if (rh->rp.type == REENC_PROTECTION_NONE) {
118                 log_dbg(cd, "Updating reencrypt keyslot for none protection.");
119                 json_object_object_add(jobj_area, "type", json_object_new_string("none"));
120                 json_object_object_del(jobj_area, "hash");
121         } else if (rh->rp.type == REENC_PROTECTION_JOURNAL) {
122                 log_dbg(cd, "Updating reencrypt keyslot for journal protection.");
123                 json_object_object_add(jobj_area, "type", json_object_new_string("journal"));
124                 json_object_object_del(jobj_area, "hash");
125         } else
126                 log_dbg(cd, "No update of reencrypt keyslot needed.");
127
128         r = LUKS2_keyslot_reencrypt_digest_create(cd, hdr, rh->vks);
129         if (r < 0)
130                 log_err(cd, "Failed to refresh reencryption verification digest.");
131
132         return r;
133 }
134
135 static json_object *reencrypt_segment(struct luks2_hdr *hdr, unsigned new)
136 {
137         return LUKS2_get_segment_by_flag(hdr, new ? "backup-final" : "backup-previous");
138 }
139
140 static json_object *reencrypt_segment_new(struct luks2_hdr *hdr)
141 {
142         return reencrypt_segment(hdr, 1);
143 }
144
145 static json_object *reencrypt_segment_old(struct luks2_hdr *hdr)
146 {
147         return reencrypt_segment(hdr, 0);
148 }
149
/* Cipher specification string of the target (new) backup segment. */
static const char *reencrypt_segment_cipher_new(struct luks2_hdr *hdr)
{
	return json_segment_get_cipher(reencrypt_segment(hdr, 1));
}
154
/* Cipher specification string of the original (old) backup segment. */
static const char *reencrypt_segment_cipher_old(struct luks2_hdr *hdr)
{
	return json_segment_get_cipher(reencrypt_segment(hdr, 0));
}
159
/* Encryption sector size of the target (new) backup segment. */
static int reencrypt_get_sector_size_new(struct luks2_hdr *hdr)
{
	return json_segment_get_sector_size(reencrypt_segment(hdr, 1));
}
164
/* Encryption sector size of the original (old) backup segment. */
static int reencrypt_get_sector_size_old(struct luks2_hdr *hdr)
{
	return json_segment_get_sector_size(reencrypt_segment(hdr, 0));
}
169
170 static uint64_t reencrypt_data_offset(struct luks2_hdr *hdr, unsigned new)
171 {
172         json_object *jobj = reencrypt_segment(hdr, new);
173         if (jobj)
174                 return json_segment_get_offset(jobj, 0);
175
176         return LUKS2_get_data_offset(hdr) << SECTOR_SHIFT;
177 }
178
179 static uint64_t LUKS2_reencrypt_get_data_offset_moved(struct luks2_hdr *hdr)
180 {
181         json_object *jobj_segment = LUKS2_get_segment_by_flag(hdr, "backup-moved-segment");
182
183         if (!jobj_segment)
184                 return 0;
185
186         return json_segment_get_offset(jobj_segment, 0);
187 }
188
189 static uint64_t reencrypt_get_data_offset_new(struct luks2_hdr *hdr)
190 {
191         return reencrypt_data_offset(hdr, 1);
192 }
193
194 static uint64_t reencrypt_get_data_offset_old(struct luks2_hdr *hdr)
195 {
196         return reencrypt_data_offset(hdr, 0);
197 }
198 #endif
/*
 * Digest id assigned to the new or old backup segment.
 * Returns a negative error when the backup segment is missing.
 */
static int reencrypt_digest(struct luks2_hdr *hdr, unsigned new)
{
	const char *flag = new ? "backup-final" : "backup-previous";
	int segment = LUKS2_get_segment_id_by_flag(hdr, flag);

	return segment < 0 ? segment : LUKS2_digest_by_segment(hdr, segment);
}
208
/* Digest id of the target (new) backup segment. */
int LUKS2_reencrypt_digest_new(struct luks2_hdr *hdr)
{
	return reencrypt_digest(hdr, 1);
}
213
/* Digest id of the original (old) backup segment. */
int LUKS2_reencrypt_digest_old(struct luks2_hdr *hdr)
{
	return reencrypt_digest(hdr, 0);
}
218
219 /* none, checksums, journal or shift */
220 static const char *reencrypt_resilience_type(struct luks2_hdr *hdr)
221 {
222         json_object *jobj_keyslot, *jobj_area, *jobj_type;
223         int ks = LUKS2_find_keyslot(hdr, "reencrypt");
224
225         if (ks < 0)
226                 return NULL;
227
228         jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, ks);
229
230         json_object_object_get_ex(jobj_keyslot, "area", &jobj_area);
231         if (!json_object_object_get_ex(jobj_area, "type", &jobj_type))
232                 return NULL;
233
234         return json_object_get_string(jobj_type);
235 }
236
237 static const char *reencrypt_resilience_hash(struct luks2_hdr *hdr)
238 {
239         json_object *jobj_keyslot, *jobj_area, *jobj_type, *jobj_hash;
240         int ks = LUKS2_find_keyslot(hdr, "reencrypt");
241
242         if (ks < 0)
243                 return NULL;
244
245         jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, ks);
246
247         json_object_object_get_ex(jobj_keyslot, "area", &jobj_area);
248         if (!json_object_object_get_ex(jobj_area, "type", &jobj_type))
249                 return NULL;
250         if (strcmp(json_object_get_string(jobj_type), "checksum"))
251                 return NULL;
252         if (!json_object_object_get_ex(jobj_area, "hash", &jobj_hash))
253                 return NULL;
254
255         return json_object_get_string(jobj_hash);
256 }
257 #if USE_LUKS2_REENCRYPTION
258 static uint32_t reencrypt_alignment(struct luks2_hdr *hdr)
259 {
260         json_object *jobj_keyslot, *jobj_area, *jobj_type, *jobj_hash, *jobj_sector_size;
261         int ks = LUKS2_find_keyslot(hdr, "reencrypt");
262
263         if (ks < 0)
264                 return 0;
265
266         jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, ks);
267
268         json_object_object_get_ex(jobj_keyslot, "area", &jobj_area);
269         if (!json_object_object_get_ex(jobj_area, "type", &jobj_type))
270                 return 0;
271         if (strcmp(json_object_get_string(jobj_type), "checksum"))
272                 return 0;
273         if (!json_object_object_get_ex(jobj_area, "hash", &jobj_hash))
274                 return 0;
275         if (!json_object_object_get_ex(jobj_area, "sector_size", &jobj_sector_size))
276                 return 0;
277
278         return crypt_jobj_get_uint32(jobj_sector_size);
279 }
280
/*
 * Build the post-hot-zone segment map for encryption with data move
 * (backward direction with shifted data). Segments before the reencrypted
 * one are copied verbatim; the reencrypted segment is merged with the
 * following new segment (if any) into a single grown segment.
 * Returns a new json object (caller owns the reference) or NULL on error.
 */
static json_object *_enc_create_segments_shift_after(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	struct luks2_reencrypt *rh,
	uint64_t data_offset)
{
	int reenc_seg, i = 0;
	json_object *jobj_copy, *jobj_seg_new = NULL, *jobj_segs_post = json_object_new_object();
	uint64_t tmp;

	if (!rh->jobj_segs_hot || !jobj_segs_post)
		goto err;

	/* no hot segments at all: empty post map is valid */
	if (json_segments_count(rh->jobj_segs_hot) == 0)
		return jobj_segs_post;

	reenc_seg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot);
	if (reenc_seg < 0)
		goto err;

	/* copy (with a new reference) all segments preceding the hot one */
	while (i < reenc_seg) {
		jobj_copy = json_segments_get_segment(rh->jobj_segs_hot, i);
		if (!jobj_copy)
			goto err;
		json_object_object_add_by_uint(jobj_segs_post, i++, json_object_get(jobj_copy));
	}

	/*
	 * Prefer merging with the segment right after the hot zone; when it
	 * does not exist, reuse the hot segment itself (dropping its
	 * in-reencryption flag).
	 */
	if (json_object_copy(json_segments_get_segment(rh->jobj_segs_hot, reenc_seg + 1), &jobj_seg_new)) {
		if (json_object_copy(json_segments_get_segment(rh->jobj_segs_hot, reenc_seg), &jobj_seg_new))
			goto err;
		json_segment_remove_flag(jobj_seg_new, "in-reencryption");
		tmp = rh->length;
	} else {
		/* pull the merged segment back to the hot-zone offset */
		json_object_object_add(jobj_seg_new, "offset", crypt_jobj_new_uint64(rh->offset + data_offset));
		json_object_object_add(jobj_seg_new, "iv_tweak", crypt_jobj_new_uint64(rh->offset >> SECTOR_SHIFT));
		tmp = json_segment_get_size(jobj_seg_new, 0) + rh->length;
	}

	/* alter size of new segment, reenc_seg == 0 we're finished */
	json_object_object_add(jobj_seg_new, "size", reenc_seg > 0 ? crypt_jobj_new_uint64(tmp) : json_object_new_string("dynamic"));
	json_object_object_add_by_uint(jobj_segs_post, reenc_seg, jobj_seg_new);

	return jobj_segs_post;
err:
	json_object_put(jobj_segs_post);
	return NULL;
}
327
/*
 * Build the hot (in-reencryption) segment map for encryption with data move.
 * The last linear segment is shrunk to make room for the new crypt hot zone;
 * an existing crypt segment (already encrypted data) is appended after it.
 * Returns a new json object (caller owns the reference) or NULL on error.
 */
static json_object *reencrypt_make_hot_segments_encrypt_shift(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	struct luks2_reencrypt *rh,
	uint64_t data_offset)
{
	int sg, crypt_seg, i = 0;
	uint64_t segment_size;
	json_object *jobj_seg_shrunk, *jobj_seg_new, *jobj_copy, *jobj_enc_seg = NULL,
		     *jobj_segs_hot = json_object_new_object();

	if (!jobj_segs_hot)
		return NULL;

	crypt_seg = LUKS2_segment_by_type(hdr, "crypt");

	/* FIXME: This is hack. Find proper way to fix it. */
	sg = LUKS2_last_segment_by_type(hdr, "linear");
	if (rh->offset && sg < 0)
		goto err;
	/* no linear segment left: nothing remains to encrypt in this pass */
	if (sg < 0)
		return jobj_segs_hot;

	/* hot zone segment flagged in-reencryption (last arg == 1) */
	jobj_enc_seg = json_segment_create_crypt(data_offset + rh->offset,
						      rh->offset >> SECTOR_SHIFT,
						      &rh->length,
						      reencrypt_segment_cipher_new(hdr),
						      reencrypt_get_sector_size_new(hdr),
						      1);

	/* copy all segments preceding the last linear one unchanged */
	while (i < sg) {
		jobj_copy = LUKS2_get_segment_jobj(hdr, i);
		if (!jobj_copy)
			goto err;
		json_object_object_add_by_uint(jobj_segs_hot, i++, json_object_get(jobj_copy));
	}

	/* shrink the linear segment by the hot-zone length (if anything remains) */
	segment_size = LUKS2_segment_size(hdr, sg, 0);
	if (segment_size > rh->length) {
		jobj_seg_shrunk = NULL;
		if (json_object_copy(LUKS2_get_segment_jobj(hdr, sg), &jobj_seg_shrunk))
			goto err;
		json_object_object_add(jobj_seg_shrunk, "size", crypt_jobj_new_uint64(segment_size - rh->length));
		json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_seg_shrunk);
	}

	json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_enc_seg);
	jobj_enc_seg = NULL; /* see err: label */

	/* first crypt segment after encryption ? */
	if (crypt_seg >= 0) {
		jobj_seg_new = LUKS2_get_segment_jobj(hdr, crypt_seg);
		if (!jobj_seg_new)
			goto err;
		json_object_object_add_by_uint(jobj_segs_hot, sg, json_object_get(jobj_seg_new));
	}

	return jobj_segs_hot;
err:
	json_object_put(jobj_enc_seg);
	json_object_put(jobj_segs_hot);

	return NULL;
}
391
392 static json_object *reencrypt_make_segment_new(struct crypt_device *cd,
393                 struct luks2_hdr *hdr,
394                 const struct luks2_reencrypt *rh,
395                 uint64_t data_offset,
396                 uint64_t segment_offset,
397                 uint64_t iv_offset,
398                 const uint64_t *segment_length)
399 {
400         switch (rh->mode) {
401         case CRYPT_REENCRYPT_REENCRYPT:
402         case CRYPT_REENCRYPT_ENCRYPT:
403                 return json_segment_create_crypt(data_offset + segment_offset,
404                                                   crypt_get_iv_offset(cd) + (iv_offset >> SECTOR_SHIFT),
405                                                   segment_length,
406                                                   reencrypt_segment_cipher_new(hdr),
407                                                   reencrypt_get_sector_size_new(hdr), 0);
408         case CRYPT_REENCRYPT_DECRYPT:
409                 return json_segment_create_linear(data_offset + segment_offset, segment_length, 0);
410         }
411
412         return NULL;
413 }
414
415 static json_object *reencrypt_make_post_segments_forward(struct crypt_device *cd,
416         struct luks2_hdr *hdr,
417         struct luks2_reencrypt *rh,
418         uint64_t data_offset)
419 {
420         int reenc_seg;
421         json_object *jobj_new_seg_after, *jobj_old_seg, *jobj_old_seg_copy = NULL,
422                     *jobj_segs_post = json_object_new_object();
423         uint64_t fixed_length = rh->offset + rh->length;
424
425         if (!rh->jobj_segs_hot || !jobj_segs_post)
426                 goto err;
427
428         reenc_seg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot);
429         if (reenc_seg < 0)
430                 return NULL;
431
432         jobj_old_seg = json_segments_get_segment(rh->jobj_segs_hot, reenc_seg + 1);
433
434         /*
435          * if there's no old segment after reencryption, we're done.
436          * Set size to 'dynamic' again.
437          */
438         jobj_new_seg_after = reencrypt_make_segment_new(cd, hdr, rh, data_offset, 0, 0, jobj_old_seg ? &fixed_length : NULL);
439         if (!jobj_new_seg_after)
440                 goto err;
441         json_object_object_add_by_uint(jobj_segs_post, 0, jobj_new_seg_after);
442
443         if (jobj_old_seg) {
444                 if (rh->fixed_length) {
445                         if (json_object_copy(jobj_old_seg, &jobj_old_seg_copy))
446                                 goto err;
447                         jobj_old_seg = jobj_old_seg_copy;
448                         fixed_length = rh->device_size - fixed_length;
449                         json_object_object_add(jobj_old_seg, "size", crypt_jobj_new_uint64(fixed_length));
450                 } else
451                         json_object_get(jobj_old_seg);
452                 json_object_object_add_by_uint(jobj_segs_post, 1, jobj_old_seg);
453         }
454
455         return jobj_segs_post;
456 err:
457         json_object_put(jobj_segs_post);
458         return NULL;
459 }
460
461 static json_object *reencrypt_make_post_segments_backward(struct crypt_device *cd,
462         struct luks2_hdr *hdr,
463         struct luks2_reencrypt *rh,
464         uint64_t data_offset)
465 {
466         int reenc_seg;
467         uint64_t fixed_length;
468
469         json_object *jobj_new_seg_after, *jobj_old_seg,
470                     *jobj_segs_post = json_object_new_object();
471
472         if (!rh->jobj_segs_hot || !jobj_segs_post)
473                 goto err;
474
475         reenc_seg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot);
476         if (reenc_seg < 0)
477                 return NULL;
478
479         jobj_old_seg = json_segments_get_segment(rh->jobj_segs_hot, reenc_seg - 1);
480         if (jobj_old_seg)
481                 json_object_object_add_by_uint(jobj_segs_post, reenc_seg - 1, json_object_get(jobj_old_seg));
482         if (rh->fixed_length && rh->offset) {
483                 fixed_length = rh->device_size - rh->offset;
484                 jobj_new_seg_after = reencrypt_make_segment_new(cd, hdr, rh, data_offset, rh->offset, rh->offset, &fixed_length);
485         } else
486                 jobj_new_seg_after = reencrypt_make_segment_new(cd, hdr, rh, data_offset, rh->offset, rh->offset, NULL);
487         if (!jobj_new_seg_after)
488                 goto err;
489         json_object_object_add_by_uint(jobj_segs_post, reenc_seg, jobj_new_seg_after);
490
491         return jobj_segs_post;
492 err:
493         json_object_put(jobj_segs_post);
494         return NULL;
495 }
496
497 static json_object *reencrypt_make_segment_reencrypt(struct crypt_device *cd,
498                 struct luks2_hdr *hdr,
499                 const struct luks2_reencrypt *rh,
500                 uint64_t data_offset,
501                 uint64_t segment_offset,
502                 uint64_t iv_offset,
503                 const uint64_t *segment_length)
504 {
505         switch (rh->mode) {
506         case CRYPT_REENCRYPT_REENCRYPT:
507         case CRYPT_REENCRYPT_ENCRYPT:
508                 return json_segment_create_crypt(data_offset + segment_offset,
509                                 crypt_get_iv_offset(cd) + (iv_offset >> SECTOR_SHIFT),
510                                 segment_length,
511                                 reencrypt_segment_cipher_new(hdr),
512                                 reencrypt_get_sector_size_new(hdr), 1);
513         case CRYPT_REENCRYPT_DECRYPT:
514                 return json_segment_create_linear(data_offset + segment_offset, segment_length, 1);
515         }
516
517         return NULL;
518 }
519
520 static json_object *reencrypt_make_segment_old(struct crypt_device *cd,
521                 struct luks2_hdr *hdr,
522                 const struct luks2_reencrypt *rh,
523                 uint64_t data_offset,
524                 uint64_t segment_offset,
525                 const uint64_t *segment_length)
526 {
527         json_object *jobj_old_seg = NULL;
528
529         switch (rh->mode) {
530         case CRYPT_REENCRYPT_REENCRYPT:
531         case CRYPT_REENCRYPT_DECRYPT:
532                 jobj_old_seg = json_segment_create_crypt(data_offset + segment_offset,
533                                                     crypt_get_iv_offset(cd) + (segment_offset >> SECTOR_SHIFT),
534                                                     segment_length,
535                                                     reencrypt_segment_cipher_old(hdr),
536                                                     reencrypt_get_sector_size_old(hdr),
537                                                     0);
538                 break;
539         case CRYPT_REENCRYPT_ENCRYPT:
540                 jobj_old_seg = json_segment_create_linear(data_offset + segment_offset, segment_length, 0);
541         }
542
543         return jobj_old_seg;
544 }
545
/*
 * Build the hot segment map for forward reencryption:
 * [new (already done)] [reencrypt hot zone] [old (remaining)].
 * Leading/trailing segments are omitted when empty.
 * Returns a new json object (caller owns the reference) or NULL on error.
 */
static json_object *reencrypt_make_hot_segments_forward(struct crypt_device *cd,
		struct luks2_hdr *hdr,
		struct luks2_reencrypt *rh,
		uint64_t device_size,
		uint64_t data_offset)
{
	json_object *jobj_segs_hot, *jobj_reenc_seg, *jobj_old_seg, *jobj_new_seg;
	uint64_t fixed_length, tmp = rh->offset + rh->length;
	unsigned int sg = 0;

	jobj_segs_hot = json_object_new_object();
	if (!jobj_segs_hot)
		return NULL;

	/* anything before the hot zone has already been reencrypted */
	if (rh->offset) {
		jobj_new_seg = reencrypt_make_segment_new(cd, hdr, rh, data_offset, 0, 0, &rh->offset);
		if (!jobj_new_seg)
			goto err;
		json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_new_seg);
	}

	jobj_reenc_seg = reencrypt_make_segment_reencrypt(cd, hdr, rh, data_offset, rh->offset, rh->offset, &rh->length);
	if (!jobj_reenc_seg)
		goto err;

	json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_reenc_seg);

	/* remainder past the hot zone still uses the old segment description */
	if (tmp < device_size) {
		fixed_length = device_size - tmp;
		jobj_old_seg = reencrypt_make_segment_old(cd, hdr, rh, data_offset + rh->data_shift, rh->offset + rh->length, rh->fixed_length ? &fixed_length : NULL);
		if (!jobj_old_seg)
			goto err;
		json_object_object_add_by_uint(jobj_segs_hot, sg, jobj_old_seg);
	}

	return jobj_segs_hot;
err:
	json_object_put(jobj_segs_hot);
	return NULL;
}
586
/*
 * Build the hot segment map for backward reencryption:
 * [old (remaining)] [reencrypt hot zone] [new (already done)].
 * Leading/trailing segments are omitted when empty.
 * Returns a new json object (caller owns the reference) or NULL on error.
 */
static json_object *reencrypt_make_hot_segments_backward(struct crypt_device *cd,
		struct luks2_hdr *hdr,
		struct luks2_reencrypt *rh,
		uint64_t device_size,
		uint64_t data_offset)
{
	json_object *jobj_reenc_seg, *jobj_new_seg, *jobj_old_seg = NULL,
		    *jobj_segs_hot = json_object_new_object();
	int sg = 0;
	uint64_t fixed_length, tmp = rh->offset + rh->length;

	if (!jobj_segs_hot)
		return NULL;

	/* data before the hot zone still uses the current (old) segment 0 */
	if (rh->offset) {
		if (json_object_copy(LUKS2_get_segment_jobj(hdr, 0), &jobj_old_seg))
			goto err;
		json_object_object_add(jobj_old_seg, "size", crypt_jobj_new_uint64(rh->offset));

		json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_old_seg);
	}

	jobj_reenc_seg = reencrypt_make_segment_reencrypt(cd, hdr, rh, data_offset, rh->offset, rh->offset, &rh->length);
	if (!jobj_reenc_seg)
		goto err;

	json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_reenc_seg);

	/* data past the hot zone has already been reencrypted */
	if (tmp < device_size) {
		fixed_length = device_size - tmp;
		jobj_new_seg = reencrypt_make_segment_new(cd, hdr, rh, data_offset, rh->offset + rh->length, rh->offset + rh->length, rh->fixed_length ? &fixed_length : NULL);
		if (!jobj_new_seg)
			goto err;
		json_object_object_add_by_uint(jobj_segs_hot, sg, jobj_new_seg);
	}

	return jobj_segs_hot;
err:
	json_object_put(jobj_segs_hot);
	return NULL;
}
628
629 static int reencrypt_make_hot_segments(struct crypt_device *cd,
630                 struct luks2_hdr *hdr,
631                 struct luks2_reencrypt *rh,
632                 uint64_t device_size,
633                 uint64_t data_offset)
634 {
635         rh->jobj_segs_hot = NULL;
636
637         if (rh->mode == CRYPT_REENCRYPT_ENCRYPT && rh->direction == CRYPT_REENCRYPT_BACKWARD &&
638             rh->data_shift && rh->jobj_segment_moved) {
639                 log_dbg(cd, "Calculating hot segments for encryption with data move.");
640                 rh->jobj_segs_hot = reencrypt_make_hot_segments_encrypt_shift(cd, hdr, rh, data_offset);
641         } else if (rh->direction == CRYPT_REENCRYPT_FORWARD) {
642                 log_dbg(cd, "Calculating hot segments (forward direction).");
643                 rh->jobj_segs_hot = reencrypt_make_hot_segments_forward(cd, hdr, rh, device_size, data_offset);
644         } else if (rh->direction == CRYPT_REENCRYPT_BACKWARD) {
645                 log_dbg(cd, "Calculating hot segments (backward direction).");
646                 rh->jobj_segs_hot = reencrypt_make_hot_segments_backward(cd, hdr, rh, device_size, data_offset);
647         }
648
649         return rh->jobj_segs_hot ? 0 : -EINVAL;
650 }
651
652 static int reencrypt_make_post_segments(struct crypt_device *cd,
653                 struct luks2_hdr *hdr,
654                 struct luks2_reencrypt *rh,
655                 uint64_t data_offset)
656 {
657         rh->jobj_segs_post = NULL;
658
659         if (rh->mode == CRYPT_REENCRYPT_ENCRYPT && rh->direction == CRYPT_REENCRYPT_BACKWARD &&
660             rh->data_shift && rh->jobj_segment_moved) {
661                 log_dbg(cd, "Calculating post segments for encryption with data move.");
662                 rh->jobj_segs_post = _enc_create_segments_shift_after(cd, hdr, rh, data_offset);
663         } else if (rh->direction == CRYPT_REENCRYPT_FORWARD) {
664                 log_dbg(cd, "Calculating post segments (forward direction).");
665                 rh->jobj_segs_post = reencrypt_make_post_segments_forward(cd, hdr, rh, data_offset);
666         } else if (rh->direction == CRYPT_REENCRYPT_BACKWARD) {
667                 log_dbg(cd, "Calculating segments (backward direction).");
668                 rh->jobj_segs_post = reencrypt_make_post_segments_backward(cd, hdr, rh, data_offset);
669         }
670
671         return rh->jobj_segs_post ? 0 : -EINVAL;
672 }
673 #endif
674 static uint64_t reencrypt_data_shift(struct luks2_hdr *hdr)
675 {
676         json_object *jobj_keyslot, *jobj_area, *jobj_data_shift;
677         int ks = LUKS2_find_keyslot(hdr, "reencrypt");
678
679         if (ks < 0)
680                 return 0;
681
682         jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, ks);
683
684         json_object_object_get_ex(jobj_keyslot, "area", &jobj_area);
685         if (!json_object_object_get_ex(jobj_area, "shift_size", &jobj_data_shift))
686                 return 0;
687
688         return crypt_jobj_get_uint64(jobj_data_shift);
689 }
690
691 static crypt_reencrypt_mode_info reencrypt_mode(struct luks2_hdr *hdr)
692 {
693         const char *mode;
694         crypt_reencrypt_mode_info mi = CRYPT_REENCRYPT_REENCRYPT;
695         json_object *jobj_keyslot, *jobj_mode;
696
697         jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, LUKS2_find_keyslot(hdr, "reencrypt"));
698         if (!jobj_keyslot)
699                 return mi;
700
701         json_object_object_get_ex(jobj_keyslot, "mode", &jobj_mode);
702         mode = json_object_get_string(jobj_mode);
703
704         /* validation enforces allowed values */
705         if (!strcmp(mode, "encrypt"))
706                 mi = CRYPT_REENCRYPT_ENCRYPT;
707         else if (!strcmp(mode, "decrypt"))
708                 mi = CRYPT_REENCRYPT_DECRYPT;
709
710         return mi;
711 }
712
713 static crypt_reencrypt_direction_info reencrypt_direction(struct luks2_hdr *hdr)
714 {
715         const char *value;
716         json_object *jobj_keyslot, *jobj_mode;
717         crypt_reencrypt_direction_info di = CRYPT_REENCRYPT_FORWARD;
718
719         jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, LUKS2_find_keyslot(hdr, "reencrypt"));
720         if (!jobj_keyslot)
721                 return di;
722
723         json_object_object_get_ex(jobj_keyslot, "direction", &jobj_mode);
724         value = json_object_get_string(jobj_mode);
725
726         /* validation enforces allowed values */
727         if (strcmp(value, "forward"))
728                 di = CRYPT_REENCRYPT_BACKWARD;
729
730         return di;
731 }
732
/* Internal status codes returned by reencryption step handlers. */
typedef enum { REENC_OK = 0, REENC_ERR, REENC_ROLLBACK, REENC_FATAL } reenc_status_t;
734
/*
 * Release all resources held by a reencryption context and free it.
 * Safe to call with rh == NULL. Teardown order matters: keyring keys are
 * dropped before the volume keys are freed, and the exclusive data-device
 * reference and the reencryption lock are released last.
 */
void LUKS2_reencrypt_free(struct crypt_device *cd, struct luks2_reencrypt *rh)
{
	if (!rh)
		return;

	if (rh->rp.type == REENC_PROTECTION_CHECKSUM) {
		if (rh->rp.p.csum.ch) {
			crypt_hash_destroy(rh->rp.p.csum.ch);
			rh->rp.p.csum.ch = NULL;
		}
		if (rh->rp.p.csum.checksums) {
			/* NOTE(review): plain memset before free may be elided by
			 * the compiler; if the checksums are considered sensitive,
			 * a guaranteed memzero helper would be safer — confirm. */
			memset(rh->rp.p.csum.checksums, 0, rh->rp.p.csum.checksums_len);
			free(rh->rp.p.csum.checksums);
			rh->rp.p.csum.checksums = NULL;
		}
	}

	/* drop all json references and clear the pointers */
	json_object_put(rh->jobj_segs_hot);
	rh->jobj_segs_hot = NULL;
	json_object_put(rh->jobj_segs_post);
	rh->jobj_segs_post = NULL;
	json_object_put(rh->jobj_segment_old);
	rh->jobj_segment_old = NULL;
	json_object_put(rh->jobj_segment_new);
	rh->jobj_segment_new = NULL;
	json_object_put(rh->jobj_segment_moved);
	rh->jobj_segment_moved = NULL;

	free(rh->reenc_buffer);
	rh->reenc_buffer = NULL;
	crypt_storage_wrapper_destroy(rh->cw1);
	rh->cw1 = NULL;
	crypt_storage_wrapper_destroy(rh->cw2);
	rh->cw2 = NULL;

	free(rh->device_name);
	free(rh->overlay_name);
	free(rh->hotzone_name);
	crypt_drop_keyring_key(cd, rh->vks);
	crypt_free_volume_key(rh->vks);
	device_release_excl(cd, crypt_data_device(cd));
	crypt_unlock_internal(cd, rh->reenc_lock);
	free(rh);
}
779 #if USE_LUKS2_REENCRYPTION
/*
 * Required alignment of a reencryption hotzone: the maximum of the data
 * device block size and both the old and new encryption sector sizes.
 */
static size_t reencrypt_get_alignment(struct crypt_device *cd,
		struct luks2_hdr *hdr)
{
	int ss;
	size_t alignment = device_block_size(cd, crypt_data_device(cd));

	ss = reencrypt_get_sector_size_old(hdr);
	if (ss > 0 && (size_t)ss > alignment)
		alignment = (size_t)ss; /* explicit cast, consistent with branch below (avoids int -> size_t conversion warning breaking -Werror builds) */
	ss = reencrypt_get_sector_size_new(hdr);
	if (ss > 0 && (size_t)ss > alignment)
		alignment = (size_t)ss;

	return alignment;
}
795
796 /* returns void because it must not fail on valid LUKS2 header */
797 static void _load_backup_segments(struct luks2_hdr *hdr,
798                 struct luks2_reencrypt *rh)
799 {
800         int segment = LUKS2_get_segment_id_by_flag(hdr, "backup-final");
801
802         if (segment >= 0) {
803                 rh->jobj_segment_new = json_object_get(LUKS2_get_segment_jobj(hdr, segment));
804                 rh->digest_new = LUKS2_digest_by_segment(hdr, segment);
805         } else {
806                 rh->jobj_segment_new = NULL;
807                 rh->digest_new = -ENOENT;
808         }
809
810         segment = LUKS2_get_segment_id_by_flag(hdr, "backup-previous");
811         if (segment >= 0) {
812                 rh->jobj_segment_old = json_object_get(LUKS2_get_segment_jobj(hdr, segment));
813                 rh->digest_old = LUKS2_digest_by_segment(hdr, segment);
814         } else {
815                 rh->jobj_segment_old = NULL;
816                 rh->digest_old = -ENOENT;
817         }
818
819         segment = LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment");
820         if (segment >= 0)
821                 rh->jobj_segment_moved = json_object_get(LUKS2_get_segment_jobj(hdr, segment));
822         else
823                 rh->jobj_segment_moved = NULL;
824 }
825
/*
 * Compute the next hotzone offset for backward encryption with a moved
 * (shifted) first segment. Sums all active "linear" (not yet encrypted)
 * segments; the hotzone normally spans [offset, offset + data_shift),
 * except for a possibly shorter final zone. Returns 0 on success,
 * -EINVAL if the remaining linear area is smaller than the data shift
 * or the layout is unexpected.
 */
static int reencrypt_offset_backward_moved(struct luks2_hdr *hdr, json_object *jobj_segments, uint64_t *reencrypt_length, uint64_t data_shift, uint64_t *offset)
{
	uint64_t tmp, linear_length = 0;
	int sg, segs = json_segments_count(jobj_segments);

	/* find reencrypt offset with data shift */
	for (sg = 0; sg < segs; sg++)
		if (LUKS2_segment_is_type(hdr, sg, "linear"))
			linear_length += LUKS2_segment_size(hdr, sg, 0);

	/* all active linear segments length */
	if (linear_length) {
		if (linear_length < data_shift)
			return -EINVAL;
		tmp = linear_length - data_shift;
		/* last zone is shorter than data_shift: shrink the hotzone length */
		if (tmp && tmp < data_shift) {
			*offset = data_shift;
			*reencrypt_length = tmp;
		} else
			*offset = tmp;
		return 0;
	}

	/* no linear segment left; a single remaining segment means done from offset 0 */
	if (segs == 1) {
		*offset = 0;
		return 0;
	}

	/* should be unreachable */

	return -EINVAL;
}
858
859 static int _offset_forward(struct luks2_hdr *hdr, json_object *jobj_segments, uint64_t *offset)
860 {
861         int segs = json_segments_count(jobj_segments);
862
863         if (segs == 1)
864                 *offset = 0;
865         else if (segs == 2) {
866                 *offset = json_segment_get_size(json_segments_get_segment(jobj_segments, 0), 0);
867                 if (!*offset)
868                         return -EINVAL;
869         } else
870                 return -EINVAL;
871
872         return 0;
873 }
874
875 static int _offset_backward(struct luks2_hdr *hdr, json_object *jobj_segments, uint64_t device_size, uint64_t *length, uint64_t *offset)
876 {
877         int segs = json_segments_count(jobj_segments);
878         uint64_t tmp;
879
880         if (segs == 1) {
881                 if (device_size < *length)
882                         *length = device_size;
883                 *offset = device_size - *length;
884         } else if (segs == 2) {
885                 tmp = json_segment_get_size(json_segments_get_segment(jobj_segments, 0), 0);
886                 if (tmp < *length)
887                         *length = tmp;
888                 *offset =  tmp - *length;
889         } else
890                 return -EINVAL;
891
892         return 0;
893 }
894
/* must be always relative to data offset */
/* the LUKS2 header MUST be valid */
/*
 * Resolve the offset (and possibly clamp *reencrypt_length) of the next
 * hotzone. If a segment is already flagged in-reencryption, return its
 * offset directly; otherwise derive it from the reencryption direction.
 * Returns 0 on success, -EINVAL on invalid arguments or layout.
 */
static int reencrypt_offset(struct luks2_hdr *hdr,
		crypt_reencrypt_direction_info di,
		uint64_t device_size,
		uint64_t *reencrypt_length,
		uint64_t *offset)
{
	int sg;
	json_object *jobj_segments;
	uint64_t data_shift = reencrypt_data_shift(hdr);

	if (!offset)
		return -EINVAL;

	/* if there's segment in reencryption return directly offset of it */
	json_object_object_get_ex(hdr->jobj, "segments", &jobj_segments);
	sg = json_segments_segment_in_reencrypt(jobj_segments);
	if (sg >= 0) {
		/* convert absolute segment offset to data-offset-relative */
		*offset = LUKS2_segment_offset(hdr, sg, 0) - (reencrypt_get_data_offset_new(hdr));
		return 0;
	}

	if (di == CRYPT_REENCRYPT_FORWARD)
		return _offset_forward(hdr, jobj_segments, offset);
	else if (di == CRYPT_REENCRYPT_BACKWARD) {
		/* encryption with a moved first segment needs special handling */
		if (reencrypt_mode(hdr) == CRYPT_REENCRYPT_ENCRYPT &&
		    LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment") >= 0)
			return reencrypt_offset_backward_moved(hdr, jobj_segments, reencrypt_length, data_shift, offset);
		return _offset_backward(hdr, jobj_segments, device_size, reencrypt_length, offset);
	}

	return -EINVAL;
}
929
/*
 * Compute the hotzone length in bytes for the configured resilience
 * mode, then clamp it by the hard maximum, a soft memory limit, the
 * caller's length_max, and round down to the zone alignment (and, when
 * possible, to the device optimal I/O size). May return 0; the caller
 * reports the error.
 */
static uint64_t reencrypt_length(struct crypt_device *cd,
		struct luks2_hdr *hdr,
		struct luks2_reencrypt *rh,
		uint64_t keyslot_area_length,
		uint64_t length_max)
{
	unsigned long dummy, optimal_alignment;
	uint64_t length, soft_mem_limit;

	if (rh->rp.type == REENC_PROTECTION_NONE)
		length = length_max ?: LUKS2_DEFAULT_NONE_REENCRYPTION_LENGTH;
	else if (rh->rp.type == REENC_PROTECTION_CHECKSUM)
		/* one hash per alignment-sized block must fit in the keyslot area */
		length = (keyslot_area_length / rh->rp.p.csum.hash_size) * rh->alignment;
	else if (rh->rp.type == REENC_PROTECTION_DATASHIFT)
		/* datashift zones are exactly one data shift long, no clamping */
		return reencrypt_data_shift(hdr);
	else
		length = keyslot_area_length;

	/* hard limit */
	if (length > LUKS2_REENCRYPT_MAX_HOTZONE_LENGTH)
		length = LUKS2_REENCRYPT_MAX_HOTZONE_LENGTH;

	/* soft limit is 1/4 of system memory */
	soft_mem_limit = crypt_getphysmemory_kb() << 8; /* multiply by (1024/4) */

	if (soft_mem_limit && length > soft_mem_limit)
		length = soft_mem_limit;

	if (length_max && length > length_max)
		length = length_max;

	length -= (length % rh->alignment);

	/* Emits error later */
	if (!length)
		return length;

	device_topology_alignment(cd, crypt_data_device(cd), &optimal_alignment, &dummy, length);

	/* we have to stick with encryption sector size alignment */
	if (optimal_alignment % rh->alignment)
		return length;

	/* align to opt-io size only if remaining size allows it */
	if (length > optimal_alignment)
		length -= (length % optimal_alignment);

	return length;
}
979
/*
 * Initialize a reencryption context from the LUKS2 header and user
 * parameters: locate the reencrypt keyslot and its area, compute zone
 * alignment, set up the resilience protection, then derive hotzone
 * length, offset and initial progress. Returns 0 on success, negative
 * errno on failure (a final length below 512 bytes is rejected).
 */
static int reencrypt_context_init(struct crypt_device *cd, struct luks2_hdr *hdr, struct luks2_reencrypt *rh, uint64_t device_size, const struct crypt_params_reencrypt *params)
{
	int r;
	uint64_t dummy, area_length;

	rh->reenc_keyslot = LUKS2_find_keyslot(hdr, "reencrypt");
	if (rh->reenc_keyslot < 0)
		return -EINVAL;
	if (LUKS2_keyslot_area(hdr, rh->reenc_keyslot, &dummy, &area_length) < 0)
		return -EINVAL;

	rh->mode = reencrypt_mode(hdr);

	rh->alignment = reencrypt_get_alignment(cd, hdr);
	if (!rh->alignment)
		return -EINVAL;

	log_dbg(cd, "Hotzone size: %" PRIu64 ", device size: %" PRIu64 ", alignment: %zu.",
		params->max_hotzone_size << SECTOR_SHIFT,
		params->device_size << SECTOR_SHIFT, rh->alignment);

	/* user-requested sizes must be multiples of the calculated alignment */
	if ((params->max_hotzone_size << SECTOR_SHIFT) % rh->alignment) {
		log_err(cd, _("Hotzone size must be multiple of calculated zone alignment (%zu bytes)."), rh->alignment);
		return -EINVAL;
	}

	if ((params->device_size << SECTOR_SHIFT) % rh->alignment) {
		log_err(cd, _("Device size must be multiple of calculated zone alignment (%zu bytes)."), rh->alignment);
		return -EINVAL;
	}

	rh->direction = reencrypt_direction(hdr);

	/* select and initialize the resilience (crash protection) mode */
	if (!strcmp(params->resilience, "datashift")) {
		log_dbg(cd, "Initializing reencryption context with data_shift resilience.");
		rh->rp.type = REENC_PROTECTION_DATASHIFT;
		rh->data_shift = reencrypt_data_shift(hdr);
	} else if (!strcmp(params->resilience, "journal")) {
		log_dbg(cd, "Initializing reencryption context with journal resilience.");
		rh->rp.type = REENC_PROTECTION_JOURNAL;
	} else if (!strcmp(params->resilience, "checksum")) {
		log_dbg(cd, "Initializing reencryption context with checksum resilience.");
		rh->rp.type = REENC_PROTECTION_CHECKSUM;

		/* store the hash name; reject truncation */
		r = snprintf(rh->rp.p.csum.hash,
			sizeof(rh->rp.p.csum.hash), "%s", params->hash);
		if (r < 0 || (size_t)r >= sizeof(rh->rp.p.csum.hash)) {
			log_dbg(cd, "Invalid hash parameter");
			return -EINVAL;
		}

		if (crypt_hash_init(&rh->rp.p.csum.ch, params->hash)) {
			log_dbg(cd, "Failed to initialize checksum resilience hash %s", params->hash);
			return -EINVAL;
		}

		r = crypt_hash_size(params->hash);
		if (r < 1) {
			log_dbg(cd, "Invalid hash size");
			return -EINVAL;
		}
		rh->rp.p.csum.hash_size = r;

		/* checksum buffer mirrors the keyslot area it is journaled to */
		rh->rp.p.csum.checksums_len = area_length;
		if (posix_memalign(&rh->rp.p.csum.checksums, device_alignment(crypt_metadata_device(cd)),
				   rh->rp.p.csum.checksums_len))
			return -ENOMEM;
	} else if (!strcmp(params->resilience, "none")) {
		log_dbg(cd, "Initializing reencryption context with none resilience.");
		rh->rp.type = REENC_PROTECTION_NONE;
	} else {
		log_err(cd, _("Unsupported resilience mode %s"), params->resilience);
		return -EINVAL;
	}

	/* non-zero params->device_size pins reencryption to a fixed size */
	if (params->device_size) {
		log_dbg(cd, "Switching reencryption to fixed size mode.");
		device_size = params->device_size << SECTOR_SHIFT;
		rh->fixed_length = true;
	} else
		rh->fixed_length = false;

	rh->length = reencrypt_length(cd, hdr, rh, area_length, params->max_hotzone_size << SECTOR_SHIFT);
	if (!rh->length) {
		log_dbg(cd, "Invalid reencryption length.");
		return -EINVAL;
	}

	if (reencrypt_offset(hdr, rh->direction, device_size, &rh->length, &rh->offset)) {
		log_dbg(cd, "Failed to get reencryption offset.");
		return -EINVAL;
	}

	/* clamp hotzone to device end */
	if (rh->offset > device_size)
		return -EINVAL;
	if (rh->length > device_size - rh->offset)
		rh->length = device_size - rh->offset;

	log_dbg(cd, "reencrypt-direction: %s", rh->direction == CRYPT_REENCRYPT_FORWARD ? "forward" : "backward");

	_load_backup_segments(hdr, rh);

	/* progress counts bytes already processed, direction dependent */
	if (rh->direction == CRYPT_REENCRYPT_BACKWARD)
		rh->progress = device_size - rh->offset - rh->length;
	else
		rh->progress = rh->offset;

	log_dbg(cd, "backup-previous digest id: %d", rh->digest_old);
	log_dbg(cd, "backup-final digest id: %d", rh->digest_new);
	log_dbg(cd, "reencrypt length: %" PRIu64, rh->length);
	log_dbg(cd, "reencrypt offset: %" PRIu64, rh->offset);
	log_dbg(cd, "reencrypt shift: %s%" PRIu64, (rh->data_shift && rh->direction == CRYPT_REENCRYPT_BACKWARD ? "-" : ""), rh->data_shift);
	log_dbg(cd, "reencrypt alignment: %zu", rh->alignment);
	log_dbg(cd, "reencrypt progress: %" PRIu64, rh->progress);

	rh->device_size = device_size;

	return rh->length < 512 ? -EINVAL : 0;
}
1099
1100 static size_t reencrypt_buffer_length(struct luks2_reencrypt *rh)
1101 {
1102         if (rh->data_shift)
1103                 return rh->data_shift;
1104         return rh->length;
1105 }
1106
/*
 * Build a reencryption context from a clean (consistent) header.
 * Resilience parameters fall back to those stored in the header when
 * the caller passes none; for "datashift" the stored values always win.
 * On success *rh owns the new context; on failure it is untouched.
 * Returns 0, -EINVAL or -ENOMEM.
 */
static int reencrypt_load_clean(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	uint64_t device_size,
	struct luks2_reencrypt **rh,
	const struct crypt_params_reencrypt *params)
{
	int r;
	const struct crypt_params_reencrypt hdr_reenc_params = {
		.resilience = reencrypt_resilience_type(hdr),
		.hash = reencrypt_resilience_hash(hdr),
		.device_size = params ? params->device_size : 0
	};
	struct luks2_reencrypt *tmp = crypt_zalloc(sizeof (*tmp));

	if (!tmp)
		return -ENOMEM;

	r = -EINVAL;
	if (!hdr_reenc_params.resilience)
		goto err;

	/* skip context update if data shift is detected in header */
	if (!strcmp(hdr_reenc_params.resilience, "datashift"))
		params = NULL;

	log_dbg(cd, "Initializing reencryption context (%s).", params ? "update" : "load");

	/* no caller override: use the parameters stored in the header */
	if (!params || !params->resilience)
		params = &hdr_reenc_params;

	r = reencrypt_context_init(cd, hdr, tmp, device_size, params);
	if (r)
		goto err;

	/* aligned buffer for one hotzone of data */
	if (posix_memalign(&tmp->reenc_buffer, device_alignment(crypt_data_device(cd)),
			   reencrypt_buffer_length(tmp))) {
		r = -ENOMEM;
		goto err;
	}

	*rh = tmp;

	return 0;
err:
	LUKS2_reencrypt_free(cd, tmp);

	return r;
}
1155
1156 static int reencrypt_make_segments(struct crypt_device *cd,
1157         struct luks2_hdr *hdr,
1158         struct luks2_reencrypt *rh,
1159         uint64_t device_size)
1160 {
1161         int r;
1162         uint64_t data_offset = reencrypt_get_data_offset_new(hdr);
1163
1164         log_dbg(cd, "Calculating segments.");
1165
1166         r = reencrypt_make_hot_segments(cd, hdr, rh, device_size, data_offset);
1167         if (!r) {
1168                 r = reencrypt_make_post_segments(cd, hdr, rh, data_offset);
1169                 if (r)
1170                         json_object_put(rh->jobj_segs_hot);
1171         }
1172
1173         if (r)
1174                 log_dbg(cd, "Failed to make reencryption segments.");
1175
1176         return r;
1177 }
1178
/*
 * Rebuild the hot segment layout after a crash: reuse the current
 * on-disk segments (skipping backups) as the hot set, then derive the
 * post segments. Returns 0, -EINVAL or -ENOMEM; on failure no json
 * references are leaked.
 */
static int reencrypt_make_segments_crashed(struct crypt_device *cd,
				struct luks2_hdr *hdr,
				struct luks2_reencrypt *rh)
{
	int r;
	uint64_t data_offset = crypt_get_data_offset(cd) << SECTOR_SHIFT;

	if (!rh)
		return -EINVAL;

	rh->jobj_segs_hot = json_object_new_object();
	if (!rh->jobj_segs_hot)
		return -ENOMEM;

	/* copy all non-backup segments (takes a json reference on each) */
	json_object_object_foreach(LUKS2_get_segments_jobj(hdr), key, val) {
		if (json_segment_is_backup(val))
			continue;
		json_object_object_add(rh->jobj_segs_hot, key, json_object_get(val));
	}

	r = reencrypt_make_post_segments(cd, hdr, rh, data_offset);
	if (r) {
		json_object_put(rh->jobj_segs_hot);
		rh->jobj_segs_hot = NULL;
	}

	return r;
}
1207
/*
 * Build a reencryption context for recovery after a crash: load the
 * clean context, then override hotzone length (from the in-reencryption
 * segment) and, for checksum resilience, the alignment stored in
 * metadata. On failure *rh is freed and set to NULL.
 */
static int reencrypt_load_crashed(struct crypt_device *cd,
	struct luks2_hdr *hdr, uint64_t device_size, struct luks2_reencrypt **rh)
{
	bool dynamic;
	uint64_t minimal_size;
	int r, reenc_seg;
	struct crypt_params_reencrypt params = {};

	if (LUKS2_get_data_size(hdr, &minimal_size, &dynamic))
		return -EINVAL;

	/* non-dynamic device: pin reencryption to the stored size */
	if (!dynamic)
		params.device_size = minimal_size >> SECTOR_SHIFT;

	r = reencrypt_load_clean(cd, hdr, device_size, rh, &params);

	if (!r) {
		reenc_seg = json_segments_segment_in_reencrypt(LUKS2_get_segments_jobj(hdr));
		if (reenc_seg < 0)
			r = -EINVAL;
		else
			(*rh)->length = LUKS2_segment_size(hdr, reenc_seg, 0);
	}

	if (!r && ((*rh)->rp.type == REENC_PROTECTION_CHECKSUM)) {
		/* we have to override calculated alignment with value stored in mda */
		(*rh)->alignment = reencrypt_alignment(hdr);
		if (!(*rh)->alignment) {
			log_dbg(cd, "Failed to get read resilience sector_size from metadata.");
			r = -EINVAL;
		}
	}

	if (!r)
		r = reencrypt_make_segments_crashed(cd, hdr, *rh);

	if (r) {
		LUKS2_reencrypt_free(cd, *rh);
		*rh = NULL;
	}
	return r;
}
1250
/*
 * Create the two storage wrappers used during reencryption: cw1 reads
 * the old segment (read-only, old cipher/key) and cw2 writes the new
 * segment (new cipher/key). Keys are looked up in vks by digest id.
 * NOTE(review): DISABLE_KCAPI is applied only when both uid and euid
 * are 0 — presumably a root-specific backend restriction; confirm.
 */
static int reencrypt_init_storage_wrappers(struct crypt_device *cd,
		struct luks2_hdr *hdr,
		struct luks2_reencrypt *rh,
		struct volume_key *vks)
{
	int r;
	struct volume_key *vk;
	uint32_t wrapper_flags = (getuid() || geteuid()) ? 0 : DISABLE_KCAPI;

	/* old-segment wrapper: read-only */
	vk = crypt_volume_key_by_id(vks, rh->digest_old);
	r = crypt_storage_wrapper_init(cd, &rh->cw1, crypt_data_device(cd),
			reencrypt_get_data_offset_old(hdr),
			crypt_get_iv_offset(cd),
			reencrypt_get_sector_size_old(hdr),
			reencrypt_segment_cipher_old(hdr),
			vk, wrapper_flags | OPEN_READONLY);
	if (r) {
		log_err(cd, _("Failed to initialize old segment storage wrapper."));
		return r;
	}
	rh->wflags1 = wrapper_flags | OPEN_READONLY;
	log_dbg(cd, "Old cipher storage wrapper type: %d.", crypt_storage_wrapper_get_type(rh->cw1));

	/* new-segment wrapper: writable */
	vk = crypt_volume_key_by_id(vks, rh->digest_new);
	r = crypt_storage_wrapper_init(cd, &rh->cw2, crypt_data_device(cd),
			reencrypt_get_data_offset_new(hdr),
			crypt_get_iv_offset(cd),
			reencrypt_get_sector_size_new(hdr),
			reencrypt_segment_cipher_new(hdr),
			vk, wrapper_flags);
	if (r) {
		log_err(cd, _("Failed to initialize new segment storage wrapper."));
		return r;
	}
	rh->wflags2 = wrapper_flags;
	log_dbg(cd, "New cipher storage wrapper type: %d", crypt_storage_wrapper_get_type(rh->cw2));

	return 0;
}
1290
1291 static int reencrypt_context_set_names(struct luks2_reencrypt *rh, const char *name)
1292 {
1293         if (!rh | !name)
1294                 return -EINVAL;
1295
1296         if (*name == '/') {
1297                 if (!(rh->device_name = dm_device_name(name)))
1298                         return -EINVAL;
1299         } else if (!(rh->device_name = strdup(name)))
1300                 return -ENOMEM;
1301
1302         if (asprintf(&rh->hotzone_name, "%s-hotzone-%s", rh->device_name,
1303                      rh->direction == CRYPT_REENCRYPT_FORWARD ? "forward" : "backward") < 0) {
1304                 rh->hotzone_name = NULL;
1305                 return -ENOMEM;
1306         }
1307         if (asprintf(&rh->overlay_name, "%s-overlay", rh->device_name) < 0) {
1308                 rh->overlay_name = NULL;
1309                 return -ENOMEM;
1310         }
1311
1312         rh->online = true;
1313         return 0;
1314 }
1315
1316 static int modify_offset(uint64_t *offset, uint64_t data_shift, crypt_reencrypt_direction_info di)
1317 {
1318         int r = -EINVAL;
1319
1320         if (!offset)
1321                 return r;
1322
1323         if (di == CRYPT_REENCRYPT_FORWARD) {
1324                 if (*offset >= data_shift) {
1325                         *offset -= data_shift;
1326                         r = 0;
1327                 }
1328         } else if (di == CRYPT_REENCRYPT_BACKWARD) {
1329                 *offset += data_shift;
1330                 r = 0;
1331         }
1332
1333         return r;
1334 }
1335
1336 static int reencrypt_update_flag(struct crypt_device *cd, int enable, bool commit)
1337 {
1338         uint32_t reqs;
1339         struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
1340
1341         if (LUKS2_config_get_requirements(cd, hdr, &reqs))
1342                 return -EINVAL;
1343
1344         /* nothing to do */
1345         if (enable && (reqs & CRYPT_REQUIREMENT_ONLINE_REENCRYPT))
1346                 return -EINVAL;
1347
1348         /* nothing to do */
1349         if (!enable && !(reqs & CRYPT_REQUIREMENT_ONLINE_REENCRYPT))
1350                 return -EINVAL;
1351
1352         if (enable)
1353                 reqs |= CRYPT_REQUIREMENT_ONLINE_REENCRYPT;
1354         else
1355                 reqs &= ~CRYPT_REQUIREMENT_ONLINE_REENCRYPT;
1356
1357         log_dbg(cd, "Going to %s reencryption requirement flag.", enable ? "store" : "wipe");
1358
1359         return LUKS2_config_set_requirements(cd, hdr, reqs, commit);
1360 }
1361
1362 static int reencrypt_recover_segment(struct crypt_device *cd,
1363         struct luks2_hdr *hdr,
1364         struct luks2_reencrypt *rh,
1365         struct volume_key *vks)
1366 {
1367         struct volume_key *vk_old, *vk_new;
1368         size_t count, s;
1369         ssize_t read, w;
1370         unsigned resilience;
1371         uint64_t area_offset, area_length, area_length_read, crash_iv_offset,
1372                  data_offset = crypt_get_data_offset(cd) << SECTOR_SHIFT;
1373         int devfd, r, new_sector_size, old_sector_size, rseg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot);
1374         char *checksum_tmp = NULL, *data_buffer = NULL;
1375         struct crypt_storage_wrapper *cw1 = NULL, *cw2 = NULL;
1376
1377         resilience = rh->rp.type;
1378
1379         if (rseg < 0 || rh->length < 512)
1380                 return -EINVAL;
1381
1382         vk_new = crypt_volume_key_by_id(vks, rh->digest_new);
1383         if (!vk_new && rh->mode != CRYPT_REENCRYPT_DECRYPT)
1384                 return -EINVAL;
1385         vk_old = crypt_volume_key_by_id(vks, rh->digest_old);
1386         if (!vk_old && rh->mode != CRYPT_REENCRYPT_ENCRYPT)
1387                 return -EINVAL;
1388         old_sector_size = json_segment_get_sector_size(reencrypt_segment_old(hdr));
1389         new_sector_size = json_segment_get_sector_size(reencrypt_segment_new(hdr));
1390         if (rh->mode == CRYPT_REENCRYPT_DECRYPT)
1391                 crash_iv_offset = rh->offset >> SECTOR_SHIFT; /* TODO: + old iv_tweak */
1392         else
1393                 crash_iv_offset = json_segment_get_iv_offset(json_segments_get_segment(rh->jobj_segs_hot, rseg));
1394
1395         log_dbg(cd, "crash_offset: %" PRIu64 ", crash_length: %" PRIu64 ",  crash_iv_offset: %" PRIu64, data_offset + rh->offset, rh->length, crash_iv_offset);
1396
1397         r = crypt_storage_wrapper_init(cd, &cw2, crypt_data_device(cd),
1398                         data_offset + rh->offset, crash_iv_offset, new_sector_size,
1399                         reencrypt_segment_cipher_new(hdr), vk_new, 0);
1400         if (r) {
1401                 log_err(cd, _("Failed to initialize new segment storage wrapper."));
1402                 return r;
1403         }
1404
1405         if (LUKS2_keyslot_area(hdr, rh->reenc_keyslot, &area_offset, &area_length)) {
1406                 r = -EINVAL;
1407                 goto out;
1408         }
1409
1410         if (posix_memalign((void**)&data_buffer, device_alignment(crypt_data_device(cd)), rh->length)) {
1411                 r = -ENOMEM;
1412                 goto out;
1413         }
1414
1415         switch (resilience) {
1416         case  REENC_PROTECTION_CHECKSUM:
1417                 log_dbg(cd, "Checksums based recovery.");
1418
1419                 r = crypt_storage_wrapper_init(cd, &cw1, crypt_data_device(cd),
1420                                 data_offset + rh->offset, crash_iv_offset, old_sector_size,
1421                                 reencrypt_segment_cipher_old(hdr), vk_old, 0);
1422                 if (r) {
1423                         log_err(cd, _("Failed to initialize old segment storage wrapper."));
1424                         goto out;
1425                 }
1426
1427                 count = rh->length / rh->alignment;
1428                 area_length_read = count * rh->rp.p.csum.hash_size;
1429                 if (area_length_read > area_length) {
1430                         log_dbg(cd, "Internal error in calculated area_length.");
1431                         r = -EINVAL;
1432                         goto out;
1433                 }
1434
1435                 checksum_tmp = malloc(rh->rp.p.csum.hash_size);
1436                 if (!checksum_tmp) {
1437                         r = -ENOMEM;
1438                         goto out;
1439                 }
1440
1441                 /* TODO: lock for read */
1442                 devfd = device_open(cd, crypt_metadata_device(cd), O_RDONLY);
1443                 if (devfd < 0)
1444                         goto out;
1445
1446                 /* read old data checksums */
1447                 read = read_lseek_blockwise(devfd, device_block_size(cd, crypt_metadata_device(cd)),
1448                                         device_alignment(crypt_metadata_device(cd)), rh->rp.p.csum.checksums, area_length_read, area_offset);
1449                 if (read < 0 || (size_t)read != area_length_read) {
1450                         log_err(cd, _("Failed to read checksums for current hotzone."));
1451                         r = -EINVAL;
1452                         goto out;
1453                 }
1454
1455                 read = crypt_storage_wrapper_read(cw2, 0, data_buffer, rh->length);
1456                 if (read < 0 || (size_t)read != rh->length) {
1457                         log_err(cd, _("Failed to read hotzone area starting at %" PRIu64 "."), rh->offset + data_offset);
1458                         r = -EINVAL;
1459                         goto out;
1460                 }
1461
1462                 for (s = 0; s < count; s++) {
1463                         if (crypt_hash_write(rh->rp.p.csum.ch, data_buffer + (s * rh->alignment), rh->alignment)) {
1464                                 log_dbg(cd, "Failed to write hash.");
1465                                 r = EINVAL;
1466                                 goto out;
1467                         }
1468                         if (crypt_hash_final(rh->rp.p.csum.ch, checksum_tmp, rh->rp.p.csum.hash_size)) {
1469                                 log_dbg(cd, "Failed to finalize hash.");
1470                                 r = EINVAL;
1471                                 goto out;
1472                         }
1473                         if (!memcmp(checksum_tmp, (char *)rh->rp.p.csum.checksums + (s * rh->rp.p.csum.hash_size), rh->rp.p.csum.hash_size)) {
1474                                 log_dbg(cd, "Sector %zu (size %zu, offset %zu) needs recovery", s, rh->alignment, s * rh->alignment);
1475                                 if (crypt_storage_wrapper_decrypt(cw1, s * rh->alignment, data_buffer + (s * rh->alignment), rh->alignment)) {
1476                                         log_err(cd, _("Failed to decrypt sector %zu."), s);
1477                                         r = -EINVAL;
1478                                         goto out;
1479                                 }
1480                                 w = crypt_storage_wrapper_encrypt_write(cw2, s * rh->alignment, data_buffer + (s * rh->alignment), rh->alignment);
1481                                 if (w < 0 || (size_t)w != rh->alignment) {
1482                                         log_err(cd, _("Failed to recover sector %zu."), s);
1483                                         r = -EINVAL;
1484                                         goto out;
1485                                 }
1486                         }
1487                 }
1488
1489                 r = 0;
1490                 break;
1491         case  REENC_PROTECTION_JOURNAL:
1492                 log_dbg(cd, "Journal based recovery.");
1493
1494                 /* FIXME: validation candidate */
1495                 if (rh->length > area_length) {
1496                         r = -EINVAL;
1497                         log_dbg(cd, "Invalid journal size.");
1498                         goto out;
1499                 }
1500
1501                 /* TODO locking */
1502                 r = crypt_storage_wrapper_init(cd, &cw1, crypt_metadata_device(cd),
1503                                 area_offset, crash_iv_offset, old_sector_size,
1504                                 reencrypt_segment_cipher_old(hdr), vk_old, 0);
1505                 if (r) {
1506                         log_err(cd, _("Failed to initialize old segment storage wrapper."));
1507                         goto out;
1508                 }
1509                 read = crypt_storage_wrapper_read_decrypt(cw1, 0, data_buffer, rh->length);
1510                 if (read < 0 || (size_t)read != rh->length) {
1511                         log_dbg(cd, "Failed to read journaled data.");
1512                         r = -EIO;
1513                         /* may content plaintext */
1514                         crypt_safe_memzero(data_buffer, rh->length);
1515                         goto out;
1516                 }
1517                 read = crypt_storage_wrapper_encrypt_write(cw2, 0, data_buffer, rh->length);
1518                 /* may content plaintext */
1519                 crypt_safe_memzero(data_buffer, rh->length);
1520                 if (read < 0 || (size_t)read != rh->length) {
1521                         log_dbg(cd, "recovery write failed.");
1522                         r = -EINVAL;
1523                         goto out;
1524                 }
1525
1526                 r = 0;
1527                 break;
1528         case  REENC_PROTECTION_DATASHIFT:
1529                 log_dbg(cd, "Data shift based recovery.");
1530
1531                 if (rseg == 0) {
1532                         r = crypt_storage_wrapper_init(cd, &cw1, crypt_data_device(cd),
1533                                         json_segment_get_offset(rh->jobj_segment_moved, 0), 0, 0,
1534                                         reencrypt_segment_cipher_old(hdr), NULL, 0);
1535                 } else
1536                         r = crypt_storage_wrapper_init(cd, &cw1, crypt_data_device(cd),
1537                                         data_offset + rh->offset - rh->data_shift, 0, 0,
1538                                         reencrypt_segment_cipher_old(hdr), NULL, 0);
1539                 if (r) {
1540                         log_err(cd, _("Failed to initialize old segment storage wrapper."));
1541                         goto out;
1542                 }
1543
1544                 read = crypt_storage_wrapper_read_decrypt(cw1, 0, data_buffer, rh->length);
1545                 if (read < 0 || (size_t)read != rh->length) {
1546                         log_dbg(cd, "Failed to read data.");
1547                         r = -EIO;
1548                         /* may content plaintext */
1549                         crypt_safe_memzero(data_buffer, rh->length);
1550                         goto out;
1551                 }
1552
1553                 read = crypt_storage_wrapper_encrypt_write(cw2, 0, data_buffer, rh->length);
1554                 /* may content plaintext */
1555                 crypt_safe_memzero(data_buffer, rh->length);
1556                 if (read < 0 || (size_t)read != rh->length) {
1557                         log_dbg(cd, "recovery write failed.");
1558                         r = -EINVAL;
1559                         goto out;
1560                 }
1561                 r = 0;
1562                 break;
1563         default:
1564                 r = -EINVAL;
1565         }
1566
1567         if (!r)
1568                 rh->read = rh->length;
1569 out:
1570         free(data_buffer);
1571         free(checksum_tmp);
1572         crypt_storage_wrapper_destroy(cw1);
1573         crypt_storage_wrapper_destroy(cw2);
1574
1575         return r;
1576 }
1577
1578 static int reencrypt_add_moved_segment(struct crypt_device *cd,
1579                 struct luks2_hdr *hdr,
1580                 struct luks2_reencrypt *rh)
1581 {
1582         int s = LUKS2_segment_first_unused_id(hdr);
1583
1584         if (!rh->jobj_segment_moved)
1585                 return 0;
1586
1587         if (s < 0)
1588                 return s;
1589
1590         if (json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), s, json_object_get(rh->jobj_segment_moved))) {
1591                 json_object_put(rh->jobj_segment_moved);
1592                 return -EINVAL;
1593         }
1594
1595         return 0;
1596 }
1597
1598 static int reencrypt_add_backup_segment(struct crypt_device *cd,
1599                 struct luks2_hdr *hdr,
1600                 struct luks2_reencrypt *rh,
1601                 unsigned final)
1602 {
1603         int digest, s = LUKS2_segment_first_unused_id(hdr);
1604         json_object *jobj;
1605
1606         if (s < 0)
1607                 return s;
1608
1609         digest = final ? rh->digest_new : rh->digest_old;
1610         jobj = final ? rh->jobj_segment_new : rh->jobj_segment_old;
1611
1612         if (json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), s, json_object_get(jobj))) {
1613                 json_object_put(jobj);
1614                 return -EINVAL;
1615         }
1616
1617         if (strcmp(json_segment_type(jobj), "crypt"))
1618                 return 0;
1619
1620         return LUKS2_digest_segment_assign(cd, hdr, s, digest, 1, 0);
1621 }
1622
1623 static int reencrypt_assign_segments_simple(struct crypt_device *cd,
1624         struct luks2_hdr *hdr,
1625         struct luks2_reencrypt *rh,
1626         unsigned hot,
1627         unsigned commit)
1628 {
1629         int r, sg;
1630
1631         if (hot && json_segments_count(rh->jobj_segs_hot) > 0) {
1632                 log_dbg(cd, "Setting 'hot' segments.");
1633
1634                 r = LUKS2_segments_set(cd, hdr, rh->jobj_segs_hot, 0);
1635                 if (!r)
1636                         rh->jobj_segs_hot = NULL;
1637         } else if (!hot && json_segments_count(rh->jobj_segs_post) > 0) {
1638                 log_dbg(cd, "Setting 'post' segments.");
1639                 r = LUKS2_segments_set(cd, hdr, rh->jobj_segs_post, 0);
1640                 if (!r)
1641                         rh->jobj_segs_post = NULL;
1642         } else {
1643                 log_dbg(cd, "No segments to set.");
1644                 return -EINVAL;
1645         }
1646
1647         if (r) {
1648                 log_dbg(cd, "Failed to assign new enc segments.");
1649                 return r;
1650         }
1651
1652         r = reencrypt_add_backup_segment(cd, hdr, rh, 0);
1653         if (r) {
1654                 log_dbg(cd, "Failed to assign reencryption previous backup segment.");
1655                 return r;
1656         }
1657
1658         r = reencrypt_add_backup_segment(cd, hdr, rh, 1);
1659         if (r) {
1660                 log_dbg(cd, "Failed to assign reencryption final backup segment.");
1661                 return r;
1662         }
1663
1664         r = reencrypt_add_moved_segment(cd, hdr, rh);
1665         if (r) {
1666                 log_dbg(cd, "Failed to assign reencryption moved backup segment.");
1667                 return r;
1668         }
1669
1670         for (sg = 0; sg < LUKS2_segments_count(hdr); sg++) {
1671                 if (LUKS2_segment_is_type(hdr, sg, "crypt") &&
1672                     LUKS2_digest_segment_assign(cd, hdr, sg, rh->mode == CRYPT_REENCRYPT_ENCRYPT ? rh->digest_new : rh->digest_old, 1, 0)) {
1673                         log_dbg(cd, "Failed to assign digest %u to segment %u.", rh->digest_new, sg);
1674                         return -EINVAL;
1675                 }
1676         }
1677
1678         return commit ? LUKS2_hdr_write(cd, hdr) : 0;
1679 }
1680
/*
 * Assign key digests to segments for in-place reencryption and optionally
 * commit the metadata to disk.
 *
 * With 'hot' set the in-progress ('hot') segment layout is installed,
 * otherwise the after-hotzone ('post') layout. The segment currently in
 * reencryption must hold references to both the old and the new digest.
 * Plain encryption/decryption is delegated to
 * reencrypt_assign_segments_simple().
 *
 * Returns 0 on success, negative errno otherwise.
 */
static int reencrypt_assign_segments(struct crypt_device *cd,
		struct luks2_hdr *hdr,
		struct luks2_reencrypt *rh,
		unsigned hot,
		unsigned commit)
{
	bool forward;
	int rseg, scount, r = -EINVAL;

	/* FIXME: validate in reencrypt context load */
	if (rh->digest_new < 0 && rh->mode != CRYPT_REENCRYPT_DECRYPT)
		return -EINVAL;

	/* Drop all existing digest <-> segment assignments first. */
	if (LUKS2_digest_segment_assign(cd, hdr, CRYPT_ANY_SEGMENT, CRYPT_ANY_DIGEST, 0, 0))
		return -EINVAL;

	if (rh->mode == CRYPT_REENCRYPT_ENCRYPT || rh->mode == CRYPT_REENCRYPT_DECRYPT)
		return reencrypt_assign_segments_simple(cd, hdr, rh, hot, commit);

	if (hot && rh->jobj_segs_hot) {
		log_dbg(cd, "Setting 'hot' segments.");

		/* On success the reference is owned by hdr; clear it here. */
		r = LUKS2_segments_set(cd, hdr, rh->jobj_segs_hot, 0);
		if (!r)
			rh->jobj_segs_hot = NULL;
	} else if (!hot && rh->jobj_segs_post) {
		log_dbg(cd, "Setting 'post' segments.");
		r = LUKS2_segments_set(cd, hdr, rh->jobj_segs_post, 0);
		if (!r)
			rh->jobj_segs_post = NULL;
	}

	if (r)
		return r;

	scount = LUKS2_segments_count(hdr);

	/* segment in reencryption has to hold reference on both digests */
	rseg = json_segments_segment_in_reencrypt(LUKS2_get_segments_jobj(hdr));
	if (rseg < 0 && hot)
		return -EINVAL;

	if (rseg >= 0) {
		LUKS2_digest_segment_assign(cd, hdr, rseg, rh->digest_new, 1, 0);
		LUKS2_digest_segment_assign(cd, hdr, rseg, rh->digest_old, 1, 0);
	}

	forward = (rh->direction == CRYPT_REENCRYPT_FORWARD);
	if (hot) {
		/* Data before the hotzone is already converted when going
		 * forward, still in the old state when going backward;
		 * the opposite holds for data behind the hotzone. */
		if (rseg > 0)
			LUKS2_digest_segment_assign(cd, hdr, 0, forward ? rh->digest_new : rh->digest_old, 1, 0);
		if (scount > rseg + 1)
			LUKS2_digest_segment_assign(cd, hdr, rseg + 1, forward ? rh->digest_old : rh->digest_new, 1, 0);
	} else {
		/* Post layout: a single remaining segment is fully converted. */
		LUKS2_digest_segment_assign(cd, hdr, 0, forward || scount == 1 ? rh->digest_new : rh->digest_old, 1, 0);
		if (scount > 1)
			LUKS2_digest_segment_assign(cd, hdr, 1, forward ? rh->digest_old : rh->digest_new, 1, 0);
	}

	r = reencrypt_add_backup_segment(cd, hdr, rh, 0);
	if (r) {
		log_dbg(cd, "Failed to assign hot reencryption backup segment.");
		return r;
	}
	r = reencrypt_add_backup_segment(cd, hdr, rh, 1);
	if (r) {
		log_dbg(cd, "Failed to assign post reencryption backup segment.");
		return r;
	}

	return commit ? LUKS2_hdr_write(cd, hdr) : 0;
}
1753
1754 static int reencrypt_set_encrypt_segments(struct crypt_device *cd, struct luks2_hdr *hdr, uint64_t dev_size, uint64_t data_shift, bool move_first_segment, crypt_reencrypt_direction_info di)
1755 {
1756         int r;
1757         uint64_t first_segment_offset, first_segment_length,
1758                  second_segment_offset, second_segment_length,
1759                  data_offset = LUKS2_get_data_offset(hdr) << SECTOR_SHIFT;
1760         json_object *jobj_segment_first = NULL, *jobj_segment_second = NULL, *jobj_segments;
1761
1762         if (dev_size < data_shift)
1763                 return -EINVAL;
1764
1765         if (data_shift && (di == CRYPT_REENCRYPT_FORWARD))
1766                 return -ENOTSUP;
1767
1768         if (move_first_segment) {
1769                 /*
1770                  * future data_device layout:
1771                  * [future LUKS2 header (data shift size)][second data segment][gap (data shift size)][first data segment (data shift size)]
1772                  */
1773                 first_segment_offset = dev_size;
1774                 first_segment_length = data_shift;
1775                 second_segment_offset = data_shift;
1776                 second_segment_length = dev_size - 2 * data_shift;
1777         } else if (data_shift) {
1778                 first_segment_offset = data_offset;
1779                 first_segment_length = dev_size;
1780         } else {
1781                 /* future data_device layout with detached header: [first data segment] */
1782                 first_segment_offset = data_offset;
1783                 first_segment_length = 0; /* dynamic */
1784         }
1785
1786         jobj_segments = json_object_new_object();
1787         if (!jobj_segments)
1788                 return -ENOMEM;
1789
1790         r = -EINVAL;
1791         if (move_first_segment) {
1792                 jobj_segment_first =  json_segment_create_linear(first_segment_offset, &first_segment_length, 0);
1793                 if (second_segment_length &&
1794                     !(jobj_segment_second = json_segment_create_linear(second_segment_offset, &second_segment_length, 0))) {
1795                         log_dbg(cd, "Failed generate 2nd segment.");
1796                         goto err;
1797                 }
1798         } else
1799                 jobj_segment_first =  json_segment_create_linear(first_segment_offset, first_segment_length ? &first_segment_length : NULL, 0);
1800
1801         if (!jobj_segment_first) {
1802                 log_dbg(cd, "Failed generate 1st segment.");
1803                 goto err;
1804         }
1805
1806         json_object_object_add(jobj_segments, "0", jobj_segment_first);
1807         if (jobj_segment_second)
1808                 json_object_object_add(jobj_segments, "1", jobj_segment_second);
1809
1810         r = LUKS2_digest_segment_assign(cd, hdr, CRYPT_ANY_SEGMENT, CRYPT_ANY_DIGEST, 0, 0);
1811
1812         if (!r)
1813                 r = LUKS2_segments_set(cd, hdr, jobj_segments, 0);
1814 err:
1815         return r;
1816 }
1817
1818 static int reencrypt_make_targets(struct crypt_device *cd,
1819                                 struct luks2_hdr *hdr,
1820                                 struct device *hz_device,
1821                                 struct volume_key *vks,
1822                                 struct dm_target *result,
1823                                 uint64_t size)
1824 {
1825         bool reenc_seg;
1826         struct volume_key *vk;
1827         uint64_t segment_size, segment_offset, segment_start = 0;
1828         int r;
1829         int s = 0;
1830         json_object *jobj, *jobj_segments = LUKS2_get_segments_jobj(hdr);
1831
1832         while (result) {
1833                 jobj = json_segments_get_segment(jobj_segments, s);
1834                 if (!jobj) {
1835                         log_dbg(cd, "Internal error. Segment %u is null.", s);
1836                         r = -EINVAL;
1837                         goto out;
1838                 }
1839
1840                 reenc_seg = (s == json_segments_segment_in_reencrypt(jobj_segments));
1841
1842                 segment_offset = json_segment_get_offset(jobj, 1);
1843                 segment_size = json_segment_get_size(jobj, 1);
1844                 /* 'dynamic' length allowed in last segment only */
1845                 if (!segment_size && !result->next)
1846                         segment_size = (size >> SECTOR_SHIFT) - segment_start;
1847                 if (!segment_size) {
1848                         log_dbg(cd, "Internal error. Wrong segment size %u", s);
1849                         r = -EINVAL;
1850                         goto out;
1851                 }
1852
1853                 if (!strcmp(json_segment_type(jobj), "crypt")) {
1854                         vk = crypt_volume_key_by_id(vks, reenc_seg ? LUKS2_reencrypt_digest_new(hdr) : LUKS2_digest_by_segment(hdr, s));
1855                         if (!vk) {
1856                                 log_err(cd, _("Missing key for dm-crypt segment %u"), s);
1857                                 r = -EINVAL;
1858                                 goto out;
1859                         }
1860
1861                         if (reenc_seg)
1862                                 segment_offset -= crypt_get_data_offset(cd);
1863
1864                         r = dm_crypt_target_set(result, segment_start, segment_size,
1865                                                 reenc_seg ? hz_device : crypt_data_device(cd),
1866                                                 vk,
1867                                                 json_segment_get_cipher(jobj),
1868                                                 json_segment_get_iv_offset(jobj),
1869                                                 segment_offset,
1870                                                 "none",
1871                                                 0,
1872                                                 json_segment_get_sector_size(jobj));
1873                         if (r) {
1874                                 log_err(cd, _("Failed to set dm-crypt segment."));
1875                                 goto out;
1876                         }
1877                 } else if (!strcmp(json_segment_type(jobj), "linear")) {
1878                         r = dm_linear_target_set(result, segment_start, segment_size, reenc_seg ? hz_device : crypt_data_device(cd), segment_offset);
1879                         if (r) {
1880                                 log_err(cd, _("Failed to set dm-linear segment."));
1881                                 goto out;
1882                         }
1883                 } else {
1884                         r = -EINVAL;
1885                         goto out;
1886                 }
1887
1888                 segment_start += segment_size;
1889                 s++;
1890                 result = result->next;
1891         }
1892
1893         return s;
1894 out:
1895         return r;
1896 }
1897
1898 /* GLOBAL FIXME: audit function names and parameters names */
1899
1900 /* FIXME:
1901  *      1) audit log routines
1902  *      2) can't we derive hotzone device name from crypt context? (unlocked name, device uuid, etc?)
1903  */
1904 static int reencrypt_load_overlay_device(struct crypt_device *cd, struct luks2_hdr *hdr,
1905         const char *overlay, const char *hotzone, struct volume_key *vks, uint64_t size,
1906         uint32_t flags)
1907 {
1908         char hz_path[PATH_MAX];
1909         int r;
1910
1911         struct device *hz_dev = NULL;
1912         struct crypt_dm_active_device dmd = {
1913                 .flags = flags,
1914         };
1915
1916         log_dbg(cd, "Loading new table for overlay device %s.", overlay);
1917
1918         r = snprintf(hz_path, PATH_MAX, "%s/%s", dm_get_dir(), hotzone);
1919         if (r < 0 || r >= PATH_MAX) {
1920                 r = -EINVAL;
1921                 goto out;
1922         }
1923
1924         r = device_alloc(cd, &hz_dev, hz_path);
1925         if (r)
1926                 goto out;
1927
1928         r = dm_targets_allocate(&dmd.segment, LUKS2_segments_count(hdr));
1929         if (r)
1930                 goto out;
1931
1932         r = reencrypt_make_targets(cd, hdr, hz_dev, vks, &dmd.segment, size);
1933         if (r < 0)
1934                 goto out;
1935
1936         r = dm_reload_device(cd, overlay, &dmd, 0, 0);
1937
1938         /* what else on error here ? */
1939 out:
1940         dm_targets_free(cd, &dmd);
1941         device_free(cd, hz_dev);
1942
1943         return r;
1944 }
1945
1946 static int reencrypt_replace_device(struct crypt_device *cd, const char *target, const char *source, uint32_t flags)
1947 {
1948         int r, exists = 1;
1949         struct crypt_dm_active_device dmd_source, dmd_target = {};
1950         uint32_t dmflags = DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH;
1951
1952         log_dbg(cd, "Replacing table in device %s with table from device %s.", target, source);
1953
1954         /* check only whether target device exists */
1955         r = dm_status_device(cd, target);
1956         if (r < 0) {
1957                 if (r == -ENODEV)
1958                         exists = 0;
1959                 else
1960                         return r;
1961         }
1962
1963         r = dm_query_device(cd, source, DM_ACTIVE_DEVICE | DM_ACTIVE_CRYPT_CIPHER |
1964                             DM_ACTIVE_CRYPT_KEYSIZE | DM_ACTIVE_CRYPT_KEY, &dmd_source);
1965
1966         if (r < 0)
1967                 return r;
1968
1969         if (exists && ((r = dm_query_device(cd, target, 0, &dmd_target)) < 0))
1970                 goto err;
1971
1972         dmd_source.flags |= flags;
1973         dmd_source.uuid = crypt_get_uuid(cd);
1974
1975         if (exists) {
1976                 if (dmd_target.size != dmd_source.size) {
1977                         log_err(cd, _("Source and target device sizes don't match. Source %" PRIu64 ", target: %" PRIu64 "."),
1978                                 dmd_source.size, dmd_target.size);
1979                         r = -EINVAL;
1980                         goto err;
1981                 }
1982                 r = dm_reload_device(cd, target, &dmd_source, 0, 0);
1983                 if (!r) {
1984                         log_dbg(cd, "Resuming device %s", target);
1985                         r = dm_resume_device(cd, target, dmflags | act2dmflags(dmd_source.flags));
1986                 }
1987         } else
1988                 r = dm_create_device(cd, target, CRYPT_SUBDEV, &dmd_source);
1989 err:
1990         dm_targets_free(cd, &dmd_source);
1991         dm_targets_free(cd, &dmd_target);
1992
1993         return r;
1994 }
1995
1996 static int reencrypt_swap_backing_device(struct crypt_device *cd, const char *name,
1997                               const char *new_backend_name)
1998 {
1999         int r;
2000         struct device *overlay_dev = NULL;
2001         char overlay_path[PATH_MAX] = { 0 };
2002         struct crypt_dm_active_device dmd = {};
2003
2004         log_dbg(cd, "Redirecting %s mapping to new backing device: %s.", name, new_backend_name);
2005
2006         r = snprintf(overlay_path, PATH_MAX, "%s/%s", dm_get_dir(), new_backend_name);
2007         if (r < 0 || r >= PATH_MAX) {
2008                 r = -EINVAL;
2009                 goto out;
2010         }
2011
2012         r = device_alloc(cd, &overlay_dev, overlay_path);
2013         if (r)
2014                 goto out;
2015
2016         r = device_block_adjust(cd, overlay_dev, DEV_OK,
2017                                 0, &dmd.size, &dmd.flags);
2018         if (r)
2019                 goto out;
2020
2021         r = dm_linear_target_set(&dmd.segment, 0, dmd.size, overlay_dev, 0);
2022         if (r)
2023                 goto out;
2024
2025         r = dm_reload_device(cd, name, &dmd, 0, 0);
2026         if (!r) {
2027                 log_dbg(cd, "Resuming device %s", name);
2028                 r = dm_resume_device(cd, name, DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH);
2029         }
2030
2031 out:
2032         dm_targets_free(cd, &dmd);
2033         device_free(cd, overlay_dev);
2034
2035         return r;
2036 }
2037
2038 static int reencrypt_activate_hotzone_device(struct crypt_device *cd, const char *name, uint64_t device_size, uint32_t flags)
2039 {
2040         int r;
2041         uint64_t new_offset = reencrypt_get_data_offset_new(crypt_get_hdr(cd, CRYPT_LUKS2)) >> SECTOR_SHIFT;
2042
2043         struct crypt_dm_active_device dmd = {
2044                 .flags = flags,
2045                 .uuid = crypt_get_uuid(cd),
2046                 .size = device_size >> SECTOR_SHIFT
2047         };
2048
2049         log_dbg(cd, "Activating hotzone device %s.", name);
2050
2051         r = device_block_adjust(cd, crypt_data_device(cd), DEV_OK,
2052                                 new_offset, &dmd.size, &dmd.flags);
2053         if (r)
2054                 goto err;
2055
2056         r = dm_linear_target_set(&dmd.segment, 0, dmd.size, crypt_data_device(cd), new_offset);
2057         if (r)
2058                 goto err;
2059
2060         r = dm_create_device(cd, name, CRYPT_SUBDEV, &dmd);
2061 err:
2062         dm_targets_free(cd, &dmd);
2063
2064         return r;
2065 }
2066
/*
 * Build the reencryption device stack: hotzone helper, overlay device,
 * and finally swap the live 'name' mapping to point at the overlay.
 * The order of these steps is significant; on failure both helper
 * devices are removed again.
 *
 * Returns 0 on success, negative errno otherwise.
 */
static int reencrypt_init_device_stack(struct crypt_device *cd,
				     const struct luks2_reencrypt *rh)
{
	int r;

	/* Activate hotzone device 1:1 linear mapping to data_device */
	r = reencrypt_activate_hotzone_device(cd, rh->hotzone_name, rh->device_size, CRYPT_ACTIVATE_PRIVATE);
	if (r) {
		log_err(cd, _("Failed to activate hotzone device %s."), rh->hotzone_name);
		return r;
	}

	/*
	 * Activate overlay device with exactly same table as original 'name' mapping.
	 * Note that within this step the 'name' device may already include a table
	 * constructed from more than single dm-crypt segment. Therefore transfer
	 * mapping as is.
	 *
	 * If we're about to resume reencryption orig mapping has to be already validated for
	 * abrupt shutdown and rchunk_offset has to point on next chunk to reencrypt!
	 *
	 * TODO: in crypt_activate_by*
	 */
	r = reencrypt_replace_device(cd, rh->overlay_name, rh->device_name, CRYPT_ACTIVATE_PRIVATE);
	if (r) {
		log_err(cd, _("Failed to activate overlay device %s with actual origin table."), rh->overlay_name);
		goto err;
	}

	/* swap origin mapping to overlay device */
	r = reencrypt_swap_backing_device(cd, rh->device_name, rh->overlay_name);
	if (r) {
		log_err(cd, _("Failed to load new mapping for device %s."), rh->device_name);
		goto err;
	}

	/*
	 * Now the 'name' (unlocked luks) device is mapped via dm-linear to the overlay
	 * device, and the overlay carries the original live table of the 'name' device
	 * as it was before the swap.
	 */

	return 0;
err:
	/* TODO: force error helper devices on error path */
	dm_remove_device(cd, rh->overlay_name, 0);
	dm_remove_device(cd, rh->hotzone_name, 0);

	return r;
}
2116
2117 /* TODO:
2118  *      1) audit error path. any error in this routine is fatal and should be unlikely.
2119  *         usually it would hint some collision with another userspace process touching
2120  *         dm devices directly.
2121  */
2122 static int reenc_refresh_helper_devices(struct crypt_device *cd, const char *overlay, const char *hotzone)
2123 {
2124         int r;
2125
2126         /*
2127          * we have to explicitly suspend the overlay device before suspending
2128          * the hotzone one. Resuming overlay device (aka switching tables) only
2129          * after suspending the hotzone may lead to deadlock.
2130          *
2131          * In other words: always suspend the stack from top to bottom!
2132          */
2133         r = dm_suspend_device(cd, overlay, DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH);
2134         if (r) {
2135                 log_err(cd, _("Failed to suspend device %s."), overlay);
2136                 return r;
2137         }
2138
2139         /* suspend HZ device */
2140         r = dm_suspend_device(cd, hotzone, DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH);
2141         if (r) {
2142                 log_err(cd, _("Failed to suspend device %s."), hotzone);
2143                 return r;
2144         }
2145
2146         /* resume overlay device: inactive table (with hotozne) -> live */
2147         r = dm_resume_device(cd, overlay, DM_RESUME_PRIVATE);
2148         if (r)
2149                 log_err(cd, _("Failed to resume device %s."), overlay);
2150
2151         return r;
2152 }
2153
2154 static int reencrypt_refresh_overlay_devices(struct crypt_device *cd,
2155                 struct luks2_hdr *hdr,
2156                 const char *overlay,
2157                 const char *hotzone,
2158                 struct volume_key *vks,
2159                 uint64_t device_size,
2160                 uint32_t flags)
2161 {
2162         int r = reencrypt_load_overlay_device(cd, hdr, overlay, hotzone, vks, device_size, flags);
2163         if (r) {
2164                 log_err(cd, _("Failed to reload device %s."), overlay);
2165                 return REENC_ERR;
2166         }
2167
2168         r = reenc_refresh_helper_devices(cd, overlay, hotzone);
2169         if (r) {
2170                 log_err(cd, _("Failed to refresh reencryption devices stack."));
2171                 return REENC_ROLLBACK;
2172         }
2173
2174         return REENC_OK;
2175 }
2176
2177 static int reencrypt_move_data(struct crypt_device *cd, int devfd, uint64_t data_shift)
2178 {
2179         void *buffer;
2180         int r;
2181         ssize_t ret;
2182         uint64_t buffer_len, offset;
2183         struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
2184
2185         log_dbg(cd, "Going to move data from head of data device.");
2186
2187         buffer_len = data_shift;
2188         if (!buffer_len)
2189                 return -EINVAL;
2190
2191         offset = json_segment_get_offset(LUKS2_get_segment_jobj(hdr, 0), 0);
2192
2193         /* this is nonsense anyway */
2194         if (buffer_len != json_segment_get_size(LUKS2_get_segment_jobj(hdr, 0), 0)) {
2195                 log_dbg(cd, "buffer_len %" PRIu64", segment size %" PRIu64, buffer_len, json_segment_get_size(LUKS2_get_segment_jobj(hdr, 0), 0));
2196                 return -EINVAL;
2197         }
2198
2199         if (posix_memalign(&buffer, device_alignment(crypt_data_device(cd)), buffer_len))
2200                 return -ENOMEM;
2201
2202         ret = read_lseek_blockwise(devfd,
2203                         device_block_size(cd, crypt_data_device(cd)),
2204                         device_alignment(crypt_data_device(cd)),
2205                         buffer, buffer_len, 0);
2206         if (ret < 0 || (uint64_t)ret != buffer_len) {
2207                 r = -EIO;
2208                 goto err;
2209         }
2210
2211         log_dbg(cd, "Going to write %" PRIu64 " bytes at offset %" PRIu64, buffer_len, offset);
2212         ret = write_lseek_blockwise(devfd,
2213                         device_block_size(cd, crypt_data_device(cd)),
2214                         device_alignment(crypt_data_device(cd)),
2215                         buffer, buffer_len, offset);
2216         if (ret < 0 || (uint64_t)ret != buffer_len) {
2217                 r = -EIO;
2218                 goto err;
2219         }
2220
2221         r = 0;
2222 err:
2223         memset(buffer, 0, buffer_len);
2224         free(buffer);
2225         return r;
2226 }
2227
2228 static int reencrypt_make_backup_segments(struct crypt_device *cd,
2229                 struct luks2_hdr *hdr,
2230                 int keyslot_new,
2231                 const char *cipher,
2232                 uint64_t data_offset,
2233                 const struct crypt_params_reencrypt *params)
2234 {
2235         int r, segment, moved_segment = -1, digest_old = -1, digest_new = -1;
2236         json_object *jobj_segment_new = NULL, *jobj_segment_old = NULL, *jobj_segment_bcp = NULL;
2237         uint32_t sector_size = params->luks2 ? params->luks2->sector_size : SECTOR_SIZE;
2238         uint64_t segment_offset, tmp, data_shift = params->data_shift << SECTOR_SHIFT;
2239
2240         if (params->mode != CRYPT_REENCRYPT_DECRYPT) {
2241                 digest_new = LUKS2_digest_by_keyslot(hdr, keyslot_new);
2242                 if (digest_new < 0)
2243                         return -EINVAL;
2244         }
2245
2246         if (params->mode != CRYPT_REENCRYPT_ENCRYPT) {
2247                 digest_old = LUKS2_digest_by_segment(hdr, CRYPT_DEFAULT_SEGMENT);
2248                 if (digest_old < 0)
2249                         return -EINVAL;
2250         }
2251
2252         segment = LUKS2_segment_first_unused_id(hdr);
2253         if (segment < 0)
2254                 return -EINVAL;
2255
2256         if (params->mode == CRYPT_REENCRYPT_ENCRYPT &&
2257             (params->flags & CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT)) {
2258                 json_object_copy(LUKS2_get_segment_jobj(hdr, 0), &jobj_segment_bcp);
2259                 r = LUKS2_segment_set_flag(jobj_segment_bcp, "backup-moved-segment");
2260                 if (r)
2261                         goto err;
2262                 moved_segment = segment++;
2263                 json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), moved_segment, jobj_segment_bcp);
2264         }
2265
2266         /* FIXME: Add detection for case (digest old == digest new && old segment == new segment) */
2267         if (digest_old >= 0)
2268                 json_object_copy(LUKS2_get_segment_jobj(hdr, CRYPT_DEFAULT_SEGMENT), &jobj_segment_old);
2269         else if (params->mode == CRYPT_REENCRYPT_ENCRYPT) {
2270                 r = LUKS2_get_data_size(hdr, &tmp, NULL);
2271                 if (r)
2272                         goto err;
2273                 jobj_segment_old = json_segment_create_linear(0, tmp ? &tmp : NULL, 0);
2274         }
2275
2276         if (!jobj_segment_old) {
2277                 r = -EINVAL;
2278                 goto err;
2279         }
2280
2281         r = LUKS2_segment_set_flag(jobj_segment_old, "backup-previous");
2282         if (r)
2283                 goto err;
2284         json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), segment, jobj_segment_old);
2285         jobj_segment_old = NULL;
2286         if (digest_old >= 0)
2287                 LUKS2_digest_segment_assign(cd, hdr, segment, digest_old, 1, 0);
2288         segment++;
2289
2290         if (digest_new >= 0) {
2291                 segment_offset = data_offset;
2292                 if (params->mode != CRYPT_REENCRYPT_ENCRYPT &&
2293                     modify_offset(&segment_offset, data_shift, params->direction)) {
2294                         r = -EINVAL;
2295                         goto err;
2296                 }
2297                 jobj_segment_new = json_segment_create_crypt(segment_offset,
2298                                                         crypt_get_iv_offset(cd),
2299                                                         NULL, cipher, sector_size, 0);
2300         } else if (params->mode == CRYPT_REENCRYPT_DECRYPT) {
2301                 segment_offset = data_offset;
2302                 if (modify_offset(&segment_offset, data_shift, params->direction)) {
2303                         r = -EINVAL;
2304                         goto err;
2305                 }
2306                 jobj_segment_new = json_segment_create_linear(segment_offset, NULL, 0);
2307         }
2308
2309         if (!jobj_segment_new) {
2310                 r = -EINVAL;
2311                 goto err;
2312         }
2313
2314         r = LUKS2_segment_set_flag(jobj_segment_new, "backup-final");
2315         if (r)
2316                 goto err;
2317         json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), segment, jobj_segment_new);
2318         jobj_segment_new = NULL;
2319         if (digest_new >= 0)
2320                 LUKS2_digest_segment_assign(cd, hdr, segment, digest_new, 1, 0);
2321
2322         /* FIXME: also check occupied space by keyslot in shrunk area */
2323         if (params->direction == CRYPT_REENCRYPT_FORWARD && data_shift &&
2324             crypt_metadata_device(cd) == crypt_data_device(cd) &&
2325             LUKS2_set_keyslots_size(cd, hdr, json_segment_get_offset(reencrypt_segment_new(hdr), 0))) {
2326                 log_err(cd, _("Failed to set new keyslots area size."));
2327                 r = -EINVAL;
2328                 goto err;
2329         }
2330
2331         return 0;
2332 err:
2333         json_object_put(jobj_segment_new);
2334         json_object_put(jobj_segment_old);
2335         return r;
2336 }
2337
/*
 * Verify that the unlocked volume keys in @vks match the reencryption
 * digests and, when keyring is in use, upload them to the kernel keyring.
 *
 * Returns 0 on success, -ENOENT when a required key is missing from @vks,
 * -EINVAL on digest mismatch, or keyring load error. On failure in the
 * old-digest phase any keys already placed in the keyring are dropped.
 */
static int reencrypt_verify_and_upload_keys(struct crypt_device *cd, struct luks2_hdr *hdr, int digest_old, int digest_new, struct volume_key *vks)
{
	struct volume_key *vk;
	int r;

	if (digest_new >= 0) {
		vk = crypt_volume_key_by_id(vks, digest_new);
		if (!vk)
			return -ENOENT;

		if (LUKS2_digest_verify_by_digest(cd, hdr, digest_new, vk) != digest_new)
			return -EINVAL;

		/* cipher_null segments never use the keyring */
		if (crypt_use_keyring_for_vk(cd) &&
		    !crypt_is_cipher_null(reencrypt_segment_cipher_new(hdr))) {
			r = LUKS2_volume_key_load_in_keyring_by_digest(cd, hdr, vk, crypt_volume_key_get_id(vk));
			if (r)
				return r;
		}
	}

	/* identical digests mean the same key was already handled above */
	if (digest_old < 0 || digest_old == digest_new)
		return 0;

	vk = crypt_volume_key_by_id(vks, digest_old);
	if (!vk) {
		r = -ENOENT;
		goto err;
	}

	if (LUKS2_digest_verify_by_digest(cd, hdr, digest_old, vk) != digest_old) {
		r = -EINVAL;
		goto err;
	}

	if (crypt_use_keyring_for_vk(cd) &&
	    !crypt_is_cipher_null(reencrypt_segment_cipher_old(hdr))) {
		r = LUKS2_volume_key_load_in_keyring_by_digest(cd, hdr, vk, crypt_volume_key_get_id(vk));
		if (r)
			goto err;
	}

	return 0;
err:
	/* do not leave a partially uploaded key set in the keyring */
	crypt_drop_keyring_key(cd, vks);
	return r;
}
2378
/*
 * Initialize LUKS2 reencryption metadata (encrypt, reencrypt or decrypt mode).
 *
 * This function must be called with metadata lock held.
 *
 * Validates parameters, allocates the reencryption keyslot and backup
 * segments in memory, unlocks volume keys into @vks, optionally moves the
 * first data segment (offline encryption with data shift) and finally
 * commits the metadata by setting the online-reencryption requirement flag.
 *
 * Returns the reencryption keyslot id (>= 0) on success, negative errno
 * otherwise. On failure the in-memory metadata is reverted via crypt_load().
 */
static int reencrypt_init(struct crypt_device *cd,
		const char *name,
		struct luks2_hdr *hdr,
		const char *passphrase,
		size_t passphrase_size,
		int keyslot_old,
		int keyslot_new,
		const char *cipher,
		const char *cipher_mode,
		const struct crypt_params_reencrypt *params,
		struct volume_key **vks)
{
	bool move_first_segment;
	char _cipher[128];
	uint32_t sector_size;
	int r, reencrypt_keyslot, devfd = -1;
	uint64_t data_offset, dev_size = 0;
	struct crypt_dm_active_device dmd_target, dmd_source = {
		.uuid = crypt_get_uuid(cd),
		.flags = CRYPT_ACTIVATE_SHARED /* turn off exclusive open checks */
	};

	if (!params || params->mode > CRYPT_REENCRYPT_DECRYPT)
		return -EINVAL;

	/* encrypt/reencrypt need luks2 params, a full cipher spec and a new keyslot */
	if (params->mode != CRYPT_REENCRYPT_DECRYPT &&
	    (!params->luks2 || !(cipher && cipher_mode) || keyslot_new < 0))
		return -EINVAL;

	log_dbg(cd, "Initializing reencryption (mode: %s) in LUKS2 metadata.",
		    crypt_reencrypt_mode_to_str(params->mode));

	move_first_segment = (params->flags & CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT);

	/* implicit sector size 512 for decryption */
	sector_size = params->luks2 ? params->luks2->sector_size : SECTOR_SIZE;
	if (sector_size < SECTOR_SIZE || sector_size > MAX_SECTOR_SIZE ||
	    NOTPOW2(sector_size)) {
		log_err(cd, _("Unsupported encryption sector size."));
		return -EINVAL;
	}

	/* build "cipher" or "cipher-mode" spec string for the backup segments */
	if (!cipher_mode || *cipher_mode == '\0')
		r = snprintf(_cipher, sizeof(_cipher), "%s", cipher);
	else
		r = snprintf(_cipher, sizeof(_cipher), "%s-%s", cipher, cipher_mode);
	if (r < 0 || (size_t)r >= sizeof(_cipher))
		return -EINVAL;

	if (MISALIGNED(params->data_shift, sector_size >> SECTOR_SHIFT)) {
		log_err(cd, _("Data shift is not aligned to requested encryption sector size (%" PRIu32 " bytes)."), sector_size);
		return -EINVAL;
	}

	data_offset = LUKS2_get_data_offset(hdr) << SECTOR_SHIFT;

	r = device_check_access(cd, crypt_data_device(cd), DEV_OK);
	if (r)
		return r;

	r = device_check_size(cd, crypt_data_device(cd), data_offset, 1);
	if (r)
		return r;

	r = device_size(crypt_data_device(cd), &dev_size);
	if (r)
		return r;

	/* dev_size is the usable payload size from here on */
	dev_size -= data_offset;

	if (MISALIGNED(dev_size, sector_size)) {
		log_err(cd, _("Data device is not aligned to requested encryption sector size (%" PRIu32 " bytes)."), sector_size);
		return -EINVAL;
	}

	reencrypt_keyslot = LUKS2_keyslot_find_empty(hdr);
	if (reencrypt_keyslot < 0) {
		log_err(cd, _("All key slots full."));
		return -EINVAL;
	}

	/*
	 * We must perform data move with exclusive open data device
	 * to exclude another cryptsetup process to collide with
	 * encryption initialization (or mount)
	 */
	if (move_first_segment) {
		/* need room for both the moved copy and the original data */
		if (dev_size < 2 * (params->data_shift << SECTOR_SHIFT)) {
			log_err(cd, _("Device %s is too small."), device_path(crypt_data_device(cd)));
			return -EINVAL;
		}
		if (params->data_shift < LUKS2_get_data_offset(hdr)) {
			log_err(cd, _("Data shift (%" PRIu64 " sectors) is less than future data offset (%" PRIu64 " sectors)."), params->data_shift, LUKS2_get_data_offset(hdr));
			return -EINVAL;
		}
		devfd = device_open_excl(cd, crypt_data_device(cd), O_RDWR);
		if (devfd < 0) {
			if (devfd == -EBUSY)
				log_err(cd,_("Failed to open %s in exclusive mode (already mapped or mounted)."), device_path(crypt_data_device(cd)));
			return -EINVAL;
		}
	}

	if (params->mode == CRYPT_REENCRYPT_ENCRYPT) {
		/* in-memory only */
		r = reencrypt_set_encrypt_segments(cd, hdr, dev_size, params->data_shift << SECTOR_SHIFT, move_first_segment, params->direction);
		if (r)
			goto err;
	}

	r = LUKS2_keyslot_reencrypt_allocate(cd, hdr, reencrypt_keyslot,
					   params);
	if (r < 0)
		goto err;

	r = reencrypt_make_backup_segments(cd, hdr, keyslot_new, _cipher, data_offset, params);
	if (r) {
		log_dbg(cd, "Failed to create reencryption backup device segments.");
		goto err;
	}

	r = LUKS2_keyslot_open_all_segments(cd, keyslot_old, keyslot_new, passphrase, passphrase_size, vks);
	if (r < 0)
		goto err;

	r = LUKS2_keyslot_reencrypt_digest_create(cd, hdr, *vks);
	if (r < 0)
		goto err;

	/* for an active device, verify new mapping matches what is currently mapped */
	if (name && params->mode != CRYPT_REENCRYPT_ENCRYPT) {
		r = reencrypt_verify_and_upload_keys(cd, hdr, LUKS2_reencrypt_digest_old(hdr), LUKS2_reencrypt_digest_new(hdr), *vks);
		if (r)
			goto err;

		r = dm_query_device(cd, name, DM_ACTIVE_UUID | DM_ACTIVE_DEVICE |
				    DM_ACTIVE_CRYPT_KEYSIZE | DM_ACTIVE_CRYPT_KEY |
				    DM_ACTIVE_CRYPT_CIPHER, &dmd_target);
		if (r < 0)
			goto err;

		r = LUKS2_assembly_multisegment_dmd(cd, hdr, *vks, LUKS2_get_segments_jobj(hdr), &dmd_source);
		if (!r) {
			r = crypt_compare_dm_devices(cd, &dmd_source, &dmd_target);
			if (r)
				log_err(cd, _("Mismatching parameters on device %s."), name);
		}

		dm_targets_free(cd, &dmd_source);
		dm_targets_free(cd, &dmd_target);
		free(CONST_CAST(void*)dmd_target.uuid);

		if (r)
			goto err;
	}

	if (move_first_segment && reencrypt_move_data(cd, devfd, params->data_shift << SECTOR_SHIFT)) {
		r = -EIO;
		goto err;
	}

	/* This must be first and only write in LUKS2 metadata during _reencrypt_init */
	r = reencrypt_update_flag(cd, 1, true);
	if (r) {
		log_dbg(cd, "Failed to set online-reencryption requirement.");
		r = -EINVAL;
	} else
		r = reencrypt_keyslot;
err:
	device_release_excl(cd, crypt_data_device(cd));
	/* on failure drop all in-memory metadata changes by reloading from disk */
	if (r < 0)
		crypt_load(cd, CRYPT_LUKS2, NULL);

	return r;
}
2554
2555 static int reencrypt_hotzone_protect_final(struct crypt_device *cd,
2556         struct luks2_hdr *hdr, struct luks2_reencrypt *rh,
2557         const void *buffer, size_t buffer_len)
2558 {
2559         const void *pbuffer;
2560         size_t data_offset, len;
2561         int r;
2562
2563         if (rh->rp.type == REENC_PROTECTION_NONE)
2564                 return 0;
2565
2566         if (rh->rp.type == REENC_PROTECTION_CHECKSUM) {
2567                 log_dbg(cd, "Checksums hotzone resilience.");
2568
2569                 for (data_offset = 0, len = 0; data_offset < buffer_len; data_offset += rh->alignment, len += rh->rp.p.csum.hash_size) {
2570                         if (crypt_hash_write(rh->rp.p.csum.ch, (const char *)buffer + data_offset, rh->alignment)) {
2571                                 log_dbg(cd, "Failed to hash sector at offset %zu.", data_offset);
2572                                 return -EINVAL;
2573                         }
2574                         if (crypt_hash_final(rh->rp.p.csum.ch, (char *)rh->rp.p.csum.checksums + len, rh->rp.p.csum.hash_size)) {
2575                                 log_dbg(cd, "Failed to finalize hash.");
2576                                 return -EINVAL;
2577                         }
2578                 }
2579                 pbuffer = rh->rp.p.csum.checksums;
2580         } else if (rh->rp.type == REENC_PROTECTION_JOURNAL) {
2581                 log_dbg(cd, "Journal hotzone resilience.");
2582                 len = buffer_len;
2583                 pbuffer = buffer;
2584         } else if (rh->rp.type == REENC_PROTECTION_DATASHIFT) {
2585                 log_dbg(cd, "Data shift hotzone resilience.");
2586                 return LUKS2_hdr_write(cd, hdr);
2587         } else
2588                 return -EINVAL;
2589
2590         log_dbg(cd, "Going to store %zu bytes in reencrypt keyslot.", len);
2591
2592         r = LUKS2_keyslot_reencrypt_store(cd, hdr, rh->reenc_keyslot, pbuffer, len);
2593
2594         return r > 0 ? 0 : r;
2595 }
2596
/*
 * Advance the reencryption context by one completed window.
 *
 * Moves rh->offset to the next hotzone according to direction (and data
 * shift in encrypt mode), clamps rh->length at device boundaries and
 * accounts the processed bytes in rh->progress.
 *
 * Returns 0 on success, -EINVAL on short/failed read, unknown direction,
 * or when the computed offset runs past the device size.
 */
static int reencrypt_context_update(struct crypt_device *cd,
	struct luks2_reencrypt *rh)
{
	/* rh->read is the byte count of the last hotzone read; negative means I/O error */
	if (rh->read < 0)
		return -EINVAL;

	if (rh->direction == CRYPT_REENCRYPT_BACKWARD) {
		if (rh->data_shift && rh->mode == CRYPT_REENCRYPT_ENCRYPT) {
			/* encryption with data shift: step back in data_shift sized hops */
			if (rh->offset)
				rh->offset -= rh->data_shift;
			if (rh->offset && (rh->offset < rh->data_shift)) {
				/* remainder smaller than one hop: shrink window, pin offset */
				rh->length = rh->offset;
				rh->offset = rh->data_shift;
			}
			if (!rh->offset)
				rh->length = rh->data_shift;
		} else {
			/* plain backward: clamp window to what is left, then step back */
			if (rh->offset < rh->length)
				rh->length = rh->offset;
			rh->offset -= rh->length;
		}
	} else if (rh->direction == CRYPT_REENCRYPT_FORWARD) {
		rh->offset += (uint64_t)rh->read;
		/* it fails in-case of device_size < rh->offset later
		 * (unsigned subtraction may wrap here, but the overrun check below
		 * rejects that state before the bogus length is ever used) */
		if (rh->device_size - rh->offset < rh->length)
			rh->length = rh->device_size - rh->offset;
	} else
		return -EINVAL;

	if (rh->device_size < rh->offset) {
		log_dbg(cd, "Calculated reencryption offset %" PRIu64 " is beyond device size %" PRIu64 ".", rh->offset, rh->device_size);
		return -EINVAL;
	}

	rh->progress += (uint64_t)rh->read;

	return 0;
}
2635
2636 static int reencrypt_load(struct crypt_device *cd, struct luks2_hdr *hdr,
2637                 uint64_t device_size,
2638                 const struct crypt_params_reencrypt *params,
2639                 struct volume_key *vks,
2640                 struct luks2_reencrypt **rh)
2641 {
2642         int r;
2643         struct luks2_reencrypt *tmp = NULL;
2644         crypt_reencrypt_info ri = LUKS2_reencrypt_status(hdr);
2645
2646         if (ri == CRYPT_REENCRYPT_NONE) {
2647                 log_err(cd, _("Device not marked for LUKS2 reencryption."));
2648                 return -EINVAL;
2649         } else if (ri == CRYPT_REENCRYPT_INVALID)
2650                 return -EINVAL;
2651
2652         r = LUKS2_reencrypt_digest_verify(cd, hdr, vks);
2653         if (r < 0)
2654                 return r;
2655
2656         if (ri == CRYPT_REENCRYPT_CLEAN)
2657                 r = reencrypt_load_clean(cd, hdr, device_size, &tmp, params);
2658         else if (ri == CRYPT_REENCRYPT_CRASH)
2659                 r = reencrypt_load_crashed(cd, hdr, device_size, &tmp);
2660         else
2661                 r = -EINVAL;
2662
2663         if (r < 0 || !tmp) {
2664                 log_err(cd, _("Failed to load LUKS2 reencryption context."));
2665                 return r;
2666         }
2667
2668         *rh = tmp;
2669
2670         return 0;
2671 }
2672 #endif
2673 static int reencrypt_lock_internal(struct crypt_device *cd, const char *uuid, struct crypt_lock_handle **reencrypt_lock)
2674 {
2675         int r;
2676         char *lock_resource;
2677
2678         if (!crypt_metadata_locking_enabled()) {
2679                 *reencrypt_lock = NULL;
2680                 return 0;
2681         }
2682
2683         r = asprintf(&lock_resource, "LUKS2-reencryption-%s", uuid);
2684         if (r < 0)
2685                 return -ENOMEM;
2686         if (r < 20) {
2687                 r = -EINVAL;
2688                 goto out;
2689         }
2690
2691         r = crypt_write_lock(cd, lock_resource, false, reencrypt_lock);
2692 out:
2693         free(lock_resource);
2694
2695         return r;
2696 }
2697
/* internal only */
/*
 * Take the reencryption lock using the device-mapper UUID when the crypt
 * context has no LUKS UUID yet (detached/foreign header): the LUKS UUID is
 * reconstructed from the DM-UUID (format CRYPT-LUKS2-<32 hex digits>-name).
 * Otherwise the DM-UUID must match the context UUID.
 *
 * Fixes: the reconstructed hdr_uuid was never used — a NULL uuid was passed
 * to reencrypt_lock_internal() (NULL "%s" argument to asprintf). Also guard
 * against a malformed DM-UUID shorter than the fixed offsets read below.
 */
int LUKS2_reencrypt_lock_by_dm_uuid(struct crypt_device *cd, const char *dm_uuid,
	struct crypt_lock_handle **reencrypt_lock)
{
	int r;
	char hdr_uuid[37];
	const char *uuid = crypt_get_uuid(cd);

	if (!dm_uuid)
		return -EINVAL;

	if (!uuid) {
		/* offsets up to dm_uuid + 26 plus 12 characters are read below */
		if (strlen(dm_uuid) < 38)
			return -EINVAL;
		r = snprintf(hdr_uuid, sizeof(hdr_uuid), "%.8s-%.4s-%.4s-%.4s-%.12s",
			 dm_uuid + 6, dm_uuid + 14, dm_uuid + 18, dm_uuid + 22, dm_uuid + 26);
		if (r < 0 || (size_t)r != (sizeof(hdr_uuid) - 1))
			return -EINVAL;
		uuid = hdr_uuid;
	} else if (crypt_uuid_cmp(dm_uuid, uuid))
		return -EINVAL;

	return reencrypt_lock_internal(cd, uuid, reencrypt_lock);
}
2719
2720 /* internal only */
2721 int LUKS2_reencrypt_lock(struct crypt_device *cd, struct crypt_lock_handle **reencrypt_lock)
2722 {
2723         if (!cd || !crypt_get_type(cd) || strcmp(crypt_get_type(cd), CRYPT_LUKS2))
2724                 return -EINVAL;
2725
2726         return reencrypt_lock_internal(cd, crypt_get_uuid(cd), reencrypt_lock);
2727 }
2728
/* internal only */
/* Release a lock handle obtained from LUKS2_reencrypt_lock*(); NULL-safe wrapper. */
void LUKS2_reencrypt_unlock(struct crypt_device *cd, struct crypt_lock_handle *reencrypt_lock)
{
	crypt_unlock_internal(cd, reencrypt_lock);
}
2734 #if USE_LUKS2_REENCRYPTION
2735 static int reencrypt_lock_and_verify(struct crypt_device *cd, struct luks2_hdr *hdr,
2736                 struct crypt_lock_handle **reencrypt_lock)
2737 {
2738         int r;
2739         crypt_reencrypt_info ri;
2740         struct crypt_lock_handle *h;
2741
2742         ri = LUKS2_reencrypt_status(hdr);
2743         if (ri == CRYPT_REENCRYPT_INVALID) {
2744                 log_err(cd, _("Failed to get reencryption state."));
2745                 return -EINVAL;
2746         }
2747         if (ri < CRYPT_REENCRYPT_CLEAN) {
2748                 log_err(cd, _("Device is not in reencryption."));
2749                 return -EINVAL;
2750         }
2751
2752         r = LUKS2_reencrypt_lock(cd, &h);
2753         if (r < 0) {
2754                 if (r == -EBUSY)
2755                         log_err(cd, _("Reencryption process is already running."));
2756                 else
2757                         log_err(cd, _("Failed to acquire reencryption lock."));
2758                 return r;
2759         }
2760
2761         /* With reencryption lock held, reload device context and verify metadata state */
2762         r = crypt_load(cd, CRYPT_LUKS2, NULL);
2763         if (r) {
2764                 LUKS2_reencrypt_unlock(cd, h);
2765                 return r;
2766         }
2767
2768         ri = LUKS2_reencrypt_status(hdr);
2769         if (ri == CRYPT_REENCRYPT_CLEAN) {
2770                 *reencrypt_lock = h;
2771                 return 0;
2772         }
2773
2774         LUKS2_reencrypt_unlock(cd, h);
2775         log_err(cd, _("Cannot proceed with reencryption. Run reencryption recovery first."));
2776         return -EINVAL;
2777 }
2778
2779 static int reencrypt_load_by_passphrase(struct crypt_device *cd,
2780                 const char *name,
2781                 const char *passphrase,
2782                 size_t passphrase_size,
2783                 int keyslot_old,
2784                 int keyslot_new,
2785                 struct volume_key **vks,
2786                 const struct crypt_params_reencrypt *params)
2787 {
2788         int r, old_ss, new_ss;
2789         struct luks2_hdr *hdr;
2790         struct crypt_lock_handle *reencrypt_lock;
2791         struct luks2_reencrypt *rh;
2792         const struct volume_key *vk;
2793         struct crypt_dm_active_device dmd_target, dmd_source = {
2794                 .uuid = crypt_get_uuid(cd),
2795                 .flags = CRYPT_ACTIVATE_SHARED /* turn off exclusive open checks */
2796         };
2797         uint64_t minimal_size, device_size, mapping_size = 0, required_size = 0;
2798         bool dynamic;
2799         struct crypt_params_reencrypt rparams = {};
2800         uint32_t flags = 0;
2801
2802         if (params) {
2803                 rparams = *params;
2804                 required_size = params->device_size;
2805         }
2806
2807         log_dbg(cd, "Loading LUKS2 reencryption context.");
2808
2809         rh = crypt_get_luks2_reencrypt(cd);
2810         if (rh) {
2811                 LUKS2_reencrypt_free(cd, rh);
2812                 crypt_set_luks2_reencrypt(cd, NULL);
2813                 rh = NULL;
2814         }
2815
2816         hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
2817
2818         r = reencrypt_lock_and_verify(cd, hdr, &reencrypt_lock);
2819         if (r)
2820                 return r;
2821
2822         /* From now on we hold reencryption lock */
2823
2824         if (LUKS2_get_data_size(hdr, &minimal_size, &dynamic))
2825                 return -EINVAL;
2826
2827         /* some configurations provides fixed device size */
2828         r = LUKS2_reencrypt_check_device_size(cd, hdr, minimal_size, &device_size, false, dynamic);
2829         if (r) {
2830                 r = -EINVAL;
2831                 goto err;
2832         }
2833
2834         minimal_size >>= SECTOR_SHIFT;
2835
2836         old_ss = reencrypt_get_sector_size_old(hdr);
2837         new_ss = reencrypt_get_sector_size_new(hdr);
2838
2839         r = reencrypt_verify_and_upload_keys(cd, hdr, LUKS2_reencrypt_digest_old(hdr), LUKS2_reencrypt_digest_new(hdr), *vks);
2840         if (r == -ENOENT) {
2841                 log_dbg(cd, "Keys are not ready. Unlocking all volume keys.");
2842                 r = LUKS2_keyslot_open_all_segments(cd, keyslot_old, keyslot_new, passphrase, passphrase_size, vks);
2843                 if (r < 0)
2844                         goto err;
2845                 r = reencrypt_verify_and_upload_keys(cd, hdr, LUKS2_reencrypt_digest_old(hdr), LUKS2_reencrypt_digest_new(hdr), *vks);
2846         }
2847
2848         if (r < 0)
2849                 goto err;
2850
2851         if (name) {
2852                 r = dm_query_device(cd, name, DM_ACTIVE_UUID | DM_ACTIVE_DEVICE |
2853                                     DM_ACTIVE_CRYPT_KEYSIZE | DM_ACTIVE_CRYPT_KEY |
2854                                     DM_ACTIVE_CRYPT_CIPHER, &dmd_target);
2855                 if (r < 0)
2856                         goto err;
2857                 flags = dmd_target.flags;
2858
2859                 /*
2860                  * By default reencryption code aims to retain flags from existing dm device.
2861                  * The keyring activation flag can not be inherited if original cipher is null.
2862                  *
2863                  * In this case override the flag based on decision made in reencrypt_verify_and_upload_keys
2864                  * above. The code checks if new VK is eligible for keyring.
2865                  */
2866                 vk = crypt_volume_key_by_id(*vks, LUKS2_reencrypt_digest_new(hdr));
2867                 if (vk && vk->key_description && crypt_is_cipher_null(reencrypt_segment_cipher_old(hdr))) {
2868                         flags |= CRYPT_ACTIVATE_KEYRING_KEY;
2869                         dmd_source.flags |= CRYPT_ACTIVATE_KEYRING_KEY;
2870                 }
2871
2872                 r = LUKS2_assembly_multisegment_dmd(cd, hdr, *vks, LUKS2_get_segments_jobj(hdr), &dmd_source);
2873                 if (!r) {
2874                         r = crypt_compare_dm_devices(cd, &dmd_source, &dmd_target);
2875                         if (r)
2876                                 log_err(cd, _("Mismatching parameters on device %s."), name);
2877                 }
2878
2879                 dm_targets_free(cd, &dmd_source);
2880                 dm_targets_free(cd, &dmd_target);
2881                 free(CONST_CAST(void*)dmd_target.uuid);
2882                 if (r)
2883                         goto err;
2884                 mapping_size = dmd_target.size;
2885         }
2886
2887         r = -EINVAL;
2888         if (required_size && mapping_size && (required_size != mapping_size)) {
2889                 log_err(cd, _("Active device size and requested reencryption size don't match."));
2890                 goto err;
2891         }
2892
2893         if (mapping_size)
2894                 required_size = mapping_size;
2895
2896         if (required_size) {
2897                 /* TODO: Add support for changing fixed minimal size in reencryption mda where possible */
2898                 if ((minimal_size && (required_size < minimal_size)) ||
2899                     (required_size > (device_size >> SECTOR_SHIFT)) ||
2900                     (!dynamic && (required_size != minimal_size)) ||
2901                     (old_ss > 0 && MISALIGNED(required_size, old_ss >> SECTOR_SHIFT)) ||
2902                     (new_ss > 0 && MISALIGNED(required_size, new_ss >> SECTOR_SHIFT))) {
2903                         log_err(cd, _("Illegal device size requested in reencryption parameters."));
2904                         goto err;
2905                 }
2906                 rparams.device_size = required_size;
2907         }
2908
2909         r = reencrypt_load(cd, hdr, device_size, &rparams, *vks, &rh);
2910         if (r < 0 || !rh)
2911                 goto err;
2912
2913         if (name && (r = reencrypt_context_set_names(rh, name)))
2914                 goto err;
2915
2916         /* Reassure device is not mounted and there's no dm mapping active */
2917         if (!name && (device_open_excl(cd, crypt_data_device(cd), O_RDONLY) < 0)) {
2918                 log_err(cd,_("Failed to open %s in exclusive mode (already mapped or mounted)."), device_path(crypt_data_device(cd)));
2919                 r = -EBUSY;
2920                 goto err;
2921         }
2922         device_release_excl(cd, crypt_data_device(cd));
2923
2924         /* FIXME: There's a race for dm device activation not managed by cryptsetup.
2925          *
2926          * 1) excl close
2927          * 2) rogue dm device activation
2928          * 3) one or more dm-crypt based wrapper activation
2929          * 4) next excl open get's skipped due to 3) device from 2) remains undetected.
2930          */
2931         r = reencrypt_init_storage_wrappers(cd, hdr, rh, *vks);
2932         if (r)
2933                 goto err;
2934
2935         /* If one of wrappers is based on dmcrypt fallback it already blocked mount */
2936         if (!name && crypt_storage_wrapper_get_type(rh->cw1) != DMCRYPT &&
2937             crypt_storage_wrapper_get_type(rh->cw2) != DMCRYPT) {
2938                 if (device_open_excl(cd, crypt_data_device(cd), O_RDONLY) < 0) {
2939                         log_err(cd,_("Failed to open %s in exclusive mode (already mapped or mounted)."), device_path(crypt_data_device(cd)));
2940                         r = -EBUSY;
2941                         goto err;
2942                 }
2943         }
2944
2945         rh->flags = flags;
2946
2947         MOVE_REF(rh->vks, *vks);
2948         MOVE_REF(rh->reenc_lock, reencrypt_lock);
2949
2950         crypt_set_luks2_reencrypt(cd, rh);
2951
2952         return 0;
2953 err:
2954         LUKS2_reencrypt_unlock(cd, reencrypt_lock);
2955         LUKS2_reencrypt_free(cd, rh);
2956         return r;
2957 }
2958
2959 static int reencrypt_recovery_by_passphrase(struct crypt_device *cd,
2960         struct luks2_hdr *hdr,
2961         int keyslot_old,
2962         int keyslot_new,
2963         const char *passphrase,
2964         size_t passphrase_size)
2965 {
2966         int r;
2967         crypt_reencrypt_info ri;
2968         struct crypt_lock_handle *reencrypt_lock;
2969
2970         r = LUKS2_reencrypt_lock(cd, &reencrypt_lock);
2971         if (r) {
2972                 if (r == -EBUSY)
2973                         log_err(cd, _("Reencryption in-progress. Cannot perform recovery."));
2974                 else
2975                         log_err(cd, _("Failed to get reencryption lock."));
2976                 return r;
2977         }
2978
2979         if ((r = crypt_load(cd, CRYPT_LUKS2, NULL))) {
2980                 LUKS2_reencrypt_unlock(cd, reencrypt_lock);
2981                 return r;
2982         }
2983
2984         ri = LUKS2_reencrypt_status(hdr);
2985         if (ri == CRYPT_REENCRYPT_INVALID) {
2986                 LUKS2_reencrypt_unlock(cd, reencrypt_lock);
2987                 return -EINVAL;
2988         }
2989
2990         if (ri == CRYPT_REENCRYPT_CRASH) {
2991                 r = LUKS2_reencrypt_locked_recovery_by_passphrase(cd, keyslot_old, keyslot_new,
2992                                 passphrase, passphrase_size, 0, NULL);
2993                 if (r < 0)
2994                         log_err(cd, _("LUKS2 reencryption recovery failed."));
2995         } else {
2996                 log_dbg(cd, "No LUKS2 reencryption recovery needed.");
2997                 r = 0;
2998         }
2999
3000         LUKS2_reencrypt_unlock(cd, reencrypt_lock);
3001         return r;
3002 }
3003
/*
 * Repair LUKS2 reencryption metadata using a passphrase.
 *
 * Re-creates the reencryption keyslot digest from the unlocked volume keys
 * and migrates the on-disk online-reencrypt requirement flag from version 1
 * to version 2 (committing metadata in the process).
 *
 * Returns 0 on success (or when reencryption finished in the meantime),
 * negative errno otherwise.
 */
static int reencrypt_repair_by_passphrase(
                struct crypt_device *cd,
                struct luks2_hdr *hdr,
                int keyslot_old,
                int keyslot_new,
                const char *passphrase,
                size_t passphrase_size)
{
        int r;
        struct crypt_lock_handle *reencrypt_lock;
        struct luks2_reencrypt *rh;
        crypt_reencrypt_info ri;
        struct volume_key *vks = NULL;

        log_dbg(cd, "Loading LUKS2 reencryption context for metadata repair.");

        /* Drop any stale in-memory reencryption context before repairing. */
        rh = crypt_get_luks2_reencrypt(cd);
        if (rh) {
                LUKS2_reencrypt_free(cd, rh);
                crypt_set_luks2_reencrypt(cd, NULL);
                rh = NULL;
        }

        ri = LUKS2_reencrypt_status(hdr);
        if (ri == CRYPT_REENCRYPT_INVALID)
                return -EINVAL;

        /* Repair only makes sense for clean/crash in-reencryption states. */
        if (ri < CRYPT_REENCRYPT_CLEAN) {
                log_err(cd, _("Device is not in reencryption."));
                return -EINVAL;
        }

        r = LUKS2_reencrypt_lock(cd, &reencrypt_lock);
        if (r < 0) {
                if (r == -EBUSY)
                        log_err(cd, _("Reencryption process is already running."));
                else
                        log_err(cd, _("Failed to acquire reencryption lock."));
                return r;
        }

        /* With reencryption lock held, reload device context and verify metadata state */
        r = crypt_load(cd, CRYPT_LUKS2, NULL);
        if (r)
                goto out;

        ri = LUKS2_reencrypt_status(hdr);
        if (ri == CRYPT_REENCRYPT_INVALID) {
                r = -EINVAL;
                goto out;
        }
        if (ri == CRYPT_REENCRYPT_NONE) {
                /* Reencryption finished in the meantime; nothing to repair. */
                r = 0;
                goto out;
        }

        /* Unlock all volume keys needed to recompute the reencrypt digest. */
        r = LUKS2_keyslot_open_all_segments(cd, keyslot_old, keyslot_new, passphrase, passphrase_size, &vks);
        if (r < 0)
                goto out;

        r = LUKS2_keyslot_reencrypt_digest_create(cd, hdr, vks);
        crypt_free_volume_key(vks);
        vks = NULL;
        if (r < 0)
                goto out;

        /* removes online-reencrypt flag v1 */
        if ((r = reencrypt_update_flag(cd, 0, false)))
                goto out;

        /* adds online-reencrypt flag v2 and commits metadata */
        r = reencrypt_update_flag(cd, 1, true);
out:
        LUKS2_reencrypt_unlock(cd, reencrypt_lock);
        /* Safe even if vks is already NULL. */
        crypt_free_volume_key(vks);
        return r;

}
3082 #endif
/*
 * Common entry point for initializing or resuming LUKS2 reencryption
 * with a passphrase.
 *
 * Handles three short-circuit paths first: metadata repair
 * (CRYPT_REENCRYPT_REPAIR_NEEDED), crash recovery (CRYPT_REENCRYPT_RECOVERY),
 * then either initializes new reencryption metadata or resumes an already
 * initialized one. Returns the reencrypt keyslot number (>= 0) on success,
 * negative errno otherwise.
 */
static int reencrypt_init_by_passphrase(struct crypt_device *cd,
        const char *name,
        const char *passphrase,
        size_t passphrase_size,
        int keyslot_old,
        int keyslot_new,
        const char *cipher,
        const char *cipher_mode,
        const struct crypt_params_reencrypt *params)
{
#if USE_LUKS2_REENCRYPTION
        int r;
        crypt_reencrypt_info ri;
        struct volume_key *vks = NULL;
        uint32_t flags = params ? params->flags : 0;
        struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);

        /* short-circuit in reencryption metadata update and finish immediately. */
        if (flags & CRYPT_REENCRYPT_REPAIR_NEEDED)
                return reencrypt_repair_by_passphrase(cd, hdr, keyslot_old, keyslot_new, passphrase, passphrase_size);

        /* short-circuit in recovery and finish immediately. */
        if (flags & CRYPT_REENCRYPT_RECOVERY)
                return reencrypt_recovery_by_passphrase(cd, hdr, keyslot_old, keyslot_new, passphrase, passphrase_size);

        /* Validate requested cipher against kernel support (skip wrapped keys). */
        if (cipher && !crypt_cipher_wrapped_key(cipher, cipher_mode)) {
                r = crypt_keyslot_get_key_size(cd, keyslot_new);
                if (r < 0)
                        return r;
                r = LUKS2_check_cipher(cd, r, cipher, cipher_mode);
                if (r < 0)
                        return r;
        }

        r = LUKS2_device_write_lock(cd, hdr, crypt_metadata_device(cd));
        if (r)
                return r;

        ri = LUKS2_reencrypt_status(hdr);
        if (ri == CRYPT_REENCRYPT_INVALID) {
                device_write_unlock(cd, crypt_metadata_device(cd));
                return -EINVAL;
        }

        /* INITIALIZE_ONLY must not run on already initialized metadata. */
        if ((ri > CRYPT_REENCRYPT_NONE) && (flags & CRYPT_REENCRYPT_INITIALIZE_ONLY)) {
                device_write_unlock(cd, crypt_metadata_device(cd));
                log_err(cd, _("LUKS2 reencryption already initialized in metadata."));
                return -EBUSY;
        }

        /* Fresh initialization unless caller requested resume-only. */
        if (ri == CRYPT_REENCRYPT_NONE && !(flags & CRYPT_REENCRYPT_RESUME_ONLY)) {
                r = reencrypt_init(cd, name, hdr, passphrase, passphrase_size, keyslot_old, keyslot_new, cipher, cipher_mode, params, &vks);
                if (r < 0)
                        log_err(cd, _("Failed to initialize LUKS2 reencryption in metadata."));
        } else if (ri > CRYPT_REENCRYPT_NONE) {
                log_dbg(cd, "LUKS2 reencryption already initialized.");
                r = 0;
        }

        device_write_unlock(cd, crypt_metadata_device(cd));

        if (r < 0 || (flags & CRYPT_REENCRYPT_INITIALIZE_ONLY))
                goto out;

        /* Load the runtime reencryption context (and device stack if online). */
        r = reencrypt_load_by_passphrase(cd, name, passphrase, passphrase_size, keyslot_old, keyslot_new, &vks, params);
out:
        if (r < 0)
                crypt_drop_keyring_key(cd, vks);
        crypt_free_volume_key(vks);
        return r < 0 ? r : LUKS2_find_keyslot(hdr, "reencrypt");
#else
        log_err(cd, _("This operation is not supported for this device type."));
        return -ENOTSUP;
#endif
}
3158
3159 int crypt_reencrypt_init_by_keyring(struct crypt_device *cd,
3160         const char *name,
3161         const char *passphrase_description,
3162         int keyslot_old,
3163         int keyslot_new,
3164         const char *cipher,
3165         const char *cipher_mode,
3166         const struct crypt_params_reencrypt *params)
3167 {
3168         int r;
3169         char *passphrase;
3170         size_t passphrase_size;
3171
3172         if (onlyLUKS2mask(cd, CRYPT_REQUIREMENT_ONLINE_REENCRYPT) || !passphrase_description)
3173                 return -EINVAL;
3174         if (params && (params->flags & CRYPT_REENCRYPT_INITIALIZE_ONLY) && (params->flags & CRYPT_REENCRYPT_RESUME_ONLY))
3175                 return -EINVAL;
3176
3177         r = keyring_get_passphrase(passphrase_description, &passphrase, &passphrase_size);
3178         if (r < 0) {
3179                 log_err(cd, _("Failed to read passphrase from keyring (error %d)."), r);
3180                 return -EINVAL;
3181         }
3182
3183         r = reencrypt_init_by_passphrase(cd, name, passphrase, passphrase_size, keyslot_old, keyslot_new, cipher, cipher_mode, params);
3184
3185         crypt_safe_memzero(passphrase, passphrase_size);
3186         free(passphrase);
3187
3188         return r;
3189 }
3190
3191 int crypt_reencrypt_init_by_passphrase(struct crypt_device *cd,
3192         const char *name,
3193         const char *passphrase,
3194         size_t passphrase_size,
3195         int keyslot_old,
3196         int keyslot_new,
3197         const char *cipher,
3198         const char *cipher_mode,
3199         const struct crypt_params_reencrypt *params)
3200 {
3201         if (onlyLUKS2mask(cd, CRYPT_REQUIREMENT_ONLINE_REENCRYPT) || !passphrase)
3202                 return -EINVAL;
3203         if (params && (params->flags & CRYPT_REENCRYPT_INITIALIZE_ONLY) && (params->flags & CRYPT_REENCRYPT_RESUME_ONLY))
3204                 return -EINVAL;
3205
3206         return reencrypt_init_by_passphrase(cd, name, passphrase, passphrase_size, keyslot_old, keyslot_new, cipher, cipher_mode, params);
3207 }
3208
3209 #if USE_LUKS2_REENCRYPTION
3210 static reenc_status_t reencrypt_step(struct crypt_device *cd,
3211                 struct luks2_hdr *hdr,
3212                 struct luks2_reencrypt *rh,
3213                 uint64_t device_size,
3214                 bool online)
3215 {
3216         int r;
3217
3218         /* in memory only */
3219         r = reencrypt_make_segments(cd, hdr, rh, device_size);
3220         if (r)
3221                 return REENC_ERR;
3222
3223         r = reencrypt_assign_segments(cd, hdr, rh, 1, 0);
3224         if (r) {
3225                 log_err(cd, _("Failed to set device segments for next reencryption hotzone."));
3226                 return REENC_ERR;
3227         }
3228
3229         if (online) {
3230                 r = reencrypt_refresh_overlay_devices(cd, hdr, rh->overlay_name, rh->hotzone_name, rh->vks, rh->device_size, rh->flags);
3231                 /* Teardown overlay devices with dm-error. None bio shall pass! */
3232                 if (r != REENC_OK)
3233                         return r;
3234         }
3235
3236         log_dbg(cd, "Reencrypting chunk starting at offset: %" PRIu64 ", size :%" PRIu64 ".", rh->offset, rh->length);
3237         log_dbg(cd, "data_offset: %" PRIu64, crypt_get_data_offset(cd) << SECTOR_SHIFT);
3238
3239         if (!rh->offset && rh->mode == CRYPT_REENCRYPT_ENCRYPT && rh->data_shift &&
3240             rh->jobj_segment_moved) {
3241                 crypt_storage_wrapper_destroy(rh->cw1);
3242                 log_dbg(cd, "Reinitializing old segment storage wrapper for moved segment.");
3243                 r = crypt_storage_wrapper_init(cd, &rh->cw1, crypt_data_device(cd),
3244                                 LUKS2_reencrypt_get_data_offset_moved(hdr),
3245                                 crypt_get_iv_offset(cd),
3246                                 reencrypt_get_sector_size_old(hdr),
3247                                 reencrypt_segment_cipher_old(hdr),
3248                                 crypt_volume_key_by_id(rh->vks, rh->digest_old),
3249                                 rh->wflags1);
3250                 if (r) {
3251                         log_err(cd, _("Failed to initialize old segment storage wrapper."));
3252                         return REENC_ROLLBACK;
3253                 }
3254         }
3255
3256         rh->read = crypt_storage_wrapper_read(rh->cw1, rh->offset, rh->reenc_buffer, rh->length);
3257         if (rh->read < 0) {
3258                 /* severity normal */
3259                 log_err(cd, _("Failed to read hotzone area starting at %" PRIu64 "."), rh->offset);
3260                 return REENC_ROLLBACK;
3261         }
3262
3263         /* metadata commit point */
3264         r = reencrypt_hotzone_protect_final(cd, hdr, rh, rh->reenc_buffer, rh->read);
3265         if (r < 0) {
3266                 /* severity normal */
3267                 log_err(cd, _("Failed to write reencryption resilience metadata."));
3268                 return REENC_ROLLBACK;
3269         }
3270
3271         r = crypt_storage_wrapper_decrypt(rh->cw1, rh->offset, rh->reenc_buffer, rh->read);
3272         if (r) {
3273                 /* severity normal */
3274                 log_err(cd, _("Decryption failed."));
3275                 return REENC_ROLLBACK;
3276         }
3277         if (rh->read != crypt_storage_wrapper_encrypt_write(rh->cw2, rh->offset, rh->reenc_buffer, rh->read)) {
3278                 /* severity fatal */
3279                 log_err(cd, _("Failed to write hotzone area starting at %" PRIu64 "."), rh->offset);
3280                 return REENC_FATAL;
3281         }
3282
3283         if (rh->rp.type != REENC_PROTECTION_NONE && crypt_storage_wrapper_datasync(rh->cw2)) {
3284                 log_err(cd, _("Failed to sync data."));
3285                 return REENC_FATAL;
3286         }
3287
3288         /* metadata commit safe point */
3289         r = reencrypt_assign_segments(cd, hdr, rh, 0, rh->rp.type != REENC_PROTECTION_NONE);
3290         if (r) {
3291                 /* severity fatal */
3292                 log_err(cd, _("Failed to update metadata after current reencryption hotzone completed."));
3293                 return REENC_FATAL;
3294         }
3295
3296         if (online) {
3297                 /* severity normal */
3298                 log_dbg(cd, "Resuming device %s", rh->hotzone_name);
3299                 r = dm_resume_device(cd, rh->hotzone_name, DM_RESUME_PRIVATE);
3300                 if (r) {
3301                         log_err(cd, _("Failed to resume device %s."), rh->hotzone_name);
3302                         return REENC_ERR;
3303                 }
3304         }
3305
3306         return REENC_OK;
3307 }
3308
3309 static int reencrypt_erase_backup_segments(struct crypt_device *cd,
3310                 struct luks2_hdr *hdr)
3311 {
3312         int segment = LUKS2_get_segment_id_by_flag(hdr, "backup-previous");
3313         if (segment >= 0) {
3314                 if (LUKS2_digest_segment_assign(cd, hdr, segment, CRYPT_ANY_DIGEST, 0, 0))
3315                         return -EINVAL;
3316                 json_object_object_del_by_uint(LUKS2_get_segments_jobj(hdr), segment);
3317         }
3318         segment = LUKS2_get_segment_id_by_flag(hdr, "backup-final");
3319         if (segment >= 0) {
3320                 if (LUKS2_digest_segment_assign(cd, hdr, segment, CRYPT_ANY_DIGEST, 0, 0))
3321                         return -EINVAL;
3322                 json_object_object_del_by_uint(LUKS2_get_segments_jobj(hdr), segment);
3323         }
3324         segment = LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment");
3325         if (segment >= 0) {
3326                 if (LUKS2_digest_segment_assign(cd, hdr, segment, CRYPT_ANY_DIGEST, 0, 0))
3327                         return -EINVAL;
3328                 json_object_object_del_by_uint(LUKS2_get_segments_jobj(hdr), segment);
3329         }
3330
3331         return 0;
3332 }
3333
3334 static int reencrypt_wipe_moved_segment(struct crypt_device *cd, struct luks2_hdr *hdr, struct luks2_reencrypt *rh)
3335 {
3336         int r = 0;
3337         uint64_t offset, length;
3338
3339         if (rh->jobj_segment_moved) {
3340                 offset = json_segment_get_offset(rh->jobj_segment_moved, 0);
3341                 length = json_segment_get_size(rh->jobj_segment_moved, 0);
3342                 log_dbg(cd, "Wiping %" PRIu64 " bytes of backup segment data at offset %" PRIu64,
3343                         length, offset);
3344                 r = crypt_wipe_device(cd, crypt_data_device(cd), CRYPT_WIPE_RANDOM,
3345                                 offset, length, 1024 * 1024, NULL, NULL);
3346         }
3347
3348         return r;
3349 }
3350
/*
 * Tear down reencryption after a successful (or cleanly interrupted) run.
 *
 * Commits pending metadata if no resilience protection was used, reloads and
 * resumes the online device stack, and — when the whole device is finished —
 * wipes backup data, drops unused keyslots/segments and removes the
 * reencryption keyslot (which also clears the on-disk requirement flag).
 *
 * Returns 0 on success, -EINVAL on metadata write/keyslot destroy failure.
 */
static int reencrypt_teardown_ok(struct crypt_device *cd, struct luks2_hdr *hdr, struct luks2_reencrypt *rh)
{
        int i, r;
        uint32_t dmt_flags;
        bool finished = !(rh->device_size > rh->progress);

        /* Without resilience protection metadata was only updated in memory. */
        if (rh->rp.type == REENC_PROTECTION_NONE &&
            LUKS2_hdr_write(cd, hdr)) {
                log_err(cd, _("Failed to write LUKS2 metadata."));
                return -EINVAL;
        }

        if (rh->online) {
                /* Reload the top-level device with final segment mapping, then resume. */
                r = LUKS2_reload(cd, rh->device_name, rh->vks, rh->device_size, rh->flags);
                if (r)
                        log_err(cd, _("Failed to reload device %s."), rh->device_name);
                if (!r) {
                        r = dm_resume_device(cd, rh->device_name, DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH);
                        if (r)
                                log_err(cd, _("Failed to resume device %s."), rh->device_name);
                }
                dm_remove_device(cd, rh->overlay_name, 0);
                dm_remove_device(cd, rh->hotzone_name, 0);

                /* Finished decryption: schedule deferred removal of the crypt device. */
                if (!r && finished && rh->mode == CRYPT_REENCRYPT_DECRYPT &&
                    !dm_flags(cd, DM_LINEAR, &dmt_flags) && (dmt_flags & DM_DEFERRED_SUPPORTED))
                    dm_remove_device(cd, rh->device_name, CRYPT_DEACTIVATE_DEFERRED);
        }

        if (finished) {
                if (reencrypt_wipe_moved_segment(cd, hdr, rh))
                        log_err(cd, _("Failed to wipe backup segment data."));
                if (reencrypt_get_data_offset_new(hdr) && LUKS2_set_keyslots_size(cd, hdr, reencrypt_get_data_offset_new(hdr)))
                        log_dbg(cd, "Failed to set new keyslots area size.");
                /* Destroy keyslots still bound to the old volume key digest. */
                if (rh->digest_old >= 0 && rh->digest_new != rh->digest_old)
                        for (i = 0; i < LUKS2_KEYSLOTS_MAX; i++)
                                if (LUKS2_digest_by_keyslot(hdr, i) == rh->digest_old && crypt_keyslot_destroy(cd, i))
                                        log_err(cd, _("Failed to remove unused (unbound) keyslot %d."), i);

                if (reencrypt_erase_backup_segments(cd, hdr))
                        log_dbg(cd, "Failed to erase backup segments");

                if (reencrypt_update_flag(cd, 0, false))
                        log_dbg(cd, "Failed to disable reencryption requirement flag.");

                /* metadata commit point also removing reencryption flag on-disk */
                if (crypt_keyslot_destroy(cd, rh->reenc_keyslot)) {
                        log_err(cd, _("Failed to remove reencryption keyslot."));
                        return -EINVAL;
                }
        }

        return 0;
}
3405
3406 static void reencrypt_teardown_fatal(struct crypt_device *cd, struct luks2_hdr *hdr, struct luks2_reencrypt *rh)
3407 {
3408         log_err(cd, _("Fatal error while reencrypting chunk starting at %" PRIu64 ", %" PRIu64 " sectors long."),
3409                 (rh->offset >> SECTOR_SHIFT) + crypt_get_data_offset(cd), rh->length >> SECTOR_SHIFT);
3410
3411         if (rh->online) {
3412                 log_err(cd, "Reencryption was run in online mode.");
3413                 if (dm_status_suspended(cd, rh->hotzone_name) > 0) {
3414                         log_dbg(cd, "Hotzone device %s suspended, replacing with dm-error.", rh->hotzone_name);
3415                         if (dm_error_device(cd, rh->hotzone_name)) {
3416                                 log_err(cd, _("Failed to replace suspended device %s with dm-error target."), rh->hotzone_name);
3417                                 log_err(cd, _("Do not resume the device unless replaced with error target manually."));
3418                         }
3419                 }
3420         }
3421 }
3422
3423 static int reencrypt_teardown(struct crypt_device *cd, struct luks2_hdr *hdr,
3424                 struct luks2_reencrypt *rh, reenc_status_t rs, bool interrupted,
3425                 int (*progress)(uint64_t size, uint64_t offset, void *usrptr))
3426 {
3427         int r;
3428
3429         switch (rs) {
3430         case REENC_OK:
3431                 if (progress && !interrupted)
3432                         progress(rh->device_size, rh->progress, NULL);
3433                 r = reencrypt_teardown_ok(cd, hdr, rh);
3434                 break;
3435         case REENC_FATAL:
3436                 reencrypt_teardown_fatal(cd, hdr, rh);
3437                 /* fall-through */
3438         default:
3439                 r = -EIO;
3440         }
3441
3442         /* this frees reencryption lock */
3443         LUKS2_reencrypt_free(cd, rh);
3444         crypt_set_luks2_reencrypt(cd, NULL);
3445
3446         return r;
3447 }
3448 #endif
3449 int crypt_reencrypt(struct crypt_device *cd,
3450                     int (*progress)(uint64_t size, uint64_t offset, void *usrptr))
3451 {
3452 #if USE_LUKS2_REENCRYPTION
3453         int r;
3454         crypt_reencrypt_info ri;
3455         struct luks2_hdr *hdr;
3456         struct luks2_reencrypt *rh;
3457         reenc_status_t rs;
3458         bool quit = false;
3459
3460         if (onlyLUKS2mask(cd, CRYPT_REQUIREMENT_ONLINE_REENCRYPT))
3461                 return -EINVAL;
3462
3463         hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
3464
3465         ri = LUKS2_reencrypt_status(hdr);
3466         if (ri > CRYPT_REENCRYPT_CLEAN) {
3467                 log_err(cd, _("Cannot proceed with reencryption. Unexpected reencryption status."));
3468                 return -EINVAL;
3469         }
3470
3471         rh = crypt_get_luks2_reencrypt(cd);
3472         if (!rh || (!rh->reenc_lock && crypt_metadata_locking_enabled())) {
3473                 log_err(cd, _("Missing or invalid reencrypt context."));
3474                 return -EINVAL;
3475         }
3476
3477         log_dbg(cd, "Resuming LUKS2 reencryption.");
3478
3479         if (rh->online && reencrypt_init_device_stack(cd, rh)) {
3480                 log_err(cd, _("Failed to initialize reencryption device stack."));
3481                 return -EINVAL;
3482         }
3483
3484         log_dbg(cd, "Progress %" PRIu64 ", device_size %" PRIu64, rh->progress, rh->device_size);
3485
3486         rs = REENC_OK;
3487
3488         /* update reencrypt keyslot protection parameters in memory only */
3489         if (!quit && (rh->device_size > rh->progress)) {
3490                 r = reencrypt_keyslot_update(cd, rh);
3491                 if (r < 0) {
3492                         log_dbg(cd, "Keyslot update failed.");
3493                         return reencrypt_teardown(cd, hdr, rh, REENC_ERR, quit, progress);
3494                 }
3495         }
3496
3497         while (!quit && (rh->device_size > rh->progress)) {
3498                 rs = reencrypt_step(cd, hdr, rh, rh->device_size, rh->online);
3499                 if (rs != REENC_OK)
3500                         break;
3501
3502                 log_dbg(cd, "Progress %" PRIu64 ", device_size %" PRIu64, rh->progress, rh->device_size);
3503                 if (progress && progress(rh->device_size, rh->progress, NULL))
3504                         quit = true;
3505
3506                 r = reencrypt_context_update(cd, rh);
3507                 if (r) {
3508                         log_err(cd, _("Failed to update reencryption context."));
3509                         rs = REENC_ERR;
3510                         break;
3511                 }
3512
3513                 log_dbg(cd, "Next reencryption offset will be %" PRIu64 " sectors.", rh->offset);
3514                 log_dbg(cd, "Next reencryption chunk size will be %" PRIu64 " sectors).", rh->length);
3515         }
3516
3517         r = reencrypt_teardown(cd, hdr, rh, rs, quit, progress);
3518         return r;
3519 #else
3520         log_err(cd, _("This operation is not supported for this device type."));
3521         return -ENOTSUP;
3522 #endif
3523 }
3524
3525 #if USE_LUKS2_REENCRYPTION
/*
 * Perform offline crash recovery of an interrupted hotzone.
 *
 * Loads a fresh reencryption context, replays the interrupted segment from
 * resilience data, commits repaired segment assignment and runs the regular
 * teardown. Returns 0 on success, negative errno otherwise.
 */
static int reencrypt_recovery(struct crypt_device *cd,
                struct luks2_hdr *hdr,
                uint64_t device_size,
                struct volume_key *vks)
{
        int r;
        struct luks2_reencrypt *rh = NULL;

        r = reencrypt_load(cd, hdr, device_size, NULL, vks, &rh);
        if (r < 0) {
                log_err(cd, _("Failed to load LUKS2 reencryption context."));
                return r;
        }

        /* Replay the interrupted hotzone from resilience (checksum/journal) data. */
        r = reencrypt_recover_segment(cd, hdr, rh, vks);
        if (r < 0)
                goto err;

        if ((r = reencrypt_assign_segments(cd, hdr, rh, 0, 0)))
                goto err;

        r = reencrypt_context_update(cd, rh);
        if (r) {
                log_err(cd, _("Failed to update reencryption context."));
                goto err;
        }

        r = reencrypt_teardown_ok(cd, hdr, rh);
        if (!r)
                r = LUKS2_hdr_write(cd, hdr);
err:
        LUKS2_reencrypt_free(cd, rh);

        return r;
}
3561 #endif
3562 /*
3563  * use only for calculation of minimal data device size.
3564  * The real data offset is taken directly from segments!
3565  */
/*
 * Minimal data offset for device-size calculations (sectors when blockwise,
 * bytes otherwise). In clean forward reencryption the not-yet-reencrypted
 * data still includes the data shift, so it is added to the offset.
 *
 * NOTE(review): return type is int while data_offset is uint64_t; the byte
 * value (blockwise == false) can truncate for large offsets — confirm all
 * callers only use it for minimal-size checks within int range.
 */
int LUKS2_reencrypt_data_offset(struct luks2_hdr *hdr, bool blockwise)
{
        crypt_reencrypt_info ri = LUKS2_reencrypt_status(hdr);
        uint64_t data_offset = LUKS2_get_data_offset(hdr);

        if (ri == CRYPT_REENCRYPT_CLEAN && reencrypt_direction(hdr) == CRYPT_REENCRYPT_FORWARD)
                data_offset += reencrypt_data_shift(hdr) >> SECTOR_SHIFT;

        return blockwise ? data_offset : data_offset << SECTOR_SHIFT;
}
3576
3577 /* internal only */
3578 int LUKS2_reencrypt_check_device_size(struct crypt_device *cd, struct luks2_hdr *hdr,
3579         uint64_t check_size, uint64_t *dev_size, bool activation, bool dynamic)
3580 {
3581         int r;
3582         uint64_t data_offset, real_size = 0;
3583
3584         if (reencrypt_direction(hdr) == CRYPT_REENCRYPT_BACKWARD &&
3585             (LUKS2_get_segment_by_flag(hdr, "backup-moved-segment") || dynamic))
3586                 check_size += reencrypt_data_shift(hdr);
3587
3588         r = device_check_access(cd, crypt_data_device(cd), activation ? DEV_EXCL : DEV_OK);
3589         if (r)
3590                 return r;
3591
3592         data_offset = LUKS2_reencrypt_data_offset(hdr, false);
3593
3594         r = device_check_size(cd, crypt_data_device(cd), data_offset, 1);
3595         if (r)
3596                 return r;
3597
3598         r = device_size(crypt_data_device(cd), &real_size);
3599         if (r)
3600                 return r;
3601
3602         log_dbg(cd, "Required minimal device size: %" PRIu64 " (%" PRIu64 " sectors)"
3603                     ", real device size: %" PRIu64 " (%" PRIu64 " sectors)\n"
3604                     "calculated device size: %" PRIu64 " (%" PRIu64 " sectors)",
3605                     check_size, check_size >> SECTOR_SHIFT, real_size, real_size >> SECTOR_SHIFT,
3606                     real_size - data_offset, (real_size - data_offset) >> SECTOR_SHIFT);
3607
3608         if (real_size < data_offset || (check_size && (real_size - data_offset) < check_size)) {
3609                 log_err(cd, _("Device %s is too small."), device_path(crypt_data_device(cd)));
3610                 return -EINVAL;
3611         }
3612
3613         *dev_size = real_size - data_offset;
3614
3615         return 0;
3616 }
3617 #if USE_LUKS2_REENCRYPTION
/* returns keyslot number on success (>= 0) or negative errno otherwise */
/*
 * Crash recovery entry point; caller must already hold the reencryption lock.
 *
 * Unlocks all segment keys with the passphrase, loads them into the kernel
 * keyring when enabled, validates the device size, then replays the
 * interrupted hotzone. On success the unlocked keys are optionally handed
 * back through vks (ownership transferred to the caller).
 */
int LUKS2_reencrypt_locked_recovery_by_passphrase(struct crypt_device *cd,
        int keyslot_old,
        int keyslot_new,
        const char *passphrase,
        size_t passphrase_size,
        uint32_t flags,
        struct volume_key **vks)
{
        uint64_t minimal_size, device_size;
        int keyslot, r = -EINVAL;
        struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
        struct volume_key *vk = NULL, *_vks = NULL;

        log_dbg(cd, "Entering reencryption crash recovery.");

        if (LUKS2_get_data_size(hdr, &minimal_size, NULL))
                return r;

        r = LUKS2_keyslot_open_all_segments(cd, keyslot_old, keyslot_new,
                        passphrase, passphrase_size, &_vks);
        if (r < 0)
                goto err;
        keyslot = r;

        if (crypt_use_keyring_for_vk(cd))
                vk = _vks;

        /* Upload each unlocked volume key into the kernel keyring. */
        while (vk) {
                r = LUKS2_volume_key_load_in_keyring_by_digest(cd, hdr, vk, crypt_volume_key_get_id(vk));
                if (r < 0)
                        goto err;
                vk = crypt_volume_key_next(vk);
        }

        if (LUKS2_reencrypt_check_device_size(cd, hdr, minimal_size, &device_size, true, false))
                goto err;

        r = reencrypt_recovery(cd, hdr, device_size, _vks);

        /* Hand the unlocked key list to the caller on success. */
        if (!r && vks)
                MOVE_REF(*vks, _vks);
err:
        if (r < 0)
                crypt_drop_keyring_key(cd, _vks);
        crypt_free_volume_key(_vks);

        return r < 0 ? r : keyslot;
}
3667 #endif
/*
 * Return reencryption status and (optionally) fill caller-visible reencrypt
 * parameters from on-disk metadata. Sets CRYPT_REENCRYPT_REPAIR_NEEDED in
 * params->flags when legacy (v1) metadata or a missing keyslot digest is
 * detected.
 */
crypt_reencrypt_info LUKS2_reencrypt_get_params(struct luks2_hdr *hdr,
        struct crypt_params_reencrypt *params)
{
        crypt_reencrypt_info ri;
        int digest;
        uint32_t version;

        ri = LUKS2_reencrypt_status(hdr);
        if (ri == CRYPT_REENCRYPT_NONE || ri == CRYPT_REENCRYPT_INVALID || !params)
                return ri;

        /* -ENOENT (no digest) is handled below as a repair condition. */
        digest = LUKS2_digest_by_keyslot(hdr, LUKS2_find_keyslot(hdr, "reencrypt"));
        if (digest < 0 && digest != -ENOENT)
                return CRYPT_REENCRYPT_INVALID;

        /*
         * In case there's an old "online-reencrypt" requirement or reencryption
         * keyslot digest is missing inform caller reencryption metadata requires repair.
         */
        if (!LUKS2_config_get_reencrypt_version(hdr, &version) &&
            (version < 2 || digest == -ENOENT)) {
                params->flags |= CRYPT_REENCRYPT_REPAIR_NEEDED;
                return ri;
        }

        params->mode = reencrypt_mode(hdr);
        params->direction = reencrypt_direction(hdr);
        params->resilience = reencrypt_resilience_type(hdr);
        params->hash = reencrypt_resilience_hash(hdr);
        params->data_shift = reencrypt_data_shift(hdr) >> SECTOR_SHIFT;
        params->max_hotzone_size = 0;
        if (LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment") >= 0)
                params->flags |= CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT;

        return ri;
}