bcdddcd7bc794a1a63e275114539c03d9c0543de
[platform/kernel/linux-rpi.git] / fs/ntfs3/fslog.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *
4  * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
5  *
6  */
7
8 #include <linux/blkdev.h>
9 #include <linux/fs.h>
10 #include <linux/random.h>
11 #include <linux/slab.h>
12
13 #include "debug.h"
14 #include "ntfs.h"
15 #include "ntfs_fs.h"
16
17 /*
18  * LOG FILE structs
19  */
20
21 // clang-format off
22
23 #define MaxLogFileSize     0x100000000ull
24 #define DefaultLogPageSize 4096
25 #define MinLogRecordPages  0x30
26
27 struct RESTART_HDR {
28         struct NTFS_RECORD_HEADER rhdr; // 'RSTR'
29         __le32 sys_page_size; // 0x10: Page size of the system which initialized the log.
30         __le32 page_size;     // 0x14: Log page size used for this log file.
31         __le16 ra_off;        // 0x18:
32         __le16 minor_ver;     // 0x1A:
33         __le16 major_ver;     // 0x1C:
34         __le16 fixups[];
35 };
36
37 #define LFS_NO_CLIENT 0xffff
38 #define LFS_NO_CLIENT_LE cpu_to_le16(0xffff)
39
40 struct CLIENT_REC {
41         __le64 oldest_lsn;
42         __le64 restart_lsn; // 0x08:
43         __le16 prev_client; // 0x10:
44         __le16 next_client; // 0x12:
45         __le16 seq_num;     // 0x14:
46         u8 align[6];        // 0x16:
47         __le32 name_bytes;  // 0x1C: In bytes.
48         __le16 name[32];    // 0x20: Name of client.
49 };
50
51 static_assert(sizeof(struct CLIENT_REC) == 0x60);
52
53 /* Two copies of these will exist at the beginning of the log file */
54 struct RESTART_AREA {
55         __le64 current_lsn;    // 0x00: Current logical end of log file.
56         __le16 log_clients;    // 0x08: Maximum number of clients.
57         __le16 client_idx[2];  // 0x0A: Free/use index into the client record arrays.
58         __le16 flags;          // 0x0E: See RESTART_SINGLE_PAGE_IO.
59         __le32 seq_num_bits;   // 0x10: The number of bits in sequence number.
60         __le16 ra_len;         // 0x14:
61         __le16 client_off;     // 0x16:
62         __le64 l_size;         // 0x18: Usable log file size.
63         __le32 last_lsn_data_len; // 0x20:
64         __le16 rec_hdr_len;    // 0x24: Log record header length.
65         __le16 data_off;       // 0x26: Log page data offset.
66         __le32 open_log_count; // 0x28:
67         __le32 align[5];       // 0x2C:
68         struct CLIENT_REC clients[]; // 0x40:
69 };
70
71 struct LOG_REC_HDR {
72         __le16 redo_op;      // 0x00:  NTFS_LOG_OPERATION
73         __le16 undo_op;      // 0x02:  NTFS_LOG_OPERATION
74         __le16 redo_off;     // 0x04:  Offset to Redo record.
75         __le16 redo_len;     // 0x06:  Redo length.
76         __le16 undo_off;     // 0x08:  Offset to Undo record.
77         __le16 undo_len;     // 0x0A:  Undo length.
78         __le16 target_attr;  // 0x0C:
79         __le16 lcns_follow;  // 0x0E:
80         __le16 record_off;   // 0x10:
81         __le16 attr_off;     // 0x12:
82         __le16 cluster_off;  // 0x14:
83         __le16 reserved;     // 0x16:
84         __le64 target_vcn;   // 0x18:
85         __le64 page_lcns[];  // 0x20:
86 };
87
88 static_assert(sizeof(struct LOG_REC_HDR) == 0x20);
89
90 #define RESTART_ENTRY_ALLOCATED    0xFFFFFFFF
91 #define RESTART_ENTRY_ALLOCATED_LE cpu_to_le32(0xFFFFFFFF)
92
93 struct RESTART_TABLE {
94         __le16 size;       // 0x00: In bytes
95         __le16 used;       // 0x02: Entries
96         __le16 total;      // 0x04: Entries
97         __le16 res[3];     // 0x06:
98         __le32 free_goal;  // 0x0C:
99         __le32 first_free; // 0x10:
100         __le32 last_free;  // 0x14:
101
102 };
103
104 static_assert(sizeof(struct RESTART_TABLE) == 0x18);
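/*
 * A RESTART_TABLE header is immediately followed by 'used' fixed-size
 * entries of 'size' bytes each.  The first __le32 of every entry doubles
 * as a link: an allocated entry holds RESTART_ENTRY_ALLOCATED, while a
 * free entry holds the byte offset (from the start of the table) of the
 * next free entry, with 0 terminating the list.  'first_free'/'last_free'
 * are the head and tail of that free list (see init_rsttbl and
 * alloc_rsttbl_idx below).
 */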
105
106 struct ATTR_NAME_ENTRY {
107         __le16 off; // Offset in the Open attribute Table.
108         __le16 name_bytes;
109         __le16 name[];
110 };
111
112 struct OPEN_ATTR_ENRTY {
113         __le32 next;            // 0x00: RESTART_ENTRY_ALLOCATED if allocated
114         __le32 bytes_per_index; // 0x04:
115         enum ATTR_TYPE type;    // 0x08:
116         u8 is_dirty_pages;      // 0x0C:
117         u8 is_attr_name;        // 0x0D: Faked field to manage 'ptr'
118         u8 name_len;            // 0x0E: Faked field to manage 'ptr'
119         u8 res;
120         struct MFT_REF ref;     // 0x10: File Reference of file containing attribute
121         __le64 open_record_lsn; // 0x18:
122         void *ptr;              // 0x20:
123 };
124
125 /* 32 bit version of 'struct OPEN_ATTR_ENRTY' */
126 struct OPEN_ATTR_ENRTY_32 {
127         __le32 next;            // 0x00: RESTART_ENTRY_ALLOCATED if allocated
128         __le32 ptr;             // 0x04:
129         struct MFT_REF ref;     // 0x08:
130         __le64 open_record_lsn; // 0x10:
131         u8 is_dirty_pages;      // 0x18:
132         u8 is_attr_name;        // 0x19:
133         u8 res1[2];
134         enum ATTR_TYPE type;    // 0x1C:
135         u8 name_len;            // 0x20: In wchar
136         u8 res2[3];
137         __le32 AttributeName;   // 0x24:
138         __le32 bytes_per_index; // 0x28:
139 };
140
141 #define SIZEOF_OPENATTRIBUTEENTRY0 0x2c
142 // static_assert( 0x2C == sizeof(struct OPEN_ATTR_ENRTY_32) );
143 static_assert(sizeof(struct OPEN_ATTR_ENRTY) < SIZEOF_OPENATTRIBUTEENTRY0);
144
145 /*
146  * One entry exists in the Dirty Pages Table for each page which is dirty at
147  * the time the Restart Area is written.
148  */
149 struct DIR_PAGE_ENTRY {
150         __le32 next;         // 0x00: RESTART_ENTRY_ALLOCATED if allocated
151         __le32 target_attr;  // 0x04: Index into the Open attribute Table
152         __le32 transfer_len; // 0x08:
153         __le32 lcns_follow;  // 0x0C:
154         __le64 vcn;          // 0x10: Vcn of dirty page
155         __le64 oldest_lsn;   // 0x18:
156         __le64 page_lcns[];  // 0x20:
157 };
158
159 static_assert(sizeof(struct DIR_PAGE_ENTRY) == 0x20);
160
161 /* 32 bit version of 'struct DIR_PAGE_ENTRY' */
162 struct DIR_PAGE_ENTRY_32 {
163         __le32 next;            // 0x00: RESTART_ENTRY_ALLOCATED if allocated
164         __le32 target_attr;     // 0x04: Index into the Open attribute Table
165         __le32 transfer_len;    // 0x08:
166         __le32 lcns_follow;     // 0x0C:
167         __le32 reserved;        // 0x10:
168         __le32 vcn_low;         // 0x14: Vcn of dirty page
169         __le32 vcn_hi;          // 0x18: Vcn of dirty page
170         __le32 oldest_lsn_low;  // 0x1C:
171         __le32 oldest_lsn_hi;   // 0x20:
172         __le32 page_lcns_low;   // 0x24:
173         __le32 page_lcns_hi;    // 0x28:
174 };
175
176 static_assert(offsetof(struct DIR_PAGE_ENTRY_32, vcn_low) == 0x14);
177 static_assert(sizeof(struct DIR_PAGE_ENTRY_32) == 0x2c);
178
179 enum transact_state {
180         TransactionUninitialized = 0,
181         TransactionActive,
182         TransactionPrepared,
183         TransactionCommitted
184 };
185
186 struct TRANSACTION_ENTRY {
187         __le32 next;          // 0x00: RESTART_ENTRY_ALLOCATED if allocated
188         u8 transact_state;    // 0x04:
189         u8 reserved[3];       // 0x05:
190         __le64 first_lsn;     // 0x08:
191         __le64 prev_lsn;      // 0x10:
192         __le64 undo_next_lsn; // 0x18:
193         __le32 undo_records;  // 0x20: Number of undo log records pending abort
194         __le32 undo_len;      // 0x24: Total undo size
195 };
196
197 static_assert(sizeof(struct TRANSACTION_ENTRY) == 0x28);
198
199 struct NTFS_RESTART {
200         __le32 major_ver;             // 0x00:
201         __le32 minor_ver;             // 0x04:
202         __le64 check_point_start;     // 0x08:
203         __le64 open_attr_table_lsn;   // 0x10:
204         __le64 attr_names_lsn;        // 0x18:
205         __le64 dirty_pages_table_lsn; // 0x20:
206         __le64 transact_table_lsn;    // 0x28:
207         __le32 open_attr_len;         // 0x30: In bytes
208         __le32 attr_names_len;        // 0x34: In bytes
209         __le32 dirty_pages_len;       // 0x38: In bytes
210         __le32 transact_table_len;    // 0x3C: In bytes
211 };
212
213 static_assert(sizeof(struct NTFS_RESTART) == 0x40);
214
215 struct NEW_ATTRIBUTE_SIZES {
216         __le64 alloc_size;
217         __le64 valid_size;
218         __le64 data_size;
219         __le64 total_size;
220 };
221
222 struct BITMAP_RANGE {
223         __le32 bitmap_off;
224         __le32 bits;
225 };
226
227 struct LCN_RANGE {
228         __le64 lcn;
229         __le64 len;
230 };
231
232 /* The following type defines the different log record types. */
233 #define LfsClientRecord  cpu_to_le32(1)
234 #define LfsClientRestart cpu_to_le32(2)
235
236 /* This is used to uniquely identify a client for a particular log file. */
237 struct CLIENT_ID {
238         __le16 seq_num;
239         __le16 client_idx;
240 };
241
242 /* This is the header that begins every Log Record in the log file. */
243 struct LFS_RECORD_HDR {
244         __le64 this_lsn;                // 0x00:
245         __le64 client_prev_lsn;         // 0x08:
246         __le64 client_undo_next_lsn;    // 0x10:
247         __le32 client_data_len;         // 0x18:
248         struct CLIENT_ID client;        // 0x1C: Owner of this log record.
249         __le32 record_type;             // 0x20: LfsClientRecord or LfsClientRestart.
250         __le32 transact_id;             // 0x24:
251         __le16 flags;                   // 0x28: LOG_RECORD_MULTI_PAGE
252         u8 align[6];                    // 0x2A:
253 };
254
255 #define LOG_RECORD_MULTI_PAGE cpu_to_le16(1)
256
257 static_assert(sizeof(struct LFS_RECORD_HDR) == 0x30);
258
259 struct LFS_RECORD {
260         __le16 next_record_off; // 0x00: Offset of the free space in the page.
261         u8 align[6];            // 0x02:
262         __le64 last_end_lsn;    // 0x08: lsn for the last log record which ends on the page.
263 };
264
265 static_assert(sizeof(struct LFS_RECORD) == 0x10);
266
267 struct RECORD_PAGE_HDR {
268         struct NTFS_RECORD_HEADER rhdr; // 'RCRD'
269         __le32 rflags;                  // 0x10: See LOG_PAGE_LOG_RECORD_END
270         __le16 page_count;              // 0x14:
271         __le16 page_pos;                // 0x16:
272         struct LFS_RECORD record_hdr;   // 0x18:
273         __le16 fixups[10];              // 0x28:
274         __le32 file_off;                // 0x3c: Used when major version >= 2
275 };
276
277 // clang-format on
278
279 // Page contains the end of a log record.
280 #define LOG_PAGE_LOG_RECORD_END cpu_to_le32(0x00000001)
281
282 static inline bool is_log_record_end(const struct RECORD_PAGE_HDR *hdr)
283 {
284         return hdr->rflags & LOG_PAGE_LOG_RECORD_END;
285 }
286
287 static_assert(offsetof(struct RECORD_PAGE_HDR, file_off) == 0x3c);
288
289 /*
290  * END of NTFS LOG structures
291  */
292
293 /* Define some tuning parameters to keep the restart tables a reasonable size. */
294 #define INITIAL_NUMBER_TRANSACTIONS 5
295
296 enum NTFS_LOG_OPERATION {
297
298         Noop = 0x00,
299         CompensationLogRecord = 0x01,
300         InitializeFileRecordSegment = 0x02,
301         DeallocateFileRecordSegment = 0x03,
302         WriteEndOfFileRecordSegment = 0x04,
303         CreateAttribute = 0x05,
304         DeleteAttribute = 0x06,
305         UpdateResidentValue = 0x07,
306         UpdateNonresidentValue = 0x08,
307         UpdateMappingPairs = 0x09,
308         DeleteDirtyClusters = 0x0A,
309         SetNewAttributeSizes = 0x0B,
310         AddIndexEntryRoot = 0x0C,
311         DeleteIndexEntryRoot = 0x0D,
312         AddIndexEntryAllocation = 0x0E,
313         DeleteIndexEntryAllocation = 0x0F,
314         WriteEndOfIndexBuffer = 0x10,
315         SetIndexEntryVcnRoot = 0x11,
316         SetIndexEntryVcnAllocation = 0x12,
317         UpdateFileNameRoot = 0x13,
318         UpdateFileNameAllocation = 0x14,
319         SetBitsInNonresidentBitMap = 0x15,
320         ClearBitsInNonresidentBitMap = 0x16,
321         HotFix = 0x17,
322         EndTopLevelAction = 0x18,
323         PrepareTransaction = 0x19,
324         CommitTransaction = 0x1A,
325         ForgetTransaction = 0x1B,
326         OpenNonresidentAttribute = 0x1C,
327         OpenAttributeTableDump = 0x1D,
328         AttributeNamesDump = 0x1E,
329         DirtyPageTableDump = 0x1F,
330         TransactionTableDump = 0x20,
331         UpdateRecordDataRoot = 0x21,
332         UpdateRecordDataAllocation = 0x22,
333
334         UpdateRelativeDataInIndex =
335                 0x23, // NtOfsRestartUpdateRelativeDataInIndex
336         UpdateRelativeDataInIndex2 = 0x24,
337         ZeroEndOfFileRecord = 0x25,
338 };
339
340 /*
341  * Bitmap of log record types which require a target attribute.
342  * A set bit indicates that the corresponding restart operation
343  * requires a target attribute.
344  */
345 static const u8 AttributeRequired[] = {
346         0xFC, 0xFB, 0xFF, 0x10, 0x06,
347 };
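/*
 * Each byte above packs eight operation codes (op >> 3 selects the byte,
 * op & 7 the bit).  For example, SetNewAttributeSizes (0x0B) maps to bit 3
 * of AttributeRequired[1] == 0xFB, which is set, so it needs a target
 * attribute; CompensationLogRecord (0x01) maps to bit 1 of 0xFC, which is
 * clear, so it does not.
 */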
348
349 static inline bool is_target_required(u16 op)
350 {
351         bool ret = op <= UpdateRecordDataAllocation &&
352                    (AttributeRequired[op >> 3] >> (op & 7) & 1);
353         return ret;
354 }
355
356 static inline bool can_skip_action(enum NTFS_LOG_OPERATION op)
357 {
358         switch (op) {
359         case Noop:
360         case DeleteDirtyClusters:
361         case HotFix:
362         case EndTopLevelAction:
363         case PrepareTransaction:
364         case CommitTransaction:
365         case ForgetTransaction:
366         case CompensationLogRecord:
367         case OpenNonresidentAttribute:
368         case OpenAttributeTableDump:
369         case AttributeNamesDump:
370         case DirtyPageTableDump:
371         case TransactionTableDump:
372                 return true;
373         default:
374                 return false;
375         }
376 }
377
378 enum { lcb_ctx_undo_next, lcb_ctx_prev, lcb_ctx_next };
379
380 /* Bytes per restart table. */
381 static inline u32 bytes_per_rt(const struct RESTART_TABLE *rt)
382 {
383         return le16_to_cpu(rt->used) * le16_to_cpu(rt->size) +
384                sizeof(struct RESTART_TABLE);
385 }
386
387 /* Log record length. */
388 static inline u32 lrh_length(const struct LOG_REC_HDR *lr)
389 {
390         u16 t16 = le16_to_cpu(lr->lcns_follow);
391
392         return struct_size(lr, page_lcns, max_t(u16, 1, t16));
393 }
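/*
 * Note: even when 'lcns_follow' is zero the record length still accounts
 * for one page_lcns slot, hence the max_t(u16, 1, t16) above.
 */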
394
395 struct lcb {
396         struct LFS_RECORD_HDR *lrh; // Log record header of the current lsn.
397         struct LOG_REC_HDR *log_rec;
398         u32 ctx_mode; // lcb_ctx_undo_next/lcb_ctx_prev/lcb_ctx_next
399         struct CLIENT_ID client;
400         bool alloc; // If true then we should deallocate 'log_rec'.
401 };
402
403 static void lcb_put(struct lcb *lcb)
404 {
405         if (lcb->alloc)
406                 kfree(lcb->log_rec);
407         kfree(lcb->lrh);
408         kfree(lcb);
409 }
410
411 /* Find the oldest lsn from active clients. */
412 static inline void oldest_client_lsn(const struct CLIENT_REC *ca,
413                                      __le16 next_client, u64 *oldest_lsn)
414 {
415         while (next_client != LFS_NO_CLIENT_LE) {
416                 const struct CLIENT_REC *cr = ca + le16_to_cpu(next_client);
417                 u64 lsn = le64_to_cpu(cr->oldest_lsn);
418
419                 /* Ignore this block if its oldest lsn is 0. */
420                 if (lsn && lsn < *oldest_lsn)
421                         *oldest_lsn = lsn;
422
423                 next_client = cr->next_client;
424         }
425 }
426
427 static inline bool is_rst_page_hdr_valid(u32 file_off,
428                                          const struct RESTART_HDR *rhdr)
429 {
430         u32 sys_page = le32_to_cpu(rhdr->sys_page_size);
431         u32 page_size = le32_to_cpu(rhdr->page_size);
432         u32 end_usa;
433         u16 ro;
434
435         if (sys_page < SECTOR_SIZE || page_size < SECTOR_SIZE ||
436             sys_page & (sys_page - 1) || page_size & (page_size - 1)) {
437                 return false;
438         }
439
440         /* Check that if the file offset isn't 0, it is the system page size. */
441         if (file_off && file_off != sys_page)
442                 return false;
443
444         /* Check support version 1.1+. */
445         if (le16_to_cpu(rhdr->major_ver) <= 1 && !rhdr->minor_ver)
446                 return false;
447
448         if (le16_to_cpu(rhdr->major_ver) > 2)
449                 return false;
450
451         ro = le16_to_cpu(rhdr->ra_off);
452         if (!IS_ALIGNED(ro, 8) || ro > sys_page)
453                 return false;
454
455         end_usa = ((sys_page >> SECTOR_SHIFT) + 1) * sizeof(short);
456         end_usa += le16_to_cpu(rhdr->rhdr.fix_off);
457
458         if (ro < end_usa)
459                 return false;
460
461         return true;
462 }
463
464 static inline bool is_rst_area_valid(const struct RESTART_HDR *rhdr)
465 {
466         const struct RESTART_AREA *ra;
467         u16 cl, fl, ul;
468         u32 off, l_size, file_dat_bits, file_size_round;
469         u16 ro = le16_to_cpu(rhdr->ra_off);
470         u32 sys_page = le32_to_cpu(rhdr->sys_page_size);
471
472         if (ro + offsetof(struct RESTART_AREA, l_size) >
473             SECTOR_SIZE - sizeof(short))
474                 return false;
475
476         ra = Add2Ptr(rhdr, ro);
477         cl = le16_to_cpu(ra->log_clients);
478
479         if (cl > 1)
480                 return false;
481
482         off = le16_to_cpu(ra->client_off);
483
484         if (!IS_ALIGNED(off, 8) || ro + off > SECTOR_SIZE - sizeof(short))
485                 return false;
486
487         off += cl * sizeof(struct CLIENT_REC);
488
489         if (off > sys_page)
490                 return false;
491
492         /*
493          * Check the restart length field and whether the entire
494          * restart area is contained within that length.
495          */
496         if (le16_to_cpu(rhdr->ra_off) + le16_to_cpu(ra->ra_len) > sys_page ||
497             off > le16_to_cpu(ra->ra_len)) {
498                 return false;
499         }
500
501         /*
502          * As a final check make sure that the use list and the free list
503          * are either empty or point to a valid client.
504          */
505         fl = le16_to_cpu(ra->client_idx[0]);
506         ul = le16_to_cpu(ra->client_idx[1]);
507         if ((fl != LFS_NO_CLIENT && fl >= cl) ||
508             (ul != LFS_NO_CLIENT && ul >= cl))
509                 return false;
510
511         /* Make sure the sequence number bits match the log file size. */
512         l_size = le64_to_cpu(ra->l_size);
513
514         file_dat_bits = sizeof(u64) * 8 - le32_to_cpu(ra->seq_num_bits);
515         file_size_round = 1u << (file_dat_bits + 3);
516         if (file_size_round != l_size &&
517             (file_size_round < l_size || (file_size_round / 2) > l_size)) {
518                 return false;
519         }
520
521         /* The log page data offset and record header length must be quad-aligned. */
522         if (!IS_ALIGNED(le16_to_cpu(ra->data_off), 8) ||
523             !IS_ALIGNED(le16_to_cpu(ra->rec_hdr_len), 8))
524                 return false;
525
526         return true;
527 }
528
529 static inline bool is_client_area_valid(const struct RESTART_HDR *rhdr,
530                                         bool usa_error)
531 {
532         u16 ro = le16_to_cpu(rhdr->ra_off);
533         const struct RESTART_AREA *ra = Add2Ptr(rhdr, ro);
534         u16 ra_len = le16_to_cpu(ra->ra_len);
535         const struct CLIENT_REC *ca;
536         u32 i;
537
538         if (usa_error && ra_len + ro > SECTOR_SIZE - sizeof(short))
539                 return false;
540
541         /* Find the start of the client array. */
542         ca = Add2Ptr(ra, le16_to_cpu(ra->client_off));
543
544         /*
545          * Start with the free list.
546          * Check that all the clients are valid and that there isn't a cycle.
547          * Do the in-use list on the second pass.
548          */
549         for (i = 0; i < 2; i++) {
550                 u16 client_idx = le16_to_cpu(ra->client_idx[i]);
551                 bool first_client = true;
552                 u16 clients = le16_to_cpu(ra->log_clients);
553
554                 while (client_idx != LFS_NO_CLIENT) {
555                         const struct CLIENT_REC *cr;
556
557                         if (!clients ||
558                             client_idx >= le16_to_cpu(ra->log_clients))
559                                 return false;
560
561                         clients -= 1;
562                         cr = ca + client_idx;
563
564                         client_idx = le16_to_cpu(cr->next_client);
565
566                         if (first_client) {
567                                 first_client = false;
568                                 if (cr->prev_client != LFS_NO_CLIENT_LE)
569                                         return false;
570                         }
571                 }
572         }
573
574         return true;
575 }
576
577 /*
578  * remove_client
579  *
580  * Remove a client record from a client record list in a restart area.
581  */
582 static inline void remove_client(struct CLIENT_REC *ca,
583                                  const struct CLIENT_REC *cr, __le16 *head)
584 {
585         if (cr->prev_client == LFS_NO_CLIENT_LE)
586                 *head = cr->next_client;
587         else
588                 ca[le16_to_cpu(cr->prev_client)].next_client = cr->next_client;
589
590         if (cr->next_client != LFS_NO_CLIENT_LE)
591                 ca[le16_to_cpu(cr->next_client)].prev_client = cr->prev_client;
592 }
593
594 /*
595  * add_client - Add a client record to the start of a list.
596  */
597 static inline void add_client(struct CLIENT_REC *ca, u16 index, __le16 *head)
598 {
599         struct CLIENT_REC *cr = ca + index;
600
601         cr->prev_client = LFS_NO_CLIENT_LE;
602         cr->next_client = *head;
603
604         if (*head != LFS_NO_CLIENT_LE)
605                 ca[le16_to_cpu(*head)].prev_client = cpu_to_le16(index);
606
607         *head = cpu_to_le16(index);
608 }
609
610 static inline void *enum_rstbl(struct RESTART_TABLE *t, void *c)
611 {
612         __le32 *e;
613         u32 bprt;
614         u16 rsize = t ? le16_to_cpu(t->size) : 0;
615
616         if (!c) {
617                 if (!t || !t->total)
618                         return NULL;
619                 e = Add2Ptr(t, sizeof(struct RESTART_TABLE));
620         } else {
621                 e = Add2Ptr(c, rsize);
622         }
623
624         /* Loop until we hit the first one allocated, or the end of the list. */
625         for (bprt = bytes_per_rt(t); PtrOffset(t, e) < bprt;
626              e = Add2Ptr(e, rsize)) {
627                 if (*e == RESTART_ENTRY_ALLOCATED_LE)
628                         return e;
629         }
630         return NULL;
631 }
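/*
 * enum_rstbl() is an iterator over the allocated entries: pass NULL to get
 * the first allocated entry and the previous return value to continue, e.g.
 *
 *	void *e = NULL;
 *	while ((e = enum_rstbl(tbl, e))) {
 *		// process the allocated entry at 'e'
 *	}
 *
 * find_dp() below uses exactly this pattern.
 */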
632
633 /*
634  * find_dp - Search for a @vcn in Dirty Page Table.
635  */
636 static inline struct DIR_PAGE_ENTRY *find_dp(struct RESTART_TABLE *dptbl,
637                                              u32 target_attr, u64 vcn)
638 {
639         __le32 ta = cpu_to_le32(target_attr);
640         struct DIR_PAGE_ENTRY *dp = NULL;
641
642         while ((dp = enum_rstbl(dptbl, dp))) {
643                 u64 dp_vcn = le64_to_cpu(dp->vcn);
644
645                 if (dp->target_attr == ta && vcn >= dp_vcn &&
646                     vcn < dp_vcn + le32_to_cpu(dp->lcns_follow)) {
647                         return dp;
648                 }
649         }
650         return NULL;
651 }
652
653 static inline u32 norm_file_page(u32 page_size, u32 *l_size, bool use_default)
654 {
655         if (use_default)
656                 page_size = DefaultLogPageSize;
657
658         /* Round the file size down to a system page boundary. */
659         *l_size &= ~(page_size - 1);
660
661         /* File should contain at least 2 restart pages and MinLogRecordPages pages. */
662         if (*l_size < (MinLogRecordPages + 2) * page_size)
663                 return 0;
664
665         return page_size;
666 }
667
668 static bool check_log_rec(const struct LOG_REC_HDR *lr, u32 bytes, u32 tr,
669                           u32 bytes_per_attr_entry)
670 {
671         u16 t16;
672
673         if (bytes < sizeof(struct LOG_REC_HDR))
674                 return false;
675         if (!tr)
676                 return false;
677
678         if ((tr - sizeof(struct RESTART_TABLE)) %
679             sizeof(struct TRANSACTION_ENTRY))
680                 return false;
681
682         if (le16_to_cpu(lr->redo_off) & 7)
683                 return false;
684
685         if (le16_to_cpu(lr->undo_off) & 7)
686                 return false;
687
688         if (lr->target_attr)
689                 goto check_lcns;
690
691         if (is_target_required(le16_to_cpu(lr->redo_op)))
692                 return false;
693
694         if (is_target_required(le16_to_cpu(lr->undo_op)))
695                 return false;
696
697 check_lcns:
698         if (!lr->lcns_follow)
699                 goto check_length;
700
701         t16 = le16_to_cpu(lr->target_attr);
702         if ((t16 - sizeof(struct RESTART_TABLE)) % bytes_per_attr_entry)
703                 return false;
704
705 check_length:
706         if (bytes < lrh_length(lr))
707                 return false;
708
709         return true;
710 }
711
712 static bool check_rstbl(const struct RESTART_TABLE *rt, size_t bytes)
713 {
714         u32 ts;
715         u32 i, off;
716         u16 rsize = le16_to_cpu(rt->size);
717         u16 ne = le16_to_cpu(rt->used);
718         u32 ff = le32_to_cpu(rt->first_free);
719         u32 lf = le32_to_cpu(rt->last_free);
720
721         ts = rsize * ne + sizeof(struct RESTART_TABLE);
722
723         if (!rsize || rsize > bytes ||
724             rsize + sizeof(struct RESTART_TABLE) > bytes || bytes < ts ||
725             le16_to_cpu(rt->total) > ne || ff > ts || lf > ts ||
726             (ff && ff < sizeof(struct RESTART_TABLE)) ||
727             (lf && lf < sizeof(struct RESTART_TABLE))) {
728                 return false;
729         }
730
731         /*
732          * Verify each entry is either allocated or points
733          * to a valid offset within the table.
734          */
735         for (i = 0; i < ne; i++) {
736                 off = le32_to_cpu(*(__le32 *)Add2Ptr(
737                         rt, i * rsize + sizeof(struct RESTART_TABLE)));
738
739                 if (off != RESTART_ENTRY_ALLOCATED && off &&
740                     (off < sizeof(struct RESTART_TABLE) ||
741                      ((off - sizeof(struct RESTART_TABLE)) % rsize))) {
742                         return false;
743                 }
744         }
745
746         /*
747          * Walk through the list headed by the first entry to make
748          * sure none of the entries are currently being used.
749          */
750         for (off = ff; off;) {
751                 if (off == RESTART_ENTRY_ALLOCATED)
752                         return false;
753
754                 off = le32_to_cpu(*(__le32 *)Add2Ptr(rt, off));
755         }
756
757         return true;
758 }
759
760 /*
761  * free_rsttbl_idx - Free a previously allocated index in a Restart Table.
762  */
763 static inline void free_rsttbl_idx(struct RESTART_TABLE *rt, u32 off)
764 {
765         __le32 *e;
766         u32 lf = le32_to_cpu(rt->last_free);
767         __le32 off_le = cpu_to_le32(off);
768
769         e = Add2Ptr(rt, off);
770
771         if (off < le32_to_cpu(rt->free_goal)) {
772                 *e = rt->first_free;
773                 rt->first_free = off_le;
774                 if (!lf)
775                         rt->last_free = off_le;
776         } else {
777                 if (lf)
778                         *(__le32 *)Add2Ptr(rt, lf) = off_le;
779                 else
780                         rt->first_free = off_le;
781
782                 rt->last_free = off_le;
783                 *e = 0;
784         }
785
786         le16_sub_cpu(&rt->total, 1);
787 }
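/*
 * Note on 'free_goal' above: an entry freed at an offset below the goal is
 * pushed onto the front of the free list (and so is reused first), while an
 * entry at or beyond the goal is appended at the tail (reused last).
 */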
788
789 static inline struct RESTART_TABLE *init_rsttbl(u16 esize, u16 used)
790 {
791         __le32 *e, *last_free;
792         u32 off;
793         u32 bytes = esize * used + sizeof(struct RESTART_TABLE);
794         u32 lf = sizeof(struct RESTART_TABLE) + (used - 1) * esize;
795         struct RESTART_TABLE *t = kzalloc(bytes, GFP_NOFS);
796
797         if (!t)
798                 return NULL;
799
800         t->size = cpu_to_le16(esize);
801         t->used = cpu_to_le16(used);
802         t->free_goal = cpu_to_le32(~0u);
803         t->first_free = cpu_to_le32(sizeof(struct RESTART_TABLE));
804         t->last_free = cpu_to_le32(lf);
805
806         e = (__le32 *)(t + 1);
807         last_free = Add2Ptr(t, lf);
808
809         for (off = sizeof(struct RESTART_TABLE) + esize; e < last_free;
810              e = Add2Ptr(e, esize), off += esize) {
811                 *e = cpu_to_le32(off);
812         }
813         return t;
814 }
815
816 static inline struct RESTART_TABLE *extend_rsttbl(struct RESTART_TABLE *tbl,
817                                                   u32 add, u32 free_goal)
818 {
819         u16 esize = le16_to_cpu(tbl->size);
820         __le32 osize = cpu_to_le32(bytes_per_rt(tbl));
821         u32 used = le16_to_cpu(tbl->used);
822         struct RESTART_TABLE *rt;
823
824         rt = init_rsttbl(esize, used + add);
825         if (!rt)
826                 return NULL;
827
828         memcpy(rt + 1, tbl + 1, esize * used);
829
830         rt->free_goal = free_goal == ~0u
831                                 ? cpu_to_le32(~0u)
832                                 : cpu_to_le32(sizeof(struct RESTART_TABLE) +
833                                               free_goal * esize);
834
835         if (tbl->first_free) {
836                 rt->first_free = tbl->first_free;
837                 *(__le32 *)Add2Ptr(rt, le32_to_cpu(tbl->last_free)) = osize;
838         } else {
839                 rt->first_free = osize;
840         }
841
842         rt->total = tbl->total;
843
844         kfree(tbl);
845         return rt;
846 }
847
848 /*
849  * alloc_rsttbl_idx
850  *
851  * Allocate an index from within a previously initialized Restart Table.
852  */
853 static inline void *alloc_rsttbl_idx(struct RESTART_TABLE **tbl)
854 {
855         u32 off;
856         __le32 *e;
857         struct RESTART_TABLE *t = *tbl;
858
859         if (!t->first_free) {
860                 *tbl = t = extend_rsttbl(t, 16, ~0u);
861                 if (!t)
862                         return NULL;
863         }
864
865         off = le32_to_cpu(t->first_free);
866
867         /* Dequeue this entry and zero it. */
868         e = Add2Ptr(t, off);
869
870         t->first_free = *e;
871
872         memset(e, 0, le16_to_cpu(t->size));
873
874         *e = RESTART_ENTRY_ALLOCATED_LE;
875
876         /* If the list is now empty, then we fix the last_free as well. */
877         if (!t->first_free)
878                 t->last_free = 0;
879
880         le16_add_cpu(&t->total, 1);
881
882         return Add2Ptr(t, off);
883 }
884
885 /*
886  * alloc_rsttbl_from_idx
887  *
888  * Allocate a specific index from within a previously initialized Restart Table.
889  */
890 static inline void *alloc_rsttbl_from_idx(struct RESTART_TABLE **tbl, u32 vbo)
891 {
892         u32 off;
893         __le32 *e;
894         struct RESTART_TABLE *rt = *tbl;
895         u32 bytes = bytes_per_rt(rt);
896         u16 esize = le16_to_cpu(rt->size);
897
898         /* If the entry is not in the table, we will have to extend the table. */
899         if (vbo >= bytes) {
900                 /*
901                  * Extend the size by computing the number of entries between
902                  * the existing size and the desired index and adding 1 to that.
903                  */
904                 u32 bytes2idx = vbo - bytes;
905
906                 /*
907                  * There should always be an integral number of entries
908                  * being added. Now extend the table.
909                  */
910                 *tbl = rt = extend_rsttbl(rt, bytes2idx / esize + 1, bytes);
911                 if (!rt)
912                         return NULL;
913         }
914
915         /* See if the entry is already allocated, and just return if it is. */
916         e = Add2Ptr(rt, vbo);
917
918         if (*e == RESTART_ENTRY_ALLOCATED_LE)
919                 return e;
920
921         /*
922          * Walk through the table, looking for the entry we're
923          * interested in and the previous entry.
924          */
925         off = le32_to_cpu(rt->first_free);
926         e = Add2Ptr(rt, off);
927
928         if (off == vbo) {
929                 /* this is a match */
930                 rt->first_free = *e;
931                 goto skip_looking;
932         }
933
934         /*
935          * Need to walk through the list looking for the predecessor
936          * of our entry.
937          */
938         for (;;) {
939                 /* Remember the entry just found */
940                 u32 last_off = off;
941                 __le32 *last_e = e;
942
943                 /* Should never run out of entries. */
944
945                 /* Look up the next entry in the list. */
946                 off = le32_to_cpu(*last_e);
947                 e = Add2Ptr(rt, off);
948
949                 /* If this is our match we are done. */
950                 if (off == vbo) {
951                         *last_e = *e;
952
953                         /*
954                          * If this was the last free entry, we update the
955                          * table's last_free as well.
956                          */
957                         if (le32_to_cpu(rt->last_free) == off)
958                                 rt->last_free = cpu_to_le32(last_off);
959                         break;
960                 }
961         }
962
963 skip_looking:
964         /* If the list is now empty, we fix the last_free as well. */
965         if (!rt->first_free)
966                 rt->last_free = 0;
967
968         /* Zero this entry. */
969         memset(e, 0, esize);
970         *e = RESTART_ENTRY_ALLOCATED_LE;
971
972         le16_add_cpu(&rt->total, 1);
973
974         return e;
975 }
976
977 #define RESTART_SINGLE_PAGE_IO cpu_to_le16(0x0001)
978
979 #define NTFSLOG_WRAPPED 0x00000001
980 #define NTFSLOG_MULTIPLE_PAGE_IO 0x00000002
981 #define NTFSLOG_NO_LAST_LSN 0x00000004
982 #define NTFSLOG_REUSE_TAIL 0x00000010
983 #define NTFSLOG_NO_OLDEST_LSN 0x00000020
984
985 /* Helper struct to work with NTFS $LogFile. */
986 struct ntfs_log {
987         struct ntfs_inode *ni;
988
989         u32 l_size;
990         u32 sys_page_size;
991         u32 sys_page_mask;
992         u32 page_size;
993         u32 page_mask; // page_size - 1
994         u8 page_bits;
995         struct RECORD_PAGE_HDR *one_page_buf;
996
997         struct RESTART_TABLE *open_attr_tbl;
998         u32 transaction_id;
999         u32 clst_per_page;
1000
1001         u32 first_page;
1002         u32 next_page;
1003         u32 ra_off;
1004         u32 data_off;
1005         u32 restart_size;
1006         u32 data_size;
1007         u16 record_header_len;
1008         u64 seq_num;
1009         u32 seq_num_bits;
1010         u32 file_data_bits;
1011         u32 seq_num_mask; /* (8 << file_data_bits) - 1 */
1012
1013         struct RESTART_AREA *ra; /* In-memory image of the next restart area. */
1014         u32 ra_size; /* The usable size of the restart area. */
1015
1016         /*
1017          * If true, then the in-memory restart area is to be written
1018          * to the first position on the disk.
1019          */
1020         bool init_ra;
1021         bool set_dirty; /* True if we need to set dirty flag. */
1022
1023         u64 oldest_lsn;
1024
1025         u32 oldest_lsn_off;
1026         u64 last_lsn;
1027
1028         u32 total_avail;
1029         u32 total_avail_pages;
1030         u32 total_undo_commit;
1031         u32 max_current_avail;
1032         u32 current_avail;
1033         u32 reserved;
1034
1035         short major_ver;
1036         short minor_ver;
1037
1038         u32 l_flags; /* See NTFSLOG_XXX */
1039         u32 current_openlog_count; /* On-disk value for open_log_count. */
1040
1041         struct CLIENT_ID client_id;
1042         u32 client_undo_commit;
1043 };
1044
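/*
 * An lsn encodes both a position and a wrap count: the low 'file_data_bits'
 * hold the quadword (8-byte granular) offset within the log file, and the
 * high 'seq_num_bits' hold the sequence number, i.e.
 * lsn = (seq << file_data_bits) + (vbo >> 3), as the helpers below compute.
 */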
1045 static inline u32 lsn_to_vbo(struct ntfs_log *log, const u64 lsn)
1046 {
1047         u32 vbo = (lsn << log->seq_num_bits) >> (log->seq_num_bits - 3);
1048
1049         return vbo;
1050 }
1051
1052 /* Compute the offset in the log file of the next log page. */
1053 static inline u32 next_page_off(struct ntfs_log *log, u32 off)
1054 {
1055         off = (off & ~log->sys_page_mask) + log->page_size;
1056         return off >= log->l_size ? log->first_page : off;
1057 }
1058
1059 static inline u32 lsn_to_page_off(struct ntfs_log *log, u64 lsn)
1060 {
1061         return (((u32)lsn) << 3) & log->page_mask;
1062 }
1063
1064 static inline u64 vbo_to_lsn(struct ntfs_log *log, u32 off, u64 Seq)
1065 {
1066         return (off >> 3) + (Seq << log->file_data_bits);
1067 }
1068
1069 static inline bool is_lsn_in_file(struct ntfs_log *log, u64 lsn)
1070 {
1071         return lsn >= log->oldest_lsn &&
1072                lsn <= le64_to_cpu(log->ra->current_lsn);
1073 }
1074
1075 static inline u32 hdr_file_off(struct ntfs_log *log,
1076                                struct RECORD_PAGE_HDR *hdr)
1077 {
1078         if (log->major_ver < 2)
1079                 return le64_to_cpu(hdr->rhdr.lsn);
1080
1081         return le32_to_cpu(hdr->file_off);
1082 }
1083
1084 static inline u64 base_lsn(struct ntfs_log *log,
1085                            const struct RECORD_PAGE_HDR *hdr, u64 lsn)
1086 {
1087         u64 h_lsn = le64_to_cpu(hdr->rhdr.lsn);
1088         u64 ret = (((h_lsn >> log->file_data_bits) +
1089                     (lsn < (lsn_to_vbo(log, h_lsn) & ~log->page_mask) ? 1 : 0))
1090                    << log->file_data_bits) +
1091                   ((((is_log_record_end(hdr) &&
1092                       h_lsn <= le64_to_cpu(hdr->record_hdr.last_end_lsn))
1093                              ? le16_to_cpu(hdr->record_hdr.next_record_off)
1094                              : log->page_size) +
1095                     lsn) >>
1096                    3);
1097
1098         return ret;
1099 }
1100
1101 static inline bool verify_client_lsn(struct ntfs_log *log,
1102                                      const struct CLIENT_REC *client, u64 lsn)
1103 {
1104         return lsn >= le64_to_cpu(client->oldest_lsn) &&
1105                lsn <= le64_to_cpu(log->ra->current_lsn) && lsn;
1106 }
1107
1108 struct restart_info {
1109         u64 last_lsn;
1110         struct RESTART_HDR *r_page;
1111         u32 vbo;
1112         bool chkdsk_was_run;
1113         bool valid_page;
1114         bool initialized;
1115         bool restart;
1116 };
1117
1118 static int read_log_page(struct ntfs_log *log, u32 vbo,
1119                          struct RECORD_PAGE_HDR **buffer, bool *usa_error)
1120 {
1121         int err = 0;
1122         u32 page_idx = vbo >> log->page_bits;
1123         u32 page_off = vbo & log->page_mask;
1124         u32 bytes = log->page_size - page_off;
1125         void *to_free = NULL;
1126         u32 page_vbo = page_idx << log->page_bits;
1127         struct RECORD_PAGE_HDR *page_buf;
1128         struct ntfs_inode *ni = log->ni;
1129         bool bBAAD;
1130
1131         if (vbo >= log->l_size)
1132                 return -EINVAL;
1133
1134         if (!*buffer) {
1135                 to_free = kmalloc(bytes, GFP_NOFS);
1136                 if (!to_free)
1137                         return -ENOMEM;
1138                 *buffer = to_free;
1139         }
1140
1141         page_buf = page_off ? log->one_page_buf : *buffer;
1142
1143         err = ntfs_read_run_nb(ni->mi.sbi, &ni->file.run, page_vbo, page_buf,
1144                                log->page_size, NULL);
1145         if (err)
1146                 goto out;
1147
1148         if (page_buf->rhdr.sign != NTFS_FFFF_SIGNATURE)
1149                 ntfs_fix_post_read(&page_buf->rhdr, PAGE_SIZE, false);
1150
1151         if (page_buf != *buffer)
1152                 memcpy(*buffer, Add2Ptr(page_buf, page_off), bytes);
1153
1154         bBAAD = page_buf->rhdr.sign == NTFS_BAAD_SIGNATURE;
1155
1156         if (usa_error)
1157                 *usa_error = bBAAD;
1158         /* Check that the update sequence array for this page is valid */
1159         /* If we don't allow errors, raise an error status */
1160         else if (bBAAD)
1161                 err = -EINVAL;
1162
1163 out:
1164         if (err && to_free) {
1165                 kfree(to_free);
1166                 *buffer = NULL;
1167         }
1168
1169         return err;
1170 }
1171
1172 /*
1173  * log_read_rst
1174  *
1175  * It walks through candidate restart page offsets in the file (0 or 512,
1176  * then doubling) looking for a valid restart page header. It will stop
1177  * the first time we find a valid page header.
1178  */
1179 static int log_read_rst(struct ntfs_log *log, u32 l_size, bool first,
1180                         struct restart_info *info)
1181 {
1182         u32 skip, vbo;
1183         struct RESTART_HDR *r_page = kmalloc(DefaultLogPageSize, GFP_NOFS);
1184
1185         if (!r_page)
1186                 return -ENOMEM;
1187
1188         /* Determine which restart area we are looking for. */
1189         if (first) {
1190                 vbo = 0;
1191                 skip = 512;
1192         } else {
1193                 vbo = 512;
1194                 skip = 0;
1195         }
1196
1197         /* Loop continuously until we succeed. */
1198         for (; vbo < l_size; vbo = 2 * vbo + skip, skip = 0) {
1199                 bool usa_error;
1200                 u32 sys_page_size;
1201                 bool brst, bchk;
1202                 struct RESTART_AREA *ra;
1203
1204                 /* Read a page header at the current offset. */
1205                 if (read_log_page(log, vbo, (struct RECORD_PAGE_HDR **)&r_page,
1206                                   &usa_error)) {
1207                         /* Ignore any errors. */
1208                         continue;
1209                 }
1210
1211                 /* Exit if the signature is a log record page. */
1212                 if (r_page->rhdr.sign == NTFS_RCRD_SIGNATURE) {
1213                         info->initialized = true;
1214                         break;
1215                 }
1216
1217                 brst = r_page->rhdr.sign == NTFS_RSTR_SIGNATURE;
1218                 bchk = r_page->rhdr.sign == NTFS_CHKD_SIGNATURE;
1219
1220                 if (!bchk && !brst) {
1221                         if (r_page->rhdr.sign != NTFS_FFFF_SIGNATURE) {
1222                                 /*
1223                                  * Remember if the signature does not
1224                                  * indicate an uninitialized file.
1225                                  */
1226                                 info->initialized = true;
1227                         }
1228                         continue;
1229                 }
1230
1231                 ra = NULL;
1232                 info->valid_page = false;
1233                 info->initialized = true;
1234                 info->vbo = vbo;
1235
1236                 /* Let's check the restart area if this is a valid page. */
1237                 if (!is_rst_page_hdr_valid(vbo, r_page))
1238                         goto check_result;
1239                 ra = Add2Ptr(r_page, le16_to_cpu(r_page->ra_off));
1240
1241                 if (!is_rst_area_valid(r_page))
1242                         goto check_result;
1243
1244                 /*
1245                  * We have a valid restart page header and restart area.
1246                  * If chkdsk was run or we have no clients then we have
1247                  * no more checking to do.
1248                  */
1249                 if (bchk || ra->client_idx[1] == LFS_NO_CLIENT_LE) {
1250                         info->valid_page = true;
1251                         goto check_result;
1252                 }
1253
1254                 /* Read the entire restart area. */
1255                 sys_page_size = le32_to_cpu(r_page->sys_page_size);
1256                 if (DefaultLogPageSize != sys_page_size) {
1257                         kfree(r_page);
1258                         r_page = kzalloc(sys_page_size, GFP_NOFS);
1259                         if (!r_page)
1260                                 return -ENOMEM;
1261
1262                         if (read_log_page(log, vbo,
1263                                           (struct RECORD_PAGE_HDR **)&r_page,
1264                                           &usa_error)) {
1265                                 /* Ignore any errors. */
1266                                 kfree(r_page);
1267                                 r_page = NULL;
1268                                 continue;
1269                         }
1270                 }
1271
1272                 if (is_client_area_valid(r_page, usa_error)) {
1273                         info->valid_page = true;
1274                         ra = Add2Ptr(r_page, le16_to_cpu(r_page->ra_off));
1275                 }
1276
1277 check_result:
1278                 /*
1279                  * If chkdsk was run then update the caller's
1280                  * values and return.
1281                  */
1282                 if (r_page->rhdr.sign == NTFS_CHKD_SIGNATURE) {
1283                         info->chkdsk_was_run = true;
1284                         info->last_lsn = le64_to_cpu(r_page->rhdr.lsn);
1285                         info->restart = true;
1286                         info->r_page = r_page;
1287                         return 0;
1288                 }
1289
1290                 /*
1291                  * If we have a valid page then copy the values
1292                  * we need from it.
1293                  */
1294                 if (info->valid_page) {
1295                         info->last_lsn = le64_to_cpu(ra->current_lsn);
1296                         info->restart = true;
1297                         info->r_page = r_page;
1298                         return 0;
1299                 }
1300         }
1301
1302         kfree(r_page);
1303
1304         return 0;
1305 }
1306
1307 /*
1308  * log_init_pg_hdr - Init @log from restart page header.
1309  */
1310 static void log_init_pg_hdr(struct ntfs_log *log, u32 sys_page_size,
1311                             u32 page_size, u16 major_ver, u16 minor_ver)
1312 {
1313         log->sys_page_size = sys_page_size;
1314         log->sys_page_mask = sys_page_size - 1;
1315         log->page_size = page_size;
1316         log->page_mask = page_size - 1;
1317         log->page_bits = blksize_bits(page_size);
1318
1319         log->clst_per_page = log->page_size >> log->ni->mi.sbi->cluster_bits;
1320         if (!log->clst_per_page)
1321                 log->clst_per_page = 1;
1322
1323         log->first_page = major_ver >= 2
1324                                   ? 0x22 * page_size
1325                                   : ((sys_page_size << 1) + (page_size << 1));
1326         log->major_ver = major_ver;
1327         log->minor_ver = minor_ver;
1328 }
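/*
 * Layout implied above: the two restart pages occupy the first two system
 * pages of $LogFile.  For v1.x logs the record pages start after the two
 * restart pages plus two tail pages (2 * sys_page_size + 2 * page_size);
 * for v2.0+ logs the first record page is at 0x22 * page_size, with the
 * intervening pages used for tail copies (cf. last_log_lsn()).
 */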
1329
1330 /*
1331  * log_create - Init @log in cases when we don't have a restart area to use.
1332  */
1333 static void log_create(struct ntfs_log *log, u32 l_size, const u64 last_lsn,
1334                        u32 open_log_count, bool wrapped, bool use_multi_page)
1335 {
1336         log->l_size = l_size;
1337         /* All file offsets must be quadword aligned. */
1338         log->file_data_bits = blksize_bits(l_size) - 3;
1339         log->seq_num_mask = (8 << log->file_data_bits) - 1;
1340         log->seq_num_bits = sizeof(u64) * 8 - log->file_data_bits;
1341         log->seq_num = (last_lsn >> log->file_data_bits) + 2;
1342         log->next_page = log->first_page;
1343         log->oldest_lsn = log->seq_num << log->file_data_bits;
1344         log->oldest_lsn_off = 0;
1345         log->last_lsn = log->oldest_lsn;
1346
1347         log->l_flags |= NTFSLOG_NO_LAST_LSN | NTFSLOG_NO_OLDEST_LSN;
1348
1349         /* Set the correct flags for the I/O and indicate if we have wrapped. */
1350         if (wrapped)
1351                 log->l_flags |= NTFSLOG_WRAPPED;
1352
1353         if (use_multi_page)
1354                 log->l_flags |= NTFSLOG_MULTIPLE_PAGE_IO;
1355
1356         /* Compute the log page values. */
1357         log->data_off = ALIGN(
1358                 offsetof(struct RECORD_PAGE_HDR, fixups) +
1359                         sizeof(short) * ((log->page_size >> SECTOR_SHIFT) + 1),
1360                 8);
1361         log->data_size = log->page_size - log->data_off;
1362         log->record_header_len = sizeof(struct LFS_RECORD_HDR);
1363
1364         /* Remember the different page sizes for reservation. */
1365         log->reserved = log->data_size - log->record_header_len;
1366
1367         /* Compute the restart page values. */
1368         log->ra_off = ALIGN(
1369                 offsetof(struct RESTART_HDR, fixups) +
1370                         sizeof(short) *
1371                                 ((log->sys_page_size >> SECTOR_SHIFT) + 1),
1372                 8);
1373         log->restart_size = log->sys_page_size - log->ra_off;
1374         log->ra_size = struct_size(log->ra, clients, 1);
1375         log->current_openlog_count = open_log_count;
1376
1377         /*
1378          * The total available log file space is the number of
1379          * log file pages times the space available on each page.
1380          */
1381         log->total_avail_pages = log->l_size - log->first_page;
1382         log->total_avail = log->total_avail_pages >> log->page_bits;
1383
1384         /*
1385          * We assume that we can't use the end of the page less than
1386          * the file record size.
1387          * Then we won't need to reserve more than the caller asks for.
1388          */
1389         log->max_current_avail = log->total_avail * log->reserved;
1390         log->total_avail = log->total_avail * log->data_size;
1391         log->current_avail = log->max_current_avail;
1392 }
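/*
 * Worked example (illustrative only): with a 4 KiB log page,
 * data_off = ALIGN(0x28 + 2 * ((4096 >> 9) + 1), 8) = 0x40,
 * data_size = 4096 - 0x40 = 0xFC0, record_header_len = 0x30,
 * and reserved = data_size - record_header_len = 0xF90.
 */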
1393
1394 /*
1395  * log_create_ra - Fill a restart area from the values stored in @log.
1396  */
1397 static struct RESTART_AREA *log_create_ra(struct ntfs_log *log)
1398 {
1399         struct CLIENT_REC *cr;
1400         struct RESTART_AREA *ra = kzalloc(log->restart_size, GFP_NOFS);
1401
1402         if (!ra)
1403                 return NULL;
1404
1405         ra->current_lsn = cpu_to_le64(log->last_lsn);
1406         ra->log_clients = cpu_to_le16(1);
1407         ra->client_idx[1] = LFS_NO_CLIENT_LE;
1408         if (log->l_flags & NTFSLOG_MULTIPLE_PAGE_IO)
1409                 ra->flags = RESTART_SINGLE_PAGE_IO;
1410         ra->seq_num_bits = cpu_to_le32(log->seq_num_bits);
1411         ra->ra_len = cpu_to_le16(log->ra_size);
1412         ra->client_off = cpu_to_le16(offsetof(struct RESTART_AREA, clients));
1413         ra->l_size = cpu_to_le64(log->l_size);
1414         ra->rec_hdr_len = cpu_to_le16(log->record_header_len);
1415         ra->data_off = cpu_to_le16(log->data_off);
1416         ra->open_log_count = cpu_to_le32(log->current_openlog_count + 1);
1417
1418         cr = ra->clients;
1419
1420         cr->prev_client = LFS_NO_CLIENT_LE;
1421         cr->next_client = LFS_NO_CLIENT_LE;
1422
1423         return ra;
1424 }
1425
1426 static u32 final_log_off(struct ntfs_log *log, u64 lsn, u32 data_len)
1427 {
1428         u32 base_vbo = lsn << 3;
1429         u32 final_log_off = (base_vbo & log->seq_num_mask) & ~log->page_mask;
1430         u32 page_off = base_vbo & log->page_mask;
1431         u32 tail = log->page_size - page_off;
1432
1433         page_off -= 1;
1434
1435         /* Add the length of the header. */
1436         data_len += log->record_header_len;
1437
1438         /*
1439          * If this lsn is contained in this log page we are done.
1440          * Otherwise we need to walk through several log pages.
1441          */
1442         if (data_len > tail) {
1443                 data_len -= tail;
1444                 tail = log->data_size;
1445                 page_off = log->data_off - 1;
1446
1447                 for (;;) {
1448                         final_log_off = next_page_off(log, final_log_off);
1449
1450                         /*
1451                          * We are done if the remaining bytes
1452                          * fit on this page.
1453                          */
1454                         if (data_len <= tail)
1455                                 break;
1456                         data_len -= tail;
1457                 }
1458         }
1459
1460         /*
1461          * We add the remaining bytes to our starting position on this page
1462          * and then add that value to the file offset of this log page.
1463          */
1464         return final_log_off + data_len + page_off;
1465 }
1466
1467 static int next_log_lsn(struct ntfs_log *log, const struct LFS_RECORD_HDR *rh,
1468                         u64 *lsn)
1469 {
1470         int err;
1471         u64 this_lsn = le64_to_cpu(rh->this_lsn);
1472         u32 vbo = lsn_to_vbo(log, this_lsn);
1473         u32 end =
1474                 final_log_off(log, this_lsn, le32_to_cpu(rh->client_data_len));
1475         u32 hdr_off = end & ~log->sys_page_mask;
1476         u64 seq = this_lsn >> log->file_data_bits;
1477         struct RECORD_PAGE_HDR *page = NULL;
1478
1479         /* Remember if we wrapped. */
1480         if (end <= vbo)
1481                 seq += 1;
1482
1483         /* Log page header for this page. */
1484         err = read_log_page(log, hdr_off, &page, NULL);
1485         if (err)
1486                 return err;
1487
1488         /*
1489          * If the lsn we were given was not the last lsn on this page,
1490          * then the starting offset for the next lsn is on a quad word
1491          * boundary following the last file offset for the current lsn.
1492          * Otherwise the file offset is the start of the data on the next page.
1493          */
1494         if (this_lsn == le64_to_cpu(page->rhdr.lsn)) {
1495                 /* If we wrapped, we need to increment the sequence number. */
1496                 hdr_off = next_page_off(log, hdr_off);
1497                 if (hdr_off == log->first_page)
1498                         seq += 1;
1499
1500                 vbo = hdr_off + log->data_off;
1501         } else {
1502                 vbo = ALIGN(end, 8);
1503         }
1504
1505         /* Compute the lsn based on the file offset and the sequence count. */
1506         *lsn = vbo_to_lsn(log, vbo, seq);
1507
1508         /*
1509          * If this lsn is within the legal range for the file, we keep it.
1510          * Otherwise *lsn is set to zero to indicate that there are no more lsn's.
1511          */
1512         if (!is_lsn_in_file(log, *lsn))
1513                 *lsn = 0;
1514
1515         kfree(page);
1516
1517         return 0;
1518 }
1519
1520 /*
1521  * current_log_avail - Calculate the number of bytes available for log records.
1522  */
1523 static u32 current_log_avail(struct ntfs_log *log)
1524 {
1525         u32 oldest_off, next_free_off, free_bytes;
1526
1527         if (log->l_flags & NTFSLOG_NO_LAST_LSN) {
1528                 /* The entire file is available. */
1529                 return log->max_current_avail;
1530         }
1531
1532         /*
1533          * If there is a last lsn in the restart area then we know that we will
1534          * have to compute the free range.
1535          * If there is no oldest lsn then start at the first page of the file.
1536          */
1537         oldest_off = (log->l_flags & NTFSLOG_NO_OLDEST_LSN)
1538                              ? log->first_page
1539                              : (log->oldest_lsn_off & ~log->sys_page_mask);
1540
1541         /*
1542          * We will use the next log page offset to compute the next free page.
1543          * If we are going to reuse this page go to the next page.
1544          * If we are at the first page then use the end of the file.
1545          */
1546         next_free_off = (log->l_flags & NTFSLOG_REUSE_TAIL)
1547                                 ? log->next_page + log->page_size
1548                                 : log->next_page == log->first_page
1549                                           ? log->l_size
1550                                           : log->next_page;
1551
1552         /* If the two offsets are the same then there is no available space. */
1553         if (oldest_off == next_free_off)
1554                 return 0;
1555         /*
1556          * If the free offset follows the oldest offset then subtract
1557          * this range from the total available pages.
1558          */
1559         free_bytes =
1560                 oldest_off < next_free_off
1561                         ? log->total_avail_pages - (next_free_off - oldest_off)
1562                         : oldest_off - next_free_off;
1563
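             /*
              * free_bytes currently holds a byte span of whole pages; convert
              * it to a page count and scale by log->reserved (the usable
              * record bytes per page).
              */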
1564         free_bytes >>= log->page_bits;
1565         return free_bytes * log->reserved;
1566 }
1567
1568 static bool check_subseq_log_page(struct ntfs_log *log,
1569                                   const struct RECORD_PAGE_HDR *rp, u32 vbo,
1570                                   u64 seq)
1571 {
1572         u64 lsn_seq;
1573         const struct NTFS_RECORD_HEADER *rhdr = &rp->rhdr;
1574         u64 lsn = le64_to_cpu(rhdr->lsn);
1575
1576         if (rhdr->sign == NTFS_FFFF_SIGNATURE || !rhdr->sign)
1577                 return false;
1578
1579         /*
1580          * If the last lsn on the page was written after the page
1581          * that caused the original error then we have a fatal error.
1582          */
1583         lsn_seq = lsn >> log->file_data_bits;
1584
1585         /*
1586          * If the sequence number for the lsn on the page is equal to or
1587          * greater than the lsn we expect, then this is a subsequent write.
1588          */
1589         return lsn_seq >= seq ||
1590                (lsn_seq == seq - 1 && log->first_page == vbo &&
1591                 vbo != (lsn_to_vbo(log, lsn) & ~log->page_mask));
1592 }
1593
1594 /*
1595  * last_log_lsn
1596  *
1597  * Walks through the log pages for a file, searching for the
1598  * last log page written to the file.
1599  */
1600 static int last_log_lsn(struct ntfs_log *log)
1601 {
1602         int err;
1603         bool usa_error = false;
1604         bool replace_page = false;
1605         bool reuse_page = log->l_flags & NTFSLOG_REUSE_TAIL;
1606         bool wrapped_file, wrapped;
1607
1608         u32 page_cnt = 1, page_pos = 1;
1609         u32 page_off = 0, page_off1 = 0, saved_off = 0;
1610         u32 final_off, second_off, final_off_prev = 0, second_off_prev = 0;
1611         u32 first_file_off = 0, second_file_off = 0;
1612         u32 part_io_count = 0;
1613         u32 tails = 0;
1614         u32 this_off, curpage_off, nextpage_off, remain_pages;
1615
1616         u64 expected_seq, seq_base = 0, lsn_base = 0;
1617         u64 best_lsn, best_lsn1, best_lsn2;
1618         u64 lsn_cur, lsn1, lsn2;
1619         u64 last_ok_lsn = reuse_page ? log->last_lsn : 0;
1620
1621         u16 cur_pos, best_page_pos;
1622
1623         struct RECORD_PAGE_HDR *page = NULL;
1624         struct RECORD_PAGE_HDR *tst_page = NULL;
1625         struct RECORD_PAGE_HDR *first_tail = NULL;
1626         struct RECORD_PAGE_HDR *second_tail = NULL;
1627         struct RECORD_PAGE_HDR *tail_page = NULL;
1628         struct RECORD_PAGE_HDR *second_tail_prev = NULL;
1629         struct RECORD_PAGE_HDR *first_tail_prev = NULL;
1630         struct RECORD_PAGE_HDR *page_bufs = NULL;
1631         struct RECORD_PAGE_HDR *best_page;
1632
1633         if (log->major_ver >= 2) {
1634                 final_off = 0x02 * log->page_size;
1635                 second_off = 0x12 * log->page_size;
1636
1637                 // 0x10 == 0x12 - 0x2
1638                 page_bufs = kmalloc(log->page_size * 0x10, GFP_NOFS);
1639                 if (!page_bufs)
1640                         return -ENOMEM;
1641         } else {
1642                 second_off = log->first_page - log->page_size;
1643                 final_off = second_off - log->page_size;
1644         }
1645
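             /*
              * For v2 logs the tail copies live in two runs of 0x10 pages
              * starting at page 0x02 and page 0x12; the loop below walks the
              * two runs in lock step, collecting the best candidate pages
              * into page_bufs.
              */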
1646 next_tail:
1647         /* Read second tail page (at pos 3/0x12000). */
1648         if (read_log_page(log, second_off, &second_tail, &usa_error) ||
1649             usa_error || second_tail->rhdr.sign != NTFS_RCRD_SIGNATURE) {
1650                 kfree(second_tail);
1651                 second_tail = NULL;
1652                 second_file_off = 0;
1653                 lsn2 = 0;
1654         } else {
1655                 second_file_off = hdr_file_off(log, second_tail);
1656                 lsn2 = le64_to_cpu(second_tail->record_hdr.last_end_lsn);
1657         }
1658
1659         /* Read first tail page (at pos 2/0x2000). */
1660         if (read_log_page(log, final_off, &first_tail, &usa_error) ||
1661             usa_error || first_tail->rhdr.sign != NTFS_RCRD_SIGNATURE) {
1662                 kfree(first_tail);
1663                 first_tail = NULL;
1664                 first_file_off = 0;
1665                 lsn1 = 0;
1666         } else {
1667                 first_file_off = hdr_file_off(log, first_tail);
1668                 lsn1 = le64_to_cpu(first_tail->record_hdr.last_end_lsn);
1669         }
1670
1671         if (log->major_ver < 2) {
1672                 int best_page;
1673
1674                 first_tail_prev = first_tail;
1675                 final_off_prev = first_file_off;
1676                 second_tail_prev = second_tail;
1677                 second_off_prev = second_file_off;
1678                 tails = 1;
1679
1680                 if (!first_tail && !second_tail)
1681                         goto tail_read;
1682
1683                 if (first_tail && second_tail)
1684                         best_page = lsn1 < lsn2 ? 1 : 0;
1685                 else if (first_tail)
1686                         best_page = 0;
1687                 else
1688                         best_page = 1;
1689
1690                 page_off = best_page ? second_file_off : first_file_off;
1691                 seq_base = (best_page ? lsn2 : lsn1) >> log->file_data_bits;
1692                 goto tail_read;
1693         }
1694
1695         best_lsn1 = first_tail ? base_lsn(log, first_tail, first_file_off) : 0;
1696         best_lsn2 =
1697                 second_tail ? base_lsn(log, second_tail, second_file_off) : 0;
1698
1699         if (first_tail && second_tail) {
1700                 if (best_lsn1 > best_lsn2) {
1701                         best_lsn = best_lsn1;
1702                         best_page = first_tail;
1703                         this_off = first_file_off;
1704                 } else {
1705                         best_lsn = best_lsn2;
1706                         best_page = second_tail;
1707                         this_off = second_file_off;
1708                 }
1709         } else if (first_tail) {
1710                 best_lsn = best_lsn1;
1711                 best_page = first_tail;
1712                 this_off = first_file_off;
1713         } else if (second_tail) {
1714                 best_lsn = best_lsn2;
1715                 best_page = second_tail;
1716                 this_off = second_file_off;
1717         } else {
1718                 goto tail_read;
1719         }
1720
1721         best_page_pos = le16_to_cpu(best_page->page_pos);
1722
1723         if (!tails) {
1724                 if (best_page_pos == page_pos) {
1725                         seq_base = best_lsn >> log->file_data_bits;
1726                         saved_off = page_off = le32_to_cpu(best_page->file_off);
1727                         lsn_base = best_lsn;
1728
1729                         memmove(page_bufs, best_page, log->page_size);
1730
1731                         page_cnt = le16_to_cpu(best_page->page_count);
1732                         if (page_cnt > 1)
1733                                 page_pos += 1;
1734
1735                         tails = 1;
1736                 }
1737         } else if (seq_base == (best_lsn >> log->file_data_bits) &&
1738                    saved_off + log->page_size == this_off &&
1739                    lsn_base < best_lsn &&
1740                    (page_pos != page_cnt || best_page_pos == page_pos ||
1741                     best_page_pos == 1) &&
1742                    (page_pos >= page_cnt || best_page_pos == page_pos)) {
1743                 u16 bppc = le16_to_cpu(best_page->page_count);
1744
1745                 saved_off += log->page_size;
1746                 lsn_base = best_lsn;
1747
1748                 memmove(Add2Ptr(page_bufs, tails * log->page_size), best_page,
1749                         log->page_size);
1750
1751                 tails += 1;
1752
1753                 if (best_page_pos != bppc) {
1754                         page_cnt = bppc;
1755                         page_pos = best_page_pos;
1756
1757                         if (page_cnt > 1)
1758                                 page_pos += 1;
1759                 } else {
1760                         page_pos = page_cnt = 1;
1761                 }
1762         } else {
1763                 kfree(first_tail);
1764                 kfree(second_tail);
1765                 goto tail_read;
1766         }
1767
1768         kfree(first_tail_prev);
1769         first_tail_prev = first_tail;
1770         final_off_prev = first_file_off;
1771         first_tail = NULL;
1772
1773         kfree(second_tail_prev);
1774         second_tail_prev = second_tail;
1775         second_off_prev = second_file_off;
1776         second_tail = NULL;
1777
1778         final_off += log->page_size;
1779         second_off += log->page_size;
1780
1781         if (tails < 0x10)
1782                 goto next_tail;
1783 tail_read:
1784         first_tail = first_tail_prev;
1785         final_off = final_off_prev;
1786
1787         second_tail = second_tail_prev;
1788         second_off = second_off_prev;
1789
1790         page_cnt = page_pos = 1;
1791
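             /*
              * Pick the page where the forward scan resumes: if the collected
              * tail copies belong to the current sequence, start at the
              * earlier of the tail's file offset and the expected next page,
              * otherwise start at the expected next page.
              */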
1792         curpage_off = seq_base == log->seq_num ? min(log->next_page, page_off)
1793                                                : log->next_page;
1794
1795         wrapped_file =
1796                 curpage_off == log->first_page &&
1797                 !(log->l_flags & (NTFSLOG_NO_LAST_LSN | NTFSLOG_REUSE_TAIL));
1798
1799         expected_seq = wrapped_file ? (log->seq_num + 1) : log->seq_num;
1800
1801         nextpage_off = curpage_off;
1802
1803 next_page:
1804         tail_page = NULL;
1805         /* Read the next log page. */
1806         err = read_log_page(log, curpage_off, &page, &usa_error);
1807
1808         /* Compute the next log page offset in the file. */
1809         nextpage_off = next_page_off(log, curpage_off);
1810         wrapped = nextpage_off == log->first_page;
1811
1812         if (tails > 1) {
1813                 struct RECORD_PAGE_HDR *cur_page =
1814                         Add2Ptr(page_bufs, curpage_off - page_off);
1815
1816                 if (curpage_off == saved_off) {
1817                         tail_page = cur_page;
1818                         goto use_tail_page;
1819                 }
1820
1821                 if (page_off > curpage_off || curpage_off >= saved_off)
1822                         goto use_tail_page;
1823
1824                 if (page_off1)
1825                         goto use_cur_page;
1826
1827                 if (!err && !usa_error &&
1828                     page->rhdr.sign == NTFS_RCRD_SIGNATURE &&
1829                     cur_page->rhdr.lsn == page->rhdr.lsn &&
1830                     cur_page->record_hdr.next_record_off ==
1831                             page->record_hdr.next_record_off &&
1832                     ((page_pos == page_cnt &&
1833                       le16_to_cpu(page->page_pos) == 1) ||
1834                      (page_pos != page_cnt &&
1835                       le16_to_cpu(page->page_pos) == page_pos + 1 &&
1836                       le16_to_cpu(page->page_count) == page_cnt))) {
1837                         cur_page = NULL;
1838                         goto use_tail_page;
1839                 }
1840
1841                 page_off1 = page_off;
1842
1843 use_cur_page:
1844
1845                 lsn_cur = le64_to_cpu(cur_page->rhdr.lsn);
1846
1847                 if (last_ok_lsn !=
1848                             le64_to_cpu(cur_page->record_hdr.last_end_lsn) &&
1849                     ((lsn_cur >> log->file_data_bits) +
1850                      ((curpage_off <
1851                        (lsn_to_vbo(log, lsn_cur) & ~log->page_mask))
1852                               ? 1
1853                               : 0)) != expected_seq) {
1854                         goto check_tail;
1855                 }
1856
1857                 if (!is_log_record_end(cur_page)) {
1858                         tail_page = NULL;
1859                         last_ok_lsn = lsn_cur;
1860                         goto next_page_1;
1861                 }
1862
1863                 log->seq_num = expected_seq;
1864                 log->l_flags &= ~NTFSLOG_NO_LAST_LSN;
1865                 log->last_lsn = le64_to_cpu(cur_page->record_hdr.last_end_lsn);
1866                 log->ra->current_lsn = cur_page->record_hdr.last_end_lsn;
1867
1868                 if (log->record_header_len <=
1869                     log->page_size -
1870                             le16_to_cpu(cur_page->record_hdr.next_record_off)) {
1871                         log->l_flags |= NTFSLOG_REUSE_TAIL;
1872                         log->next_page = curpage_off;
1873                 } else {
1874                         log->l_flags &= ~NTFSLOG_REUSE_TAIL;
1875                         log->next_page = nextpage_off;
1876                 }
1877
1878                 if (wrapped_file)
1879                         log->l_flags |= NTFSLOG_WRAPPED;
1880
1881                 last_ok_lsn = le64_to_cpu(cur_page->record_hdr.last_end_lsn);
1882                 goto next_page_1;
1883         }
1884
1885         /*
1886          * If we are at the expected first page of a transfer, check to see
1887          * if either tail copy is at this offset.
1888          * If this page is the last page of a transfer, check if we wrote
1889          * a subsequent tail copy.
1890          */
1891         if (page_cnt == page_pos || page_cnt == page_pos + 1) {
1892                 /*
1893                  * Check if the offset matches either the first or second
1894                  * tail copy. It is possible it will match both.
1895                  */
1896                 if (curpage_off == final_off)
1897                         tail_page = first_tail;
1898
1899                 /*
1900                  * If we already matched on the first page then
1901                  * check the ending lsn's.
1902                  */
1903                 if (curpage_off == second_off) {
1904                         if (!tail_page ||
1905                             (second_tail &&
1906                              le64_to_cpu(second_tail->record_hdr.last_end_lsn) >
1907                                      le64_to_cpu(first_tail->record_hdr
1908                                                          .last_end_lsn))) {
1909                                 tail_page = second_tail;
1910                         }
1911                 }
1912         }
1913
1914 use_tail_page:
1915         if (tail_page) {
1916                 /* We have a candidate for a tail copy. */
1917                 lsn_cur = le64_to_cpu(tail_page->record_hdr.last_end_lsn);
1918
1919                 if (last_ok_lsn < lsn_cur) {
1920                         /*
1921                          * If the sequence number is not expected,
1922                          * then don't use the tail copy.
1923                          */
1924                         if (expected_seq != (lsn_cur >> log->file_data_bits))
1925                                 tail_page = NULL;
1926                 } else if (last_ok_lsn > lsn_cur) {
1927                         /*
1928                          * If the last lsn is greater than the one on
1929                          * this page then forget this tail.
1930                          */
1931                         tail_page = NULL;
1932                 }
1933         }
1934
1935         /*
1936          * If we have an error on the current page,
1937          * we will break out of this loop.
1938          */
1939         if (err || usa_error)
1940                 goto check_tail;
1941
1942         /*
1943          * Done if the last lsn on this page doesn't match the previous known
1944          * last lsn and the sequence number is not expected.
1945          */
1946         lsn_cur = le64_to_cpu(page->rhdr.lsn);
1947         if (last_ok_lsn != lsn_cur &&
1948             expected_seq != (lsn_cur >> log->file_data_bits)) {
1949                 goto check_tail;
1950         }
1951
1952         /*
1953          * Check that the page position and page count values are correct.
1954          * If this is the first page of a transfer the position must be 1
1955          * and the count will be unknown.
1956          */
1957         if (page_cnt == page_pos) {
1958                 if (page->page_pos != cpu_to_le16(1) &&
1959                     (!reuse_page || page->page_pos != page->page_count)) {
1960                         /*
1961                          * If the current page is the first page we are
1962                          * looking at and we are reusing this page then
1963                          * it can be either the first or last page of a
1964                          * transfer. Otherwise it can only be the first.
1965                          */
1966                         goto check_tail;
1967                 }
1968         } else if (le16_to_cpu(page->page_count) != page_cnt ||
1969                    le16_to_cpu(page->page_pos) != page_pos + 1) {
1970                 /*
1971                  * The page position better be 1 more than the last page
1972                  * position and the page count better match.
1973                  */
1974                 goto check_tail;
1975         }
1976
1977         /*
1978          * We have a valid page in the file and may have a valid page
1979          * in the tail copy area.
1980          * If the tail page was written after the page in the file then
1981          * break out of the loop.
1982          */
1983         if (tail_page &&
1984             le64_to_cpu(tail_page->record_hdr.last_end_lsn) > lsn_cur) {
1985                 /* Remember if we will replace the page. */
1986                 replace_page = true;
1987                 goto check_tail;
1988         }
1989
1990         tail_page = NULL;
1991
1992         if (is_log_record_end(page)) {
1993                 /*
1994                  * Since we have read this page we know the sequence number
1995                  * is the same as our expected value.
1996                  */
1997                 log->seq_num = expected_seq;
1998                 log->last_lsn = le64_to_cpu(page->record_hdr.last_end_lsn);
1999                 log->ra->current_lsn = page->record_hdr.last_end_lsn;
2000                 log->l_flags &= ~NTFSLOG_NO_LAST_LSN;
2001
2002                 /*
2003                  * If there is room on this page for another header then
2004                  * remember we want to reuse the page.
2005                  */
2006                 if (log->record_header_len <=
2007                     log->page_size -
2008                             le16_to_cpu(page->record_hdr.next_record_off)) {
2009                         log->l_flags |= NTFSLOG_REUSE_TAIL;
2010                         log->next_page = curpage_off;
2011                 } else {
2012                         log->l_flags &= ~NTFSLOG_REUSE_TAIL;
2013                         log->next_page = nextpage_off;
2014                 }
2015
2016                 /* Remember if we wrapped the log file. */
2017                 if (wrapped_file)
2018                         log->l_flags |= NTFSLOG_WRAPPED;
2019         }
2020
2021         /*
2022          * Remember the last page count and position.
2023          * Also remember the last known lsn.
2024          */
2025         page_cnt = le16_to_cpu(page->page_count);
2026         page_pos = le16_to_cpu(page->page_pos);
2027         last_ok_lsn = le64_to_cpu(page->rhdr.lsn);
2028
2029 next_page_1:
2030
2031         if (wrapped) {
2032                 expected_seq += 1;
2033                 wrapped_file = 1;
2034         }
2035
2036         curpage_off = nextpage_off;
2037         kfree(page);
2038         page = NULL;
2039         reuse_page = 0;
2040         goto next_page;
2041
2042 check_tail:
2043         if (tail_page) {
2044                 log->seq_num = expected_seq;
2045                 log->last_lsn = le64_to_cpu(tail_page->record_hdr.last_end_lsn);
2046                 log->ra->current_lsn = tail_page->record_hdr.last_end_lsn;
2047                 log->l_flags &= ~NTFSLOG_NO_LAST_LSN;
2048
2049                 if (log->page_size -
2050                             le16_to_cpu(
2051                                     tail_page->record_hdr.next_record_off) >=
2052                     log->record_header_len) {
2053                         log->l_flags |= NTFSLOG_REUSE_TAIL;
2054                         log->next_page = curpage_off;
2055                 } else {
2056                         log->l_flags &= ~NTFSLOG_REUSE_TAIL;
2057                         log->next_page = nextpage_off;
2058                 }
2059
2060                 if (wrapped)
2061                         log->l_flags |= NTFSLOG_WRAPPED;
2062         }
2063
2064         /* Remember that the partial IO will start at the next page. */
2065         second_off = nextpage_off;
2066
2067         /*
2068          * If the next page is the first page of the file then update
2069          * the sequence number for log records which begin on the next page.
2070          */
2071         if (wrapped)
2072                 expected_seq += 1;
2073
2074         /*
2075          * If we have a tail copy or are performing single page I/O we can
2076          * immediately look at the next page.
2077          */
2078         if (replace_page || (log->ra->flags & RESTART_SINGLE_PAGE_IO)) {
2079                 page_cnt = 2;
2080                 page_pos = 1;
2081                 goto check_valid;
2082         }
2083
2084         if (page_pos != page_cnt)
2085                 goto check_valid;
2086         /*
2087          * If the next page causes us to wrap to the beginning of the log
2088          * file then we know which page to check next.
2089          */
2090         if (wrapped) {
2091                 page_cnt = 2;
2092                 page_pos = 1;
2093                 goto check_valid;
2094         }
2095
2096         cur_pos = 2;
2097
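             /*
              * Probe the following pages to see whether a multi-page transfer
              * continues past the last complete page; any readable page whose
              * page_pos and sequence still match is treated as part of an
              * interrupted (partial) I/O.
              */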
2098 next_test_page:
2099         kfree(tst_page);
2100         tst_page = NULL;
2101
2102         /* Walk through the file, reading log pages. */
2103         err = read_log_page(log, nextpage_off, &tst_page, &usa_error);
2104
2105         /*
2106          * If we get a USA error then assume that we correctly found
2107          * the end of the original transfer.
2108          */
2109         if (usa_error)
2110                 goto file_is_valid;
2111
2112         /*
2113          * If we were able to read the page, we examine it to see if it
2114          * is part of the same or a different I/O block.
2115          */
2116         if (err)
2117                 goto next_test_page_1;
2118
2119         if (le16_to_cpu(tst_page->page_pos) == cur_pos &&
2120             check_subseq_log_page(log, tst_page, nextpage_off, expected_seq)) {
2121                 page_cnt = le16_to_cpu(tst_page->page_count) + 1;
2122                 page_pos = le16_to_cpu(tst_page->page_pos);
2123                 goto check_valid;
2124         } else {
2125                 goto file_is_valid;
2126         }
2127
2128 next_test_page_1:
2129
2130         nextpage_off = next_page_off(log, curpage_off);
2131         wrapped = nextpage_off == log->first_page;
2132
2133         if (wrapped) {
2134                 expected_seq += 1;
2135                 page_cnt = 2;
2136                 page_pos = 1;
2137         }
2138
2139         cur_pos += 1;
2140         part_io_count += 1;
2141         if (!wrapped)
2142                 goto next_test_page;
2143
2144 check_valid:
2145         /* Skip over the remaining pages of this transfer. */
2146         remain_pages = page_cnt - page_pos - 1;
2147         part_io_count += remain_pages;
2148
2149         while (remain_pages--) {
2150                 nextpage_off = next_page_off(log, curpage_off);
2151                 wrapped = nextpage_off == log->first_page;
2152
2153                 if (wrapped)
2154                         expected_seq += 1;
2155         }
2156
2157         /* Call our routine to check this log page. */
2158         kfree(tst_page);
2159         tst_page = NULL;
2160
2161         err = read_log_page(log, nextpage_off, &tst_page, &usa_error);
2162         if (!err && !usa_error &&
2163             check_subseq_log_page(log, tst_page, nextpage_off, expected_seq)) {
2164                 err = -EINVAL;
2165                 goto out;
2166         }
2167
2168 file_is_valid:
2169
2170         /* We have a valid file. */
2171         if (page_off1 || tail_page) {
2172                 struct RECORD_PAGE_HDR *tmp_page;
2173
2174                 if (sb_rdonly(log->ni->mi.sbi->sb)) {
2175                         err = -EROFS;
2176                         goto out;
2177                 }
2178
2179                 if (page_off1) {
2180                         tmp_page = Add2Ptr(page_bufs, page_off1 - page_off);
2181                         tails -= (page_off1 - page_off) / log->page_size;
2182                         if (!tail_page)
2183                                 tails -= 1;
2184                 } else {
2185                         tmp_page = tail_page;
2186                         tails = 1;
2187                 }
2188
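                     /*
                      * Rewrite each collected tail copy back to its real file
                      * offset so the on-disk log no longer contains a torn
                      * multi-page transfer.
                      */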
2189                 while (tails--) {
2190                         u64 off = hdr_file_off(log, tmp_page);
2191
2192                         if (!page) {
2193                                 page = kmalloc(log->page_size, GFP_NOFS);
2194                                 if (!page)
2195                                         return -ENOMEM;
2196                         }
2197
2198                         /*
2199                          * Copy the data from this tail copy into the
2200                          * scratch page, correct it and flush it to disk.
2201                          */
2202                         memcpy(page, tmp_page, log->page_size);
2203
2204                         /* Fill the last flushed lsn value and flush the page. */
2205                         if (log->major_ver < 2)
2206                                 page->rhdr.lsn = page->record_hdr.last_end_lsn;
2207                         else
2208                                 page->file_off = 0;
2209
2210                         page->page_pos = page->page_count = cpu_to_le16(1);
2211
2212                         ntfs_fix_pre_write(&page->rhdr, log->page_size);
2213
2214                         err = ntfs_sb_write_run(log->ni->mi.sbi,
2215                                                 &log->ni->file.run, off, page,
2216                                                 log->page_size, 0);
2217
2218                         if (err)
2219                                 goto out;
2220
2221                         if (part_io_count && second_off == off) {
2222                                 second_off += log->page_size;
2223                                 part_io_count -= 1;
2224                         }
2225
2226                         tmp_page = Add2Ptr(tmp_page, log->page_size);
2227                 }
2228         }
2229
2230         if (part_io_count) {
2231                 if (sb_rdonly(log->ni->mi.sbi->sb)) {
2232                         err = -EROFS;
2233                         goto out;
2234                 }
2235         }
2236
2237 out:
2238         kfree(second_tail);
2239         kfree(first_tail);
2240         kfree(page);
2241         kfree(tst_page);
2242         kfree(page_bufs);
2243
2244         return err;
2245 }
2246
2247 /*
2248  * read_log_rec_buf - Copy a log record from the file to a buffer.
2249  *
2250  * The log record may span several log pages and may even wrap the file.
2251  */
2252 static int read_log_rec_buf(struct ntfs_log *log,
2253                             const struct LFS_RECORD_HDR *rh, void *buffer)
2254 {
2255         int err;
2256         struct RECORD_PAGE_HDR *ph = NULL;
2257         u64 lsn = le64_to_cpu(rh->this_lsn);
2258         u32 vbo = lsn_to_vbo(log, lsn) & ~log->page_mask;
2259         u32 off = lsn_to_page_off(log, lsn) + log->record_header_len;
2260         u32 data_len = le32_to_cpu(rh->client_data_len);
2261
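             /*
              * On the first page the record body starts right after the
              * record header (log->record_header_len past the lsn's offset);
              * on each following page it starts at log->data_off.
              */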
2262         /*
2263          * While there are more bytes to transfer,
2264          * we continue to attempt to perform the read.
2265          */
2266         for (;;) {
2267                 bool usa_error;
2268                 u32 tail = log->page_size - off;
2269
2270                 if (tail >= data_len)
2271                         tail = data_len;
2272
2273                 data_len -= tail;
2274
2275                 err = read_log_page(log, vbo, &ph, &usa_error);
2276                 if (err)
2277                         goto out;
2278
2279                 /*
2280                  * The last lsn on this page better be greater than or equal
2281                  * to the lsn we are copying.
2282                  */
2283                 if (lsn > le64_to_cpu(ph->rhdr.lsn)) {
2284                         err = -EINVAL;
2285                         goto out;
2286                 }
2287
2288                 memcpy(buffer, Add2Ptr(ph, off), tail);
2289
2290                 /* If there are no more bytes to transfer, we exit the loop. */
2291                 if (!data_len) {
2292                         if (!is_log_record_end(ph) ||
2293                             lsn > le64_to_cpu(ph->record_hdr.last_end_lsn)) {
2294                                 err = -EINVAL;
2295                                 goto out;
2296                         }
2297                         break;
2298                 }
2299
2300                 if (ph->rhdr.lsn == ph->record_hdr.last_end_lsn ||
2301                     lsn > le64_to_cpu(ph->rhdr.lsn)) {
2302                         err = -EINVAL;
2303                         goto out;
2304                 }
2305
2306                 vbo = next_page_off(log, vbo);
2307                 off = log->data_off;
2308
2309                 /*
2310                  * Adjust our pointer into the user's buffer to transfer
2311                  * the next block to.
2312                  */
2313                 buffer = Add2Ptr(buffer, tail);
2314         }
2315
2316 out:
2317         kfree(ph);
2318         return err;
2319 }
2320
2321 static int read_rst_area(struct ntfs_log *log, struct NTFS_RESTART **rst_,
2322                          u64 *lsn)
2323 {
2324         int err;
2325         struct LFS_RECORD_HDR *rh = NULL;
2326         const struct CLIENT_REC *cr =
2327                 Add2Ptr(log->ra, le16_to_cpu(log->ra->client_off));
2328         u64 lsnr, lsnc = le64_to_cpu(cr->restart_lsn);
2329         u32 len;
2330         struct NTFS_RESTART *rst;
2331
2332         *lsn = 0;
2333         *rst_ = NULL;
2334
2335         /* If the client doesn't have a restart area, go ahead and exit now. */
2336         if (!lsnc)
2337                 return 0;
2338
2339         err = read_log_page(log, lsn_to_vbo(log, lsnc),
2340                             (struct RECORD_PAGE_HDR **)&rh, NULL);
2341         if (err)
2342                 return err;
2343
2344         rst = NULL;
2345         lsnr = le64_to_cpu(rh->this_lsn);
2346
2347         if (lsnc != lsnr) {
2348                 /* If the lsn values don't match, then the disk is corrupt. */
2349                 err = -EINVAL;
2350                 goto out;
2351         }
2352
2353         *lsn = lsnr;
2354         len = le32_to_cpu(rh->client_data_len);
2355
2356         if (!len) {
2357                 err = 0;
2358                 goto out;
2359         }
2360
2361         if (len < sizeof(struct NTFS_RESTART)) {
2362                 err = -EINVAL;
2363                 goto out;
2364         }
2365
2366         rst = kmalloc(len, GFP_NOFS);
2367         if (!rst) {
2368                 err = -ENOMEM;
2369                 goto out;
2370         }
2371
2372         /* Copy the data into the 'rst' buffer. */
2373         err = read_log_rec_buf(log, rh, rst);
2374         if (err)
2375                 goto out;
2376
2377         *rst_ = rst;
2378         rst = NULL;
2379
2380 out:
2381         kfree(rh);
2382         kfree(rst);
2383
2384         return err;
2385 }
2386
2387 static int find_log_rec(struct ntfs_log *log, u64 lsn, struct lcb *lcb)
2388 {
2389         int err;
2390         struct LFS_RECORD_HDR *rh = lcb->lrh;
2391         u32 rec_len, len;
2392
2393         /* Read the record header for this lsn. */
2394         if (!rh) {
2395                 err = read_log_page(log, lsn_to_vbo(log, lsn),
2396                                     (struct RECORD_PAGE_HDR **)&rh, NULL);
2397
2398                 lcb->lrh = rh;
2399                 if (err)
2400                         return err;
2401         }
2402
2403         /*
2404          * If the lsn of the log record doesn't match the desired
2405          * lsn then the disk is corrupt.
2406          */
2407         if (lsn != le64_to_cpu(rh->this_lsn))
2408                 return -EINVAL;
2409
2410         len = le32_to_cpu(rh->client_data_len);
2411
2412         /*
2413          * Check that the length field isn't greater than the total
2414          * available space in the log file.
2415          */
2416         rec_len = len + log->record_header_len;
2417         if (rec_len >= log->total_avail)
2418                 return -EINVAL;
2419
2420         /*
2421          * If the entire log record is on this log page,
2422          * put a pointer to the log record in the context block.
2423          */
2424         if (rh->flags & LOG_RECORD_MULTI_PAGE) {
2425                 void *lr = kmalloc(len, GFP_NOFS);
2426
2427                 if (!lr)
2428                         return -ENOMEM;
2429
2430                 lcb->log_rec = lr;
2431                 lcb->alloc = true;
2432
2433                 /* Copy the data into the buffer returned. */
2434                 err = read_log_rec_buf(log, rh, lr);
2435                 if (err)
2436                         return err;
2437         } else {
2438                 /* If beyond the end of the current page -> an error. */
2439                 u32 page_off = lsn_to_page_off(log, lsn);
2440
2441                 if (page_off + len + log->record_header_len > log->page_size)
2442                         return -EINVAL;
2443
2444                 lcb->log_rec = Add2Ptr(rh, sizeof(struct LFS_RECORD_HDR));
2445                 lcb->alloc = false;
2446         }
2447
2448         return 0;
2449 }
2450
2451 /*
2452  * read_log_rec_lcb - Init the query operation.
2453  */
2454 static int read_log_rec_lcb(struct ntfs_log *log, u64 lsn, u32 ctx_mode,
2455                             struct lcb **lcb_)
2456 {
2457         int err;
2458         const struct CLIENT_REC *cr;
2459         struct lcb *lcb;
2460
2461         switch (ctx_mode) {
2462         case lcb_ctx_undo_next:
2463         case lcb_ctx_prev:
2464         case lcb_ctx_next:
2465                 break;
2466         default:
2467                 return -EINVAL;
2468         }
2469
2470         /* Check that the given lsn is in the legal range for this client. */
2471         cr = Add2Ptr(log->ra, le16_to_cpu(log->ra->client_off));
2472
2473         if (!verify_client_lsn(log, cr, lsn))
2474                 return -EINVAL;
2475
2476         lcb = kzalloc(sizeof(struct lcb), GFP_NOFS);
2477         if (!lcb)
2478                 return -ENOMEM;
2479         lcb->client = log->client_id;
2480         lcb->ctx_mode = ctx_mode;
2481
2482         /* Find the log record indicated by the given lsn. */
2483         err = find_log_rec(log, lsn, lcb);
2484         if (err)
2485                 goto out;
2486
2487         *lcb_ = lcb;
2488         return 0;
2489
2490 out:
2491         lcb_put(lcb);
2492         *lcb_ = NULL;
2493         return err;
2494 }
2495
2496 /*
2497  * find_client_next_lsn
2498  *
2499  * Attempt to find the next lsn to return to a client based on the context mode.
2500  */
2501 static int find_client_next_lsn(struct ntfs_log *log, struct lcb *lcb, u64 *lsn)
2502 {
2503         int err;
2504         u64 next_lsn;
2505         struct LFS_RECORD_HDR *hdr;
2506
2507         hdr = lcb->lrh;
2508         *lsn = 0;
2509
2510         if (lcb_ctx_next != lcb->ctx_mode)
2511                 goto check_undo_next;
2512
2513         /* Loop as long as another lsn can be found. */
2514         for (;;) {
2515                 u64 current_lsn;
2516
2517                 err = next_log_lsn(log, hdr, &current_lsn);
2518                 if (err)
2519                         goto out;
2520
2521                 if (!current_lsn)
2522                         break;
2523
2524                 if (hdr != lcb->lrh)
2525                         kfree(hdr);
2526
2527                 hdr = NULL;
2528                 err = read_log_page(log, lsn_to_vbo(log, current_lsn),
2529                                     (struct RECORD_PAGE_HDR **)&hdr, NULL);
2530                 if (err)
2531                         goto out;
2532
2533                 if (memcmp(&hdr->client, &lcb->client,
2534                            sizeof(struct CLIENT_ID))) {
2535                         /*err = -EINVAL; */
2536                 } else if (LfsClientRecord == hdr->record_type) {
2537                         kfree(lcb->lrh);
2538                         lcb->lrh = hdr;
2539                         *lsn = current_lsn;
2540                         return 0;
2541                 }
2542         }
2543
2544 out:
2545         if (hdr != lcb->lrh)
2546                 kfree(hdr);
2547         return err;
2548
2549 check_undo_next:
2550         if (lcb_ctx_undo_next == lcb->ctx_mode)
2551                 next_lsn = le64_to_cpu(hdr->client_undo_next_lsn);
2552         else if (lcb_ctx_prev == lcb->ctx_mode)
2553                 next_lsn = le64_to_cpu(hdr->client_prev_lsn);
2554         else
2555                 return 0;
2556
2557         if (!next_lsn)
2558                 return 0;
2559
2560         if (!verify_client_lsn(
2561                     log, Add2Ptr(log->ra, le16_to_cpu(log->ra->client_off)),
2562                     next_lsn))
2563                 return 0;
2564
2565         hdr = NULL;
2566         err = read_log_page(log, lsn_to_vbo(log, next_lsn),
2567                             (struct RECORD_PAGE_HDR **)&hdr, NULL);
2568         if (err)
2569                 return err;
2570         kfree(lcb->lrh);
2571         lcb->lrh = hdr;
2572
2573         *lsn = next_lsn;
2574
2575         return 0;
2576 }
2577
2578 static int read_next_log_rec(struct ntfs_log *log, struct lcb *lcb, u64 *lsn)
2579 {
2580         int err;
2581
2582         err = find_client_next_lsn(log, lcb, lsn);
2583         if (err)
2584                 return err;
2585
2586         if (!*lsn)
2587                 return 0;
2588
2589         if (lcb->alloc)
2590                 kfree(lcb->log_rec);
2591
2592         lcb->log_rec = NULL;
2593         lcb->alloc = false;
2594         kfree(lcb->lrh);
2595         lcb->lrh = NULL;
2596
2597         return find_log_rec(log, *lsn, lcb);
2598 }
2599
2600 static inline bool check_index_header(const struct INDEX_HDR *hdr, size_t bytes)
2601 {
2602         __le16 mask;
2603         u32 min_de, de_off, used, total;
2604         const struct NTFS_DE *e;
2605
2606         if (hdr_has_subnode(hdr)) {
2607                 min_de = sizeof(struct NTFS_DE) + sizeof(u64);
2608                 mask = NTFS_IE_HAS_SUBNODES;
2609         } else {
2610                 min_de = sizeof(struct NTFS_DE);
2611                 mask = 0;
2612         }
2613
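             /*
              * Entries that reference sub-nodes carry a trailing u64 block
              * number, so the minimal entry size grows by sizeof(u64) and
              * every entry must have the NTFS_IE_HAS_SUBNODES flag set.
              */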
2614         de_off = le32_to_cpu(hdr->de_off);
2615         used = le32_to_cpu(hdr->used);
2616         total = le32_to_cpu(hdr->total);
2617
2618         if (de_off > bytes - min_de || used > bytes || total > bytes ||
2619             de_off + min_de > used || used > total) {
2620                 return false;
2621         }
2622
2623         e = Add2Ptr(hdr, de_off);
2624         for (;;) {
2625                 u16 esize = le16_to_cpu(e->size);
2626                 struct NTFS_DE *next = Add2Ptr(e, esize);
2627
2628                 if (esize < min_de || PtrOffset(hdr, next) > used ||
2629                     (e->flags & NTFS_IE_HAS_SUBNODES) != mask) {
2630                         return false;
2631                 }
2632
2633                 if (de_is_last(e))
2634                         break;
2635
2636                 e = next;
2637         }
2638
2639         return true;
2640 }
2641
2642 static inline bool check_index_buffer(const struct INDEX_BUFFER *ib, u32 bytes)
2643 {
2644         u16 fo;
2645         const struct NTFS_RECORD_HEADER *r = &ib->rhdr;
2646
2647         if (r->sign != NTFS_INDX_SIGNATURE)
2648                 return false;
2649
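             /*
              * The update sequence (fixup) array, one __le16 per sector plus
              * the sequence number itself, must fit inside the first sector
              * of the buffer.
              */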
2650         fo = (SECTOR_SIZE - ((bytes >> SECTOR_SHIFT) + 1) * sizeof(short));
2651
2652         if (le16_to_cpu(r->fix_off) > fo)
2653                 return false;
2654
2655         if ((le16_to_cpu(r->fix_num) - 1) * SECTOR_SIZE != bytes)
2656                 return false;
2657
2658         return check_index_header(&ib->ihdr,
2659                                   bytes - offsetof(struct INDEX_BUFFER, ihdr));
2660 }
2661
2662 static inline bool check_index_root(const struct ATTRIB *attr,
2663                                     struct ntfs_sb_info *sbi)
2664 {
2665         bool ret;
2666         const struct INDEX_ROOT *root = resident_data(attr);
2667         u8 index_bits = le32_to_cpu(root->index_block_size) >= sbi->cluster_size
2668                                 ? sbi->cluster_bits
2669                                 : SECTOR_SHIFT;
2670         u8 block_clst = root->index_block_clst;
2671
2672         if (le32_to_cpu(attr->res.data_size) < sizeof(struct INDEX_ROOT) ||
2673             (root->type != ATTR_NAME && root->type != ATTR_ZERO) ||
2674             (root->type == ATTR_NAME &&
2675              root->rule != NTFS_COLLATION_TYPE_FILENAME) ||
2676             (le32_to_cpu(root->index_block_size) !=
2677              (block_clst << index_bits)) ||
2678             (block_clst != 1 && block_clst != 2 && block_clst != 4 &&
2679              block_clst != 8 && block_clst != 0x10 && block_clst != 0x20 &&
2680              block_clst != 0x40 && block_clst != 0x80)) {
2681                 return false;
2682         }
2683
2684         ret = check_index_header(&root->ihdr,
2685                                  le32_to_cpu(attr->res.data_size) -
2686                                          offsetof(struct INDEX_ROOT, ihdr));
2687         return ret;
2688 }
2689
2690 static inline bool check_attr(const struct MFT_REC *rec,
2691                               const struct ATTRIB *attr,
2692                               struct ntfs_sb_info *sbi)
2693 {
2694         u32 asize = le32_to_cpu(attr->size);
2695         u32 rsize = 0;
2696         u64 dsize, svcn, evcn;
2697         u16 run_off;
2698
2699         /* Check the fixed part of the attribute record header. */
2700         if (asize >= sbi->record_size ||
2701             asize + PtrOffset(rec, attr) >= sbi->record_size ||
2702             (attr->name_len &&
2703              le16_to_cpu(attr->name_off) + attr->name_len * sizeof(short) >
2704                      asize)) {
2705                 return false;
2706         }
2707
2708         /* Check the attribute fields. */
2709         switch (attr->non_res) {
2710         case 0:
2711                 rsize = le32_to_cpu(attr->res.data_size);
2712                 if (rsize >= asize ||
2713                     le16_to_cpu(attr->res.data_off) + rsize > asize) {
2714                         return false;
2715                 }
2716                 break;
2717
2718         case 1:
2719                 dsize = le64_to_cpu(attr->nres.data_size);
2720                 svcn = le64_to_cpu(attr->nres.svcn);
2721                 evcn = le64_to_cpu(attr->nres.evcn);
2722                 run_off = le16_to_cpu(attr->nres.run_off);
2723
2724                 if (svcn > evcn + 1 || run_off >= asize ||
2725                     le64_to_cpu(attr->nres.valid_size) > dsize ||
2726                     dsize > le64_to_cpu(attr->nres.alloc_size)) {
2727                         return false;
2728                 }
2729
2730                 if (run_off > asize)
2731                         return false;
2732
2733                 if (run_unpack(NULL, sbi, 0, svcn, evcn, svcn,
2734                                Add2Ptr(attr, run_off), asize - run_off) < 0) {
2735                         return false;
2736                 }
2737
2738                 return true;
2739
2740         default:
2741                 return false;
2742         }
2743
2744         switch (attr->type) {
2745         case ATTR_NAME:
2746                 if (fname_full_size(Add2Ptr(
2747                             attr, le16_to_cpu(attr->res.data_off))) > asize) {
2748                         return false;
2749                 }
2750                 break;
2751
2752         case ATTR_ROOT:
2753                 return check_index_root(attr, sbi);
2754
2755         case ATTR_STD:
2756                 if (rsize < sizeof(struct ATTR_STD_INFO5) &&
2757                     rsize != sizeof(struct ATTR_STD_INFO)) {
2758                         return false;
2759                 }
2760                 break;
2761
2762         case ATTR_LIST:
2763         case ATTR_ID:
2764         case ATTR_SECURE:
2765         case ATTR_LABEL:
2766         case ATTR_VOL_INFO:
2767         case ATTR_DATA:
2768         case ATTR_ALLOC:
2769         case ATTR_BITMAP:
2770         case ATTR_REPARSE:
2771         case ATTR_EA_INFO:
2772         case ATTR_EA:
2773         case ATTR_PROPERTYSET:
2774         case ATTR_LOGGED_UTILITY_STREAM:
2775                 break;
2776
2777         default:
2778                 return false;
2779         }
2780
2781         return true;
2782 }
2783
2784 static inline bool check_file_record(const struct MFT_REC *rec,
2785                                      const struct MFT_REC *rec2,
2786                                      struct ntfs_sb_info *sbi)
2787 {
2788         const struct ATTRIB *attr;
2789         u16 fo = le16_to_cpu(rec->rhdr.fix_off);
2790         u16 fn = le16_to_cpu(rec->rhdr.fix_num);
2791         u16 ao = le16_to_cpu(rec->attr_off);
2792         u32 rs = sbi->record_size;
2793
2794         /* Check the file record header for consistency. */
2795         if (rec->rhdr.sign != NTFS_FILE_SIGNATURE ||
2796             fo > (SECTOR_SIZE - ((rs >> SECTOR_SHIFT) + 1) * sizeof(short)) ||
2797             (fn - 1) * SECTOR_SIZE != rs || ao < MFTRECORD_FIXUP_OFFSET_1 ||
2798             ao > sbi->record_size - SIZEOF_RESIDENT || !is_rec_inuse(rec) ||
2799             le32_to_cpu(rec->total) != rs) {
2800                 return false;
2801         }
2802
2803         /* Loop to check all of the attributes. */
2804         for (attr = Add2Ptr(rec, ao); attr->type != ATTR_END;
2805              attr = Add2Ptr(attr, le32_to_cpu(attr->size))) {
2806                 if (check_attr(rec, attr, sbi))
2807                         continue;
2808                 return false;
2809         }
2810
2811         return true;
2812 }
2813
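/*
 * check_lsn - Return true if the logged update still must be applied:
 * either there is no redo lsn (undo pass) or the on-disk record carries
 * an lsn older than *rlsn.
 */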
2814 static inline int check_lsn(const struct NTFS_RECORD_HEADER *hdr,
2815                             const u64 *rlsn)
2816 {
2817         u64 lsn;
2818
2819         if (!rlsn)
2820                 return true;
2821
2822         lsn = le64_to_cpu(hdr->lsn);
2823
2824         if (hdr->sign == NTFS_HOLE_SIGNATURE)
2825                 return false;
2826
2827         if (*rlsn > lsn)
2828                 return true;
2829
2830         return false;
2831 }
2832
2833 static inline bool check_if_attr(const struct MFT_REC *rec,
2834                                  const struct LOG_REC_HDR *lrh)
2835 {
2836         u16 ro = le16_to_cpu(lrh->record_off);
2837         u16 o = le16_to_cpu(rec->attr_off);
2838         const struct ATTRIB *attr = Add2Ptr(rec, o);
2839
2840         while (o < ro) {
2841                 u32 asize;
2842
2843                 if (attr->type == ATTR_END)
2844                         break;
2845
2846                 asize = le32_to_cpu(attr->size);
2847                 if (!asize)
2848                         break;
2849
2850                 o += asize;
2851                 attr = Add2Ptr(attr, asize);
2852         }
2853
2854         return o == ro;
2855 }
2856
2857 static inline bool check_if_index_root(const struct MFT_REC *rec,
2858                                        const struct LOG_REC_HDR *lrh)
2859 {
2860         u16 ro = le16_to_cpu(lrh->record_off);
2861         u16 o = le16_to_cpu(rec->attr_off);
2862         const struct ATTRIB *attr = Add2Ptr(rec, o);
2863
2864         while (o < ro) {
2865                 u32 asize;
2866
2867                 if (attr->type == ATTR_END)
2868                         break;
2869
2870                 asize = le32_to_cpu(attr->size);
2871                 if (!asize)
2872                         break;
2873
2874                 o += asize;
2875                 attr = Add2Ptr(attr, asize);
2876         }
2877
2878         return o == ro && attr->type == ATTR_ROOT;
2879 }
2880
2881 static inline bool check_if_root_index(const struct ATTRIB *attr,
2882                                        const struct INDEX_HDR *hdr,
2883                                        const struct LOG_REC_HDR *lrh)
2884 {
2885         u16 ao = le16_to_cpu(lrh->attr_off);
2886         u32 de_off = le32_to_cpu(hdr->de_off);
2887         u32 o = PtrOffset(attr, hdr) + de_off;
2888         const struct NTFS_DE *e = Add2Ptr(hdr, de_off);
2889         u32 asize = le32_to_cpu(attr->size);
2890
2891         while (o < ao) {
2892                 u16 esize;
2893
2894                 if (o >= asize)
2895                         break;
2896
2897                 esize = le16_to_cpu(e->size);
2898                 if (!esize)
2899                         break;
2900
2901                 o += esize;
2902                 e = Add2Ptr(e, esize);
2903         }
2904
2905         return o == ao;
2906 }
2907
2908 static inline bool check_if_alloc_index(const struct INDEX_HDR *hdr,
2909                                         u32 attr_off)
2910 {
2911         u32 de_off = le32_to_cpu(hdr->de_off);
2912         u32 o = offsetof(struct INDEX_BUFFER, ihdr) + de_off;
2913         const struct NTFS_DE *e = Add2Ptr(hdr, de_off);
2914         u32 used = le32_to_cpu(hdr->used);
2915
2916         while (o < attr_off) {
2917                 u16 esize;
2918
2919                 if (de_off >= used)
2920                         break;
2921
2922                 esize = le16_to_cpu(e->size);
2923                 if (!esize)
2924                         break;
2925
2926                 o += esize;
2927                 de_off += esize;
2928                 e = Add2Ptr(e, esize);
2929         }
2930
2931         return o == attr_off;
2932 }
2933
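/*
 * change_attr_size - Resize an attribute record in place, sliding the
 * rest of the MFT record up or down and updating the used size.
 */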
2934 static inline void change_attr_size(struct MFT_REC *rec, struct ATTRIB *attr,
2935                                     u32 nsize)
2936 {
2937         u32 asize = le32_to_cpu(attr->size);
2938         int dsize = nsize - asize;
2939         u8 *next = Add2Ptr(attr, asize);
2940         u32 used = le32_to_cpu(rec->used);
2941
2942         memmove(Add2Ptr(attr, nsize), next, used - PtrOffset(rec, next));
2943
2944         rec->used = cpu_to_le32(used + dsize);
2945         attr->size = cpu_to_le32(nsize);
2946 }
2947
2948 struct OpenAttr {
2949         struct ATTRIB *attr;
2950         struct runs_tree *run1;
2951         struct runs_tree run0;
2952         struct ntfs_inode *ni;
2953         // CLST rno;
2954 };
2955
2956 /*
2957  * cmp_type_and_name
2958  *
2959  * Return: 0 if 'a1' and 'a2' have the same type and name.
2960  */
2961 static inline int cmp_type_and_name(const struct ATTRIB *a1,
2962                                     const struct ATTRIB *a2)
2963 {
2964         return a1->type != a2->type || a1->name_len != a2->name_len ||
2965                (a1->name_len && memcmp(attr_name(a1), attr_name(a2),
2966                                        a1->name_len * sizeof(short)));
2967 }
2968
2969 static struct OpenAttr *find_loaded_attr(struct ntfs_log *log,
2970                                          const struct ATTRIB *attr, CLST rno)
2971 {
2972         struct OPEN_ATTR_ENRTY *oe = NULL;
2973
2974         while ((oe = enum_rstbl(log->open_attr_tbl, oe))) {
2975                 struct OpenAttr *op_attr;
2976
2977                 if (ino_get(&oe->ref) != rno)
2978                         continue;
2979
2980                 op_attr = (struct OpenAttr *)oe->ptr;
2981                 if (!cmp_type_and_name(op_attr->attr, attr))
2982                         return op_attr;
2983         }
2984         return NULL;
2985 }
2986
2987 static struct ATTRIB *attr_create_nonres_log(struct ntfs_sb_info *sbi,
2988                                              enum ATTR_TYPE type, u64 size,
2989                                              const u16 *name, size_t name_len,
2990                                              __le16 flags)
2991 {
2992         struct ATTRIB *attr;
2993         u32 name_size = ALIGN(name_len * sizeof(short), 8);
2994         bool is_ext = flags & (ATTR_FLAG_COMPRESSED | ATTR_FLAG_SPARSED);
2995         u32 asize = name_size +
2996                     (is_ext ? SIZEOF_NONRESIDENT_EX : SIZEOF_NONRESIDENT);
2997
2998         attr = kzalloc(asize, GFP_NOFS);
2999         if (!attr)
3000                 return NULL;
3001
3002         attr->type = type;
3003         attr->size = cpu_to_le32(asize);
3004         attr->flags = flags;
3005         attr->non_res = 1;
3006         attr->name_len = name_len;
3007
3008         attr->nres.evcn = cpu_to_le64((u64)bytes_to_cluster(sbi, size) - 1);
3009         attr->nres.alloc_size = cpu_to_le64(ntfs_up_cluster(sbi, size));
3010         attr->nres.data_size = cpu_to_le64(size);
3011         attr->nres.valid_size = attr->nres.data_size;
3012         if (is_ext) {
3013                 attr->name_off = SIZEOF_NONRESIDENT_EX_LE;
3014                 if (is_attr_compressed(attr))
3015                         attr->nres.c_unit = COMPRESSION_UNIT;
3016
3017                 attr->nres.run_off =
3018                         cpu_to_le16(SIZEOF_NONRESIDENT_EX + name_size);
3019                 memcpy(Add2Ptr(attr, SIZEOF_NONRESIDENT_EX), name,
3020                        name_len * sizeof(short));
3021         } else {
3022                 attr->name_off = SIZEOF_NONRESIDENT_LE;
3023                 attr->nres.run_off =
3024                         cpu_to_le16(SIZEOF_NONRESIDENT + name_size);
3025                 memcpy(Add2Ptr(attr, SIZEOF_NONRESIDENT), name,
3026                        name_len * sizeof(short));
3027         }
3028
3029         return attr;
3030 }
3031
3032 /*
3033  * do_action - Common routine for the Redo and Undo Passes.
3034  * @rlsn: If it is NULL then undo.
3035  */
3036 static int do_action(struct ntfs_log *log, struct OPEN_ATTR_ENRTY *oe,
3037                      const struct LOG_REC_HDR *lrh, u32 op, void *data,
3038                      u32 dlen, u32 rec_len, const u64 *rlsn)
3039 {
3040         int err = 0;
3041         struct ntfs_sb_info *sbi = log->ni->mi.sbi;
3042         struct inode *inode = NULL, *inode_parent;
3043         struct mft_inode *mi = NULL, *mi2_child = NULL;
3044         CLST rno = 0, rno_base = 0;
3045         struct INDEX_BUFFER *ib = NULL;
3046         struct MFT_REC *rec = NULL;
3047         struct ATTRIB *attr = NULL, *attr2;
3048         struct INDEX_HDR *hdr;
3049         struct INDEX_ROOT *root;
3050         struct NTFS_DE *e, *e1, *e2;
3051         struct NEW_ATTRIBUTE_SIZES *new_sz;
3052         struct ATTR_FILE_NAME *fname;
3053         struct OpenAttr *oa, *oa2;
3054         u32 nsize, t32, asize, used, esize, bmp_off, bmp_bits;
3055         u16 id, id2;
3056         u32 record_size = sbi->record_size;
3057         u64 t64;
3058         u16 roff = le16_to_cpu(lrh->record_off);
3059         u16 aoff = le16_to_cpu(lrh->attr_off);
3060         u64 lco = 0;
3061         u64 cbo = (u64)le16_to_cpu(lrh->cluster_off) << SECTOR_SHIFT;
3062         u64 tvo = le64_to_cpu(lrh->target_vcn) << sbi->cluster_bits;
3063         u64 vbo = cbo + tvo;
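             /*
              * vbo: byte offset of the logged change - target_vcn scaled to
              * bytes plus the sector-granular offset within that cluster.
              */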
3064         void *buffer_le = NULL;
3065         u32 bytes = 0;
3066         bool a_dirty = false;
3067         u16 data_off;
3068
3069         oa = oe->ptr;
3070
3071         /* Big switch to prepare. */
3072         switch (op) {
3073         /* ============================================================
3074          * Process MFT records, as described by the current log record.
3075          * ============================================================
3076          */
3077         case InitializeFileRecordSegment:
3078         case DeallocateFileRecordSegment:
3079         case WriteEndOfFileRecordSegment:
3080         case CreateAttribute:
3081         case DeleteAttribute:
3082         case UpdateResidentValue:
3083         case UpdateMappingPairs:
3084         case SetNewAttributeSizes:
3085         case AddIndexEntryRoot:
3086         case DeleteIndexEntryRoot:
3087         case SetIndexEntryVcnRoot:
3088         case UpdateFileNameRoot:
3089         case UpdateRecordDataRoot:
3090         case ZeroEndOfFileRecord:
3091                 rno = vbo >> sbi->record_bits;
3092                 inode = ilookup(sbi->sb, rno);
3093                 if (inode) {
3094                         mi = &ntfs_i(inode)->mi;
3095                 } else if (op == InitializeFileRecordSegment) {
3096                         mi = kzalloc(sizeof(struct mft_inode), GFP_NOFS);
3097                         if (!mi)
3098                                 return -ENOMEM;
3099                         err = mi_format_new(mi, sbi, rno, 0, false);
3100                         if (err)
3101                                 goto out;
3102                 } else {
3103                         /* Read from disk. */
3104                         err = mi_get(sbi, rno, &mi);
3105                         if (err)
3106                                 return err;
3107                 }
3108                 rec = mi->mrec;
3109
3110                 if (op == DeallocateFileRecordSegment)
3111                         goto skip_load_parent;
3112
3113                 if (InitializeFileRecordSegment != op) {
3114                         if (rec->rhdr.sign == NTFS_BAAD_SIGNATURE)
3115                                 goto dirty_vol;
3116                         if (!check_lsn(&rec->rhdr, rlsn))
3117                                 goto out;
3118                         if (!check_file_record(rec, NULL, sbi))
3119                                 goto dirty_vol;
3120                         attr = Add2Ptr(rec, roff);
3121                 }
3122
3123                 if (is_rec_base(rec) || InitializeFileRecordSegment == op) {
3124                         rno_base = rno;
3125                         goto skip_load_parent;
3126                 }
3127
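                /*
                 * This is not a base record: try to reach it through its
                 * base (parent) record so any cached copy stays coherent,
                 * and on success switch 'mi'/'rec' to the parent's view.
                 */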
3128                 rno_base = ino_get(&rec->parent_ref);
3129                 inode_parent = ntfs_iget5(sbi->sb, &rec->parent_ref, NULL);
3130                 if (IS_ERR(inode_parent))
3131                         goto skip_load_parent;
3132
3133                 if (is_bad_inode(inode_parent)) {
3134                         iput(inode_parent);
3135                         goto skip_load_parent;
3136                 }
3137
3138                 if (ni_load_mi_ex(ntfs_i(inode_parent), rno, &mi2_child)) {
3139                         iput(inode_parent);
3140                 } else {
3141                         if (mi2_child->mrec != mi->mrec)
3142                                 memcpy(mi2_child->mrec, mi->mrec,
3143                                        sbi->record_size);
3144
3145                         if (inode)
3146                                 iput(inode);
3147                         else if (mi)
3148                                 mi_put(mi);
3149
3150                         inode = inode_parent;
3151                         mi = mi2_child;
3152                         rec = mi2_child->mrec;
3153                         attr = Add2Ptr(rec, roff);
3154                 }
3155
3156 skip_load_parent:
3157                 inode_parent = NULL;
3158                 break;
3159
3160         /*
3161          * Process attributes, as described by the current log record.
3162          */
3163         case UpdateNonresidentValue:
3164         case AddIndexEntryAllocation:
3165         case DeleteIndexEntryAllocation:
3166         case WriteEndOfIndexBuffer:
3167         case SetIndexEntryVcnAllocation:
3168         case UpdateFileNameAllocation:
3169         case SetBitsInNonresidentBitMap:
3170         case ClearBitsInNonresidentBitMap:
3171         case UpdateRecordDataAllocation:
3172                 attr = oa->attr;
3173                 bytes = UpdateNonresidentValue == op ? dlen : 0;
3174                 lco = (u64)le16_to_cpu(lrh->lcns_follow) << sbi->cluster_bits;
3175
3176                 if (attr->type == ATTR_ALLOC) {
3177                         t32 = le32_to_cpu(oe->bytes_per_index);
3178                         if (bytes < t32)
3179                                 bytes = t32;
3180                 }
3181
3182                 if (!bytes)
3183                         bytes = lco - cbo;
3184
3185                 bytes += roff;
3186                 if (attr->type == ATTR_ALLOC)
3187                         bytes = (bytes + 511) & ~511; // Align to a 512-byte boundary.
3188
3189                 buffer_le = kmalloc(bytes, GFP_NOFS);
3190                 if (!buffer_le)
3191                         return -ENOMEM;
3192
3193                 err = ntfs_read_run_nb(sbi, oa->run1, vbo, buffer_le, bytes,
3194                                        NULL);
3195                 if (err)
3196                         goto out;
3197
3198                 if (attr->type == ATTR_ALLOC && *(int *)buffer_le)
3199                         ntfs_fix_post_read(buffer_le, bytes, false);
3200                 break;
3201
3202         default:
3203                 WARN_ON(1);
3204         }
3205
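        /*
         * At this point, for the MFT-record operations 'rec' and 'attr'
         * point into the loaded file record; for the allocation/bitmap
         * operations 'buffer_le' holds 'bytes' bytes read from the
         * attribute starting at 'vbo'.
         */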
3206         /* Big switch to do operation. */
3207         switch (op) {
3208         case InitializeFileRecordSegment:
3209                 if (roff + dlen > record_size)
3210                         goto dirty_vol;
3211
3212                 memcpy(Add2Ptr(rec, roff), data, dlen);
3213                 mi->dirty = true;
3214                 break;
3215
3216         case DeallocateFileRecordSegment:
3217                 clear_rec_inuse(rec);
3218                 le16_add_cpu(&rec->seq, 1);
3219                 mi->dirty = true;
3220                 break;
3221
3222         case WriteEndOfFileRecordSegment:
3223                 attr2 = (struct ATTRIB *)data;
3224                 if (!check_if_attr(rec, lrh) || roff + dlen > record_size)
3225                         goto dirty_vol;
3226
3227                 memmove(attr, attr2, dlen);
3228                 rec->used = cpu_to_le32(ALIGN(roff + dlen, 8));
3229
3230                 mi->dirty = true;
3231                 break;
3232
3233         case CreateAttribute:
3234                 attr2 = (struct ATTRIB *)data;
3235                 asize = le32_to_cpu(attr2->size);
3236                 used = le32_to_cpu(rec->used);
3237
3238                 if (!check_if_attr(rec, lrh) || dlen < SIZEOF_RESIDENT ||
3239                     !IS_ALIGNED(asize, 8) ||
3240                     Add2Ptr(attr2, asize) > Add2Ptr(lrh, rec_len) ||
3241                     dlen > record_size - used) {
3242                         goto dirty_vol;
3243                 }
3244
3245                 memmove(Add2Ptr(attr, asize), attr, used - roff);
3246                 memcpy(attr, attr2, asize);
3247
3248                 rec->used = cpu_to_le32(used + asize);
3249                 id = le16_to_cpu(rec->next_attr_id);
3250                 id2 = le16_to_cpu(attr2->id);
3251                 if (id <= id2)
3252                         rec->next_attr_id = cpu_to_le16(id2 + 1);
3253                 if (is_attr_indexed(attr))
3254                         le16_add_cpu(&rec->hard_links, 1);
3255
3256                 oa2 = find_loaded_attr(log, attr, rno_base);
3257                 if (oa2) {
3258                         void *p2 = kmemdup(attr, le32_to_cpu(attr->size),
3259                                            GFP_NOFS);
3260                         if (p2) {
3261                                 // run_close(oa2->run1);
3262                                 kfree(oa2->attr);
3263                                 oa2->attr = p2;
3264                         }
3265                 }
3266
3267                 mi->dirty = true;
3268                 break;
3269
3270         case DeleteAttribute:
3271                 asize = le32_to_cpu(attr->size);
3272                 used = le32_to_cpu(rec->used);
3273
3274                 if (!check_if_attr(rec, lrh))
3275                         goto dirty_vol;
3276
3277                 rec->used = cpu_to_le32(used - asize);
3278                 if (is_attr_indexed(attr))
3279                         le16_add_cpu(&rec->hard_links, -1);
3280
3281                 memmove(attr, Add2Ptr(attr, asize), used - asize - roff);
3282
3283                 mi->dirty = true;
3284                 break;
3285
3286         case UpdateResidentValue:
3287                 nsize = aoff + dlen;
3288
3289                 if (!check_if_attr(rec, lrh))
3290                         goto dirty_vol;
3291
3292                 asize = le32_to_cpu(attr->size);
3293                 used = le32_to_cpu(rec->used);
3294
3295                 if (lrh->redo_len == lrh->undo_len) {
3296                         if (nsize > asize)
3297                                 goto dirty_vol;
3298                         goto move_data;
3299                 }
3300
3301                 if (nsize > asize && nsize - asize > record_size - used)
3302                         goto dirty_vol;
3303
3304                 nsize = ALIGN(nsize, 8);
3305                 data_off = le16_to_cpu(attr->res.data_off);
3306
3307                 if (nsize < asize) {
3308                         memmove(Add2Ptr(attr, aoff), data, dlen);
3309                         data = NULL; // To skip below memmove().
3310                 }
3311
3312                 memmove(Add2Ptr(attr, nsize), Add2Ptr(attr, asize),
3313                         used - le16_to_cpu(lrh->record_off) - asize);
3314
3315                 rec->used = cpu_to_le32(used + nsize - asize);
3316                 attr->size = cpu_to_le32(nsize);
3317                 attr->res.data_size = cpu_to_le32(aoff + dlen - data_off);
3318
3319 move_data:
3320                 if (data)
3321                         memmove(Add2Ptr(attr, aoff), data, dlen);
3322
3323                 oa2 = find_loaded_attr(log, attr, rno_base);
3324                 if (oa2) {
3325                         void *p2 = kmemdup(attr, le32_to_cpu(attr->size),
3326                                            GFP_NOFS);
3327                         if (p2) {
3328                                 // run_close(&oa2->run0);
3329                                 oa2->run1 = &oa2->run0;
3330                                 kfree(oa2->attr);
3331                                 oa2->attr = p2;
3332                         }
3333                 }
3334
3335                 mi->dirty = true;
3336                 break;
3337
3338         case UpdateMappingPairs:
3339                 nsize = aoff + dlen;
3340                 asize = le32_to_cpu(attr->size);
3341                 used = le32_to_cpu(rec->used);
3342
3343                 if (!check_if_attr(rec, lrh) || !attr->non_res ||
3344                     aoff < le16_to_cpu(attr->nres.run_off) || aoff > asize ||
3345                     (nsize > asize && nsize - asize > record_size - used)) {
3346                         goto dirty_vol;
3347                 }
3348
3349                 nsize = ALIGN(nsize, 8);
3350
3351                 memmove(Add2Ptr(attr, nsize), Add2Ptr(attr, asize),
3352                         used - le16_to_cpu(lrh->record_off) - asize);
3353                 rec->used = cpu_to_le32(used + nsize - asize);
3354                 attr->size = cpu_to_le32(nsize);
3355                 memmove(Add2Ptr(attr, aoff), data, dlen);
3356
3357                 if (run_get_highest_vcn(le64_to_cpu(attr->nres.svcn),
3358                                         attr_run(attr), &t64)) {
3359                         goto dirty_vol;
3360                 }
3361
3362                 attr->nres.evcn = cpu_to_le64(t64);
3363                 oa2 = find_loaded_attr(log, attr, rno_base);
3364                 if (oa2 && oa2->attr->non_res)
3365                         oa2->attr->nres.evcn = attr->nres.evcn;
3366
3367                 mi->dirty = true;
3368                 break;
3369
3370         case SetNewAttributeSizes:
3371                 new_sz = data;
3372                 if (!check_if_attr(rec, lrh) || !attr->non_res)
3373                         goto dirty_vol;
3374
3375                 attr->nres.alloc_size = new_sz->alloc_size;
3376                 attr->nres.data_size = new_sz->data_size;
3377                 attr->nres.valid_size = new_sz->valid_size;
3378
3379                 if (dlen >= sizeof(struct NEW_ATTRIBUTE_SIZES))
3380                         attr->nres.total_size = new_sz->total_size;
3381
3382                 oa2 = find_loaded_attr(log, attr, rno_base);
3383                 if (oa2) {
3384                         void *p2 = kmemdup(attr, le32_to_cpu(attr->size),
3385                                            GFP_NOFS);
3386                         if (p2) {
3387                                 kfree(oa2->attr);
3388                                 oa2->attr = p2;
3389                         }
3390                 }
3391                 mi->dirty = true;
3392                 break;
3393
3394         case AddIndexEntryRoot:
3395                 e = (struct NTFS_DE *)data;
3396                 esize = le16_to_cpu(e->size);
3397                 root = resident_data(attr);
3398                 hdr = &root->ihdr;
3399                 used = le32_to_cpu(hdr->used);
3400
3401                 if (!check_if_index_root(rec, lrh) ||
3402                     !check_if_root_index(attr, hdr, lrh) ||
3403                     Add2Ptr(data, esize) > Add2Ptr(lrh, rec_len) ||
3404                     esize > le32_to_cpu(rec->total) - le32_to_cpu(rec->used)) {
3405                         goto dirty_vol;
3406                 }
3407
3408                 e1 = Add2Ptr(attr, le16_to_cpu(lrh->attr_off));
3409
3410                 change_attr_size(rec, attr, le32_to_cpu(attr->size) + esize);
3411
3412                 memmove(Add2Ptr(e1, esize), e1,
3413                         PtrOffset(e1, Add2Ptr(hdr, used)));
3414                 memmove(e1, e, esize);
3415
3416                 le32_add_cpu(&attr->res.data_size, esize);
3417                 hdr->used = cpu_to_le32(used + esize);
3418                 le32_add_cpu(&hdr->total, esize);
3419
3420                 mi->dirty = true;
3421                 break;
3422
3423         case DeleteIndexEntryRoot:
3424                 root = resident_data(attr);
3425                 hdr = &root->ihdr;
3426                 used = le32_to_cpu(hdr->used);
3427
3428                 if (!check_if_index_root(rec, lrh) ||
3429                     !check_if_root_index(attr, hdr, lrh)) {
3430                         goto dirty_vol;
3431                 }
3432
3433                 e1 = Add2Ptr(attr, le16_to_cpu(lrh->attr_off));
3434                 esize = le16_to_cpu(e1->size);
3435                 e2 = Add2Ptr(e1, esize);
3436
3437                 memmove(e1, e2, PtrOffset(e2, Add2Ptr(hdr, used)));
3438
3439                 le32_sub_cpu(&attr->res.data_size, esize);
3440                 hdr->used = cpu_to_le32(used - esize);
3441                 le32_sub_cpu(&hdr->total, esize);
3442
3443                 change_attr_size(rec, attr, le32_to_cpu(attr->size) - esize);
3444
3445                 mi->dirty = true;
3446                 break;
3447
3448         case SetIndexEntryVcnRoot:
3449                 root = resident_data(attr);
3450                 hdr = &root->ihdr;
3451
3452                 if (!check_if_index_root(rec, lrh) ||
3453                     !check_if_root_index(attr, hdr, lrh)) {
3454                         goto dirty_vol;
3455                 }
3456
3457                 e = Add2Ptr(attr, le16_to_cpu(lrh->attr_off));
3458
3459                 de_set_vbn_le(e, *(__le64 *)data);
3460                 mi->dirty = true;
3461                 break;
3462
3463         case UpdateFileNameRoot:
3464                 root = resident_data(attr);
3465                 hdr = &root->ihdr;
3466
3467                 if (!check_if_index_root(rec, lrh) ||
3468                     !check_if_root_index(attr, hdr, lrh)) {
3469                         goto dirty_vol;
3470                 }
3471
3472                 e = Add2Ptr(attr, le16_to_cpu(lrh->attr_off));
3473                 fname = (struct ATTR_FILE_NAME *)(e + 1);
3474                 memmove(&fname->dup, data, sizeof(fname->dup));
3475                 mi->dirty = true;
3476                 break;
3477
3478         case UpdateRecordDataRoot:
3479                 root = resident_data(attr);
3480                 hdr = &root->ihdr;
3481
3482                 if (!check_if_index_root(rec, lrh) ||
3483                     !check_if_root_index(attr, hdr, lrh)) {
3484                         goto dirty_vol;
3485                 }
3486
3487                 e = Add2Ptr(attr, le16_to_cpu(lrh->attr_off));
3488
3489                 memmove(Add2Ptr(e, le16_to_cpu(e->view.data_off)), data, dlen);
3490
3491                 mi->dirty = true;
3492                 break;
3493
3494         case ZeroEndOfFileRecord:
3495                 if (roff + dlen > record_size)
3496                         goto dirty_vol;
3497
3498                 memset(attr, 0, dlen);
3499                 mi->dirty = true;
3500                 break;
3501
3502         case UpdateNonresidentValue:
3503                 if (lco < cbo + roff + dlen)
3504                         goto dirty_vol;
3505
3506                 memcpy(Add2Ptr(buffer_le, roff), data, dlen);
3507
3508                 a_dirty = true;
3509                 if (attr->type == ATTR_ALLOC)
3510                         ntfs_fix_pre_write(buffer_le, bytes);
3511                 break;
3512
3513         case AddIndexEntryAllocation:
3514                 ib = Add2Ptr(buffer_le, roff);
3515                 hdr = &ib->ihdr;
3516                 e = data;
3517                 esize = le16_to_cpu(e->size);
3518                 e1 = Add2Ptr(ib, aoff);
3519
3520                 if (is_baad(&ib->rhdr))
3521                         goto dirty_vol;
3522                 if (!check_lsn(&ib->rhdr, rlsn))
3523                         goto out;
3524
3525                 used = le32_to_cpu(hdr->used);
3526
3527                 if (!check_index_buffer(ib, bytes) ||
3528                     !check_if_alloc_index(hdr, aoff) ||
3529                     Add2Ptr(e, esize) > Add2Ptr(lrh, rec_len) ||
3530                     used + esize > le32_to_cpu(hdr->total)) {
3531                         goto dirty_vol;
3532                 }
3533
3534                 memmove(Add2Ptr(e1, esize), e1,
3535                         PtrOffset(e1, Add2Ptr(hdr, used)));
3536                 memcpy(e1, e, esize);
3537
3538                 hdr->used = cpu_to_le32(used + esize);
3539
3540                 a_dirty = true;
3541
3542                 ntfs_fix_pre_write(&ib->rhdr, bytes);
3543                 break;
3544
3545         case DeleteIndexEntryAllocation:
3546                 ib = Add2Ptr(buffer_le, roff);
3547                 hdr = &ib->ihdr;
3548                 e = Add2Ptr(ib, aoff);
3549                 esize = le16_to_cpu(e->size);
3550
3551                 if (is_baad(&ib->rhdr))
3552                         goto dirty_vol;
3553                 if (!check_lsn(&ib->rhdr, rlsn))
3554                         goto out;
3555
3556                 if (!check_index_buffer(ib, bytes) ||
3557                     !check_if_alloc_index(hdr, aoff)) {
3558                         goto dirty_vol;
3559                 }
3560
3561                 e1 = Add2Ptr(e, esize);
3562                 nsize = esize;
3563                 used = le32_to_cpu(hdr->used);
3564
3565                 memmove(e, e1, PtrOffset(e1, Add2Ptr(hdr, used)));
3566
3567                 hdr->used = cpu_to_le32(used - nsize);
3568
3569                 a_dirty = true;
3570
3571                 ntfs_fix_pre_write(&ib->rhdr, bytes);
3572                 break;
3573
3574         case WriteEndOfIndexBuffer:
3575                 ib = Add2Ptr(buffer_le, roff);
3576                 hdr = &ib->ihdr;
3577                 e = Add2Ptr(ib, aoff);
3578
3579                 if (is_baad(&ib->rhdr))
3580                         goto dirty_vol;
3581                 if (!check_lsn(&ib->rhdr, rlsn))
3582                         goto out;
3583                 if (!check_index_buffer(ib, bytes) ||
3584                     !check_if_alloc_index(hdr, aoff) ||
3585                     aoff + dlen > offsetof(struct INDEX_BUFFER, ihdr) +
3586                                           le32_to_cpu(hdr->total)) {
3587                         goto dirty_vol;
3588                 }
3589
3590                 hdr->used = cpu_to_le32(dlen + PtrOffset(hdr, e));
3591                 memmove(e, data, dlen);
3592
3593                 a_dirty = true;
3594                 ntfs_fix_pre_write(&ib->rhdr, bytes);
3595                 break;
3596
3597         case SetIndexEntryVcnAllocation:
3598                 ib = Add2Ptr(buffer_le, roff);
3599                 hdr = &ib->ihdr;
3600                 e = Add2Ptr(ib, aoff);
3601
3602                 if (is_baad(&ib->rhdr))
3603                         goto dirty_vol;
3604
3605                 if (!check_lsn(&ib->rhdr, rlsn))
3606                         goto out;
3607                 if (!check_index_buffer(ib, bytes) ||
3608                     !check_if_alloc_index(hdr, aoff)) {
3609                         goto dirty_vol;
3610                 }
3611
3612                 de_set_vbn_le(e, *(__le64 *)data);
3613
3614                 a_dirty = true;
3615                 ntfs_fix_pre_write(&ib->rhdr, bytes);
3616                 break;
3617
3618         case UpdateFileNameAllocation:
3619                 ib = Add2Ptr(buffer_le, roff);
3620                 hdr = &ib->ihdr;
3621                 e = Add2Ptr(ib, aoff);
3622
3623                 if (is_baad(&ib->rhdr))
3624                         goto dirty_vol;
3625
3626                 if (!check_lsn(&ib->rhdr, rlsn))
3627                         goto out;
3628                 if (!check_index_buffer(ib, bytes) ||
3629                     !check_if_alloc_index(hdr, aoff)) {
3630                         goto dirty_vol;
3631                 }
3632
3633                 fname = (struct ATTR_FILE_NAME *)(e + 1);
3634                 memmove(&fname->dup, data, sizeof(fname->dup));
3635
3636                 a_dirty = true;
3637                 ntfs_fix_pre_write(&ib->rhdr, bytes);
3638                 break;
3639
3640         case SetBitsInNonresidentBitMap:
3641                 bmp_off =
3642                         le32_to_cpu(((struct BITMAP_RANGE *)data)->bitmap_off);
3643                 bmp_bits = le32_to_cpu(((struct BITMAP_RANGE *)data)->bits);
3644
3645                 if (cbo + (bmp_off + 7) / 8 > lco ||
3646                     cbo + ((bmp_off + bmp_bits + 7) / 8) > lco) {
3647                         goto dirty_vol;
3648                 }
3649
3650                 __bitmap_set(Add2Ptr(buffer_le, roff), bmp_off, bmp_bits);
3651                 a_dirty = true;
3652                 break;
3653
3654         case ClearBitsInNonresidentBitMap:
3655                 bmp_off =
3656                         le32_to_cpu(((struct BITMAP_RANGE *)data)->bitmap_off);
3657                 bmp_bits = le32_to_cpu(((struct BITMAP_RANGE *)data)->bits);
3658
3659                 if (cbo + (bmp_off + 7) / 8 > lco ||
3660                     cbo + ((bmp_off + bmp_bits + 7) / 8) > lco) {
3661                         goto dirty_vol;
3662                 }
3663
3664                 __bitmap_clear(Add2Ptr(buffer_le, roff), bmp_off, bmp_bits);
3665                 a_dirty = true;
3666                 break;
3667
3668         case UpdateRecordDataAllocation:
3669                 ib = Add2Ptr(buffer_le, roff);
3670                 hdr = &ib->ihdr;
3671                 e = Add2Ptr(ib, aoff);
3672
3673                 if (is_baad(&ib->rhdr))
3674                         goto dirty_vol;
3675
3676                 if (!check_lsn(&ib->rhdr, rlsn))
3677                         goto out;
3678                 if (!check_index_buffer(ib, bytes) ||
3679                     !check_if_alloc_index(hdr, aoff)) {
3680                         goto dirty_vol;
3681                 }
3682
3683                 memmove(Add2Ptr(e, le16_to_cpu(e->view.data_off)), data, dlen);
3684
3685                 a_dirty = true;
3686                 ntfs_fix_pre_write(&ib->rhdr, bytes);
3687                 break;
3688
3689         default:
3690                 WARN_ON(1);
3691         }
3692
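        /*
         * On a redo pass ('rlsn' != NULL), stamp the lsn being redone into
         * the headers just updated so the change is recorded as applied.
         */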
3693         if (rlsn) {
3694                 __le64 t64 = cpu_to_le64(*rlsn);
3695
3696                 if (rec)
3697                         rec->rhdr.lsn = t64;
3698                 if (ib)
3699                         ib->rhdr.lsn = t64;
3700         }
3701
3702         if (mi && mi->dirty) {
3703                 err = mi_write(mi, 0);
3704                 if (err)
3705                         goto out;
3706         }
3707
3708         if (a_dirty) {
3709                 attr = oa->attr;
3710                 err = ntfs_sb_write_run(sbi, oa->run1, vbo, buffer_le, bytes, 0);
3711                 if (err)
3712                         goto out;
3713         }
3714
3715 out:
3716
3717         if (inode)
3718                 iput(inode);
3719         else if (mi != mi2_child)
3720                 mi_put(mi);
3721
3722         kfree(buffer_le);
3723
3724         return err;
3725
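/*
 * The log record cannot be applied safely. Flag the log as dirty so the
 * caller can mark the volume for a consistency check instead of failing
 * the whole replay.
 */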
3726 dirty_vol:
3727         log->set_dirty = true;
3728         goto out;
3729 }
3730
3731 /*
3732  * log_replay - Replays log and empties it.
3733  *
3734  * This function is called during the mount operation.
3735  * It replays the log and then empties it.
3736  * 'initialized' is set to false if the logfile contains '-1'.
3737  */
3738 int log_replay(struct ntfs_inode *ni, bool *initialized)
3739 {
3740         int err;
3741         struct ntfs_sb_info *sbi = ni->mi.sbi;
3742         struct ntfs_log *log;
3743
3744         struct restart_info rst_info, rst_info2;
3745         u64 rec_lsn, ra_lsn, checkpt_lsn = 0, rlsn = 0;
3746         struct ATTR_NAME_ENTRY *attr_names = NULL;
3747         struct ATTR_NAME_ENTRY *ane;
3748         struct RESTART_TABLE *dptbl = NULL;
3749         struct RESTART_TABLE *trtbl = NULL;
3750         const struct RESTART_TABLE *rt;
3751         struct RESTART_TABLE *oatbl = NULL;
3752         struct inode *inode;
3753         struct OpenAttr *oa;
3754         struct ntfs_inode *ni_oe;
3755         struct ATTRIB *attr = NULL;
3756         u64 size, vcn, undo_next_lsn;
3757         CLST rno, lcn, lcn0, len0, clen;
3758         void *data;
3759         struct NTFS_RESTART *rst = NULL;
3760         struct lcb *lcb = NULL;
3761         struct OPEN_ATTR_ENRTY *oe;
3762         struct TRANSACTION_ENTRY *tr;
3763         struct DIR_PAGE_ENTRY *dp;
3764         u32 i, bytes_per_attr_entry;
3765         u32 l_size = ni->vfs_inode.i_size;
3766         u32 orig_file_size = l_size;
3767         u32 page_size, vbo, tail, off, dlen;
3768         u32 saved_len, rec_len, transact_id;
3769         bool use_second_page;
3770         struct RESTART_AREA *ra2, *ra = NULL;
3771         struct CLIENT_REC *ca, *cr;
3772         __le16 client;
3773         struct RESTART_HDR *rh;
3774         const struct LFS_RECORD_HDR *frh;
3775         const struct LOG_REC_HDR *lrh;
3776         bool is_mapped;
3777         bool is_ro = sb_rdonly(sbi->sb);
3778         u64 t64;
3779         u16 t16;
3780         u32 t32;
3781
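        /*
         * Rough flow of the replay: pick the most recent valid restart area,
         * locate the "NTFS" client record, rebuild the transaction, dirty
         * page and open attribute tables from the last checkpoint, then run
         * the analysis pass followed by the redo and undo passes.
         */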
3782         /* Get the page size. NOTE: To replay we can use the default page size. */
3783 #if PAGE_SIZE >= DefaultLogPageSize && PAGE_SIZE <= DefaultLogPageSize * 2
3784         page_size = norm_file_page(PAGE_SIZE, &l_size, true);
3785 #else
3786         page_size = norm_file_page(PAGE_SIZE, &l_size, false);
3787 #endif
3788         if (!page_size)
3789                 return -EINVAL;
3790
3791         log = kzalloc(sizeof(struct ntfs_log), GFP_NOFS);
3792         if (!log)
3793                 return -ENOMEM;
3794
3795         memset(&rst_info, 0, sizeof(struct restart_info));
3796
3797         log->ni = ni;
3798         log->l_size = l_size;
3799         log->one_page_buf = kmalloc(page_size, GFP_NOFS);
3800         if (!log->one_page_buf) {
3801                 err = -ENOMEM;
3802                 goto out;
3803         }
3804
3805         log->page_size = page_size;
3806         log->page_mask = page_size - 1;
3807         log->page_bits = blksize_bits(page_size);
3808
3809         /* Look for a restart area on the disk. */
3810         err = log_read_rst(log, l_size, true, &rst_info);
3811         if (err)
3812                 goto out;
3813
3814         /* Remember 'initialized'. */
3815         *initialized = rst_info.initialized;
3816
3817         if (!rst_info.restart) {
3818                 if (rst_info.initialized) {
3819                         /* No restart area, but the file is initialized. */
3820                         err = -EINVAL;
3821                         goto out;
3822                 }
3823
3824                 log_init_pg_hdr(log, page_size, page_size, 1, 1);
3825                 log_create(log, l_size, 0, get_random_int(), false, false);
3826
3827                 log->ra = ra;
3828
3829                 ra = log_create_ra(log);
3830                 if (!ra) {
3831                         err = -ENOMEM;
3832                         goto out;
3833                 }
3834                 log->ra = ra;
3835                 log->init_ra = true;
3836
3837                 goto process_log;
3838         }
3839
3840         /*
3841          * If the restart offset above wasn't zero then we won't
3842          * look for a second restart.
3843          */
3844         if (rst_info.vbo)
3845                 goto check_restart_area;
3846
3847         memset(&rst_info2, 0, sizeof(struct restart_info));
3848         err = log_read_rst(log, l_size, false, &rst_info2);
3849
3850         /* Determine which restart area to use. */
3851         if (!rst_info2.restart || rst_info2.last_lsn <= rst_info.last_lsn)
3852                 goto use_first_page;
3853
3854         use_second_page = true;
3855
3856         if (rst_info.chkdsk_was_run && page_size != rst_info.vbo) {
3857                 struct RECORD_PAGE_HDR *sp = NULL;
3858                 bool usa_error;
3859
3860                 if (!read_log_page(log, page_size, &sp, &usa_error) &&
3861                     sp->rhdr.sign == NTFS_CHKD_SIGNATURE) {
3862                         use_second_page = false;
3863                 }
3864                 kfree(sp);
3865         }
3866
3867         if (use_second_page) {
3868                 kfree(rst_info.r_page);
3869                 memcpy(&rst_info, &rst_info2, sizeof(struct restart_info));
3870                 rst_info2.r_page = NULL;
3871         }
3872
3873 use_first_page:
3874         kfree(rst_info2.r_page);
3875
3876 check_restart_area:
3877         /*
3878          * If the restart area is at offset 0, we want
3879          * to write the second restart area first.
3880          */
3881         log->init_ra = !!rst_info.vbo;
3882
3883         /* If we have a valid page then grab a pointer to the restart area. */
3884         ra2 = rst_info.valid_page
3885                       ? Add2Ptr(rst_info.r_page,
3886                                 le16_to_cpu(rst_info.r_page->ra_off))
3887                       : NULL;
3888
3889         if (rst_info.chkdsk_was_run ||
3890             (ra2 && ra2->client_idx[1] == LFS_NO_CLIENT_LE)) {
3891                 bool wrapped = false;
3892                 bool use_multi_page = false;
3893                 u32 open_log_count;
3894
3895                 /* Do some checks based on whether we have a valid log page. */
3896                 if (!rst_info.valid_page) {
3897                         open_log_count = get_random_int();
3898                         goto init_log_instance;
3899                 }
3900                 open_log_count = le32_to_cpu(ra2->open_log_count);
3901
3902                 /*
3903                  * If the restart page size isn't changing then we want to
3904                  * check how much work we need to do.
3905                  */
3906                 if (page_size != le32_to_cpu(rst_info.r_page->sys_page_size))
3907                         goto init_log_instance;
3908
3909 init_log_instance:
3910                 log_init_pg_hdr(log, page_size, page_size, 1, 1);
3911
3912                 log_create(log, l_size, rst_info.last_lsn, open_log_count,
3913                            wrapped, use_multi_page);
3914
3915                 ra = log_create_ra(log);
3916                 if (!ra) {
3917                         err = -ENOMEM;
3918                         goto out;
3919                 }
3920                 log->ra = ra;
3921
3922                 /* Put the restart areas and initialize
3923                  * the log file as required.
3924                  */
3925                 goto process_log;
3926         }
3927
3928         if (!ra2) {
3929                 err = -EINVAL;
3930                 goto out;
3931         }
3932
3933         /*
3934          * If the log page or the system page sizes have changed, we can't
3935          * use the log file. We must use the system page size instead of the
3936          * default size if there is not a clean shutdown.
3937          */
3938         t32 = le32_to_cpu(rst_info.r_page->sys_page_size);
3939         if (page_size != t32) {
3940                 l_size = orig_file_size;
3941                 page_size =
3942                         norm_file_page(t32, &l_size, t32 == DefaultLogPageSize);
3943         }
3944
3945         if (page_size != t32 ||
3946             page_size != le32_to_cpu(rst_info.r_page->page_size)) {
3947                 err = -EINVAL;
3948                 goto out;
3949         }
3950
3951         /* If the file size has shrunk then we won't mount it. */
3952         if (l_size < le64_to_cpu(ra2->l_size)) {
3953                 err = -EINVAL;
3954                 goto out;
3955         }
3956
3957         log_init_pg_hdr(log, page_size, page_size,
3958                         le16_to_cpu(rst_info.r_page->major_ver),
3959                         le16_to_cpu(rst_info.r_page->minor_ver));
3960
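        /*
         * An lsn encodes a wrap (sequence) count in its top seq_num_bits
         * bits, while the low file_data_bits locate the record inside the
         * log file; hence seq_num below is last_lsn >> file_data_bits.
         */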
3961         log->l_size = le64_to_cpu(ra2->l_size);
3962         log->seq_num_bits = le32_to_cpu(ra2->seq_num_bits);
3963         log->file_data_bits = sizeof(u64) * 8 - log->seq_num_bits;
3964         log->seq_num_mask = (8 << log->file_data_bits) - 1;
3965         log->last_lsn = le64_to_cpu(ra2->current_lsn);
3966         log->seq_num = log->last_lsn >> log->file_data_bits;
3967         log->ra_off = le16_to_cpu(rst_info.r_page->ra_off);
3968         log->restart_size = log->sys_page_size - log->ra_off;
3969         log->record_header_len = le16_to_cpu(ra2->rec_hdr_len);
3970         log->ra_size = le16_to_cpu(ra2->ra_len);
3971         log->data_off = le16_to_cpu(ra2->data_off);
3972         log->data_size = log->page_size - log->data_off;
3973         log->reserved = log->data_size - log->record_header_len;
3974
3975         vbo = lsn_to_vbo(log, log->last_lsn);
3976
3977         if (vbo < log->first_page) {
3978                 /* This is a pseudo lsn. */
3979                 log->l_flags |= NTFSLOG_NO_LAST_LSN;
3980                 log->next_page = log->first_page;
3981                 goto find_oldest;
3982         }
3983
3984         /* Find the end of this log record. */
3985         off = final_log_off(log, log->last_lsn,
3986                             le32_to_cpu(ra2->last_lsn_data_len));
3987
3988         /* If we wrapped the file then increment the sequence number. */
3989         if (off <= vbo) {
3990                 log->seq_num += 1;
3991                 log->l_flags |= NTFSLOG_WRAPPED;
3992         }
3993
3994         /* Now compute the next log page to use. */
3995         vbo &= ~log->sys_page_mask;
3996         tail = log->page_size - (off & log->page_mask) - 1;
3997
3998         /*
3999          * If we can fit another log record on the page,
4000          * move back a page in the log file.
4001          */
4002         if (tail >= log->record_header_len) {
4003                 log->l_flags |= NTFSLOG_REUSE_TAIL;
4004                 log->next_page = vbo;
4005         } else {
4006                 log->next_page = next_page_off(log, vbo);
4007         }
4008
4009 find_oldest:
4010         /*
4011          * Find the oldest client lsn. Use the last
4012          * flushed lsn as a starting point.
4013          */
4014         log->oldest_lsn = log->last_lsn;
4015         oldest_client_lsn(Add2Ptr(ra2, le16_to_cpu(ra2->client_off)),
4016                           ra2->client_idx[1], &log->oldest_lsn);
4017         log->oldest_lsn_off = lsn_to_vbo(log, log->oldest_lsn);
4018
4019         if (log->oldest_lsn_off < log->first_page)
4020                 log->l_flags |= NTFSLOG_NO_OLDEST_LSN;
4021
4022         if (!(ra2->flags & RESTART_SINGLE_PAGE_IO))
4023                 log->l_flags |= NTFSLOG_WRAPPED | NTFSLOG_MULTIPLE_PAGE_IO;
4024
4025         log->current_openlog_count = le32_to_cpu(ra2->open_log_count);
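        /*
         * Work out how much log space is usable: the number of whole pages
         * past the restart pages, then the byte totals assuming 'reserved'
         * and 'data_size' usable bytes per page respectively.
         */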
4026         log->total_avail_pages = log->l_size - log->first_page;
4027         log->total_avail = log->total_avail_pages >> log->page_bits;
4028         log->max_current_avail = log->total_avail * log->reserved;
4029         log->total_avail = log->total_avail * log->data_size;
4030
4031         log->current_avail = current_log_avail(log);
4032
4033         ra = kzalloc(log->restart_size, GFP_NOFS);
4034         if (!ra) {
4035                 err = -ENOMEM;
4036                 goto out;
4037         }
4038         log->ra = ra;
4039
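        /*
         * If the client array sits at the standard offset, the on-disk
         * restart area can be copied verbatim; otherwise rebuild a minimal
         * restart area that holds a single client record.
         */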
4040         t16 = le16_to_cpu(ra2->client_off);
4041         if (t16 == offsetof(struct RESTART_AREA, clients)) {
4042                 memcpy(ra, ra2, log->ra_size);
4043         } else {
4044                 memcpy(ra, ra2, offsetof(struct RESTART_AREA, clients));
4045                 memcpy(ra->clients, Add2Ptr(ra2, t16),
4046                        le16_to_cpu(ra2->ra_len) - t16);
4047
4048                 log->current_openlog_count = get_random_int();
4049                 ra->open_log_count = cpu_to_le32(log->current_openlog_count);
4050                 log->ra_size = offsetof(struct RESTART_AREA, clients) +
4051                                sizeof(struct CLIENT_REC);
4052                 ra->client_off =
4053                         cpu_to_le16(offsetof(struct RESTART_AREA, clients));
4054                 ra->ra_len = cpu_to_le16(log->ra_size);
4055         }
4056
4057         le32_add_cpu(&ra->open_log_count, 1);
4058
4059         /* Now we need to walk through looking for the last lsn. */
4060         err = last_log_lsn(log);
4061         if (err)
4062                 goto out;
4063
4064         log->current_avail = current_log_avail(log);
4065
4066         /* Remember which restart area to write first. */
4067         log->init_ra = rst_info.vbo;
4068
4069 process_log:
4070         /* Supported versions are 1.0, 1.1, 2.0; log->major_ver/minor_ver are short values. */
4071         switch ((log->major_ver << 16) + log->minor_ver) {
4072         case 0x10000:
4073         case 0x10001:
4074         case 0x20000:
4075                 break;
4076         default:
4077                 ntfs_warn(sbi->sb, "\x24LogFile version %d.%d is not supported",
4078                           log->major_ver, log->minor_ver);
4079                 err = -EOPNOTSUPP;
4080                 log->set_dirty = true;
4081                 goto out;
4082         }
4083
4084         /* One client "NTFS" per logfile. */
4085         ca = Add2Ptr(ra, le16_to_cpu(ra->client_off));
4086
4087         for (client = ra->client_idx[1];; client = cr->next_client) {
4088                 if (client == LFS_NO_CLIENT_LE) {
4089                         /* Insert the "NTFS" client into the LogFile. */
4090                         client = ra->client_idx[0];
4091                         if (client == LFS_NO_CLIENT_LE) {
4092                                 err = -EINVAL;
4093                                 goto out;
4094                         }
4095
4096                         t16 = le16_to_cpu(client);
4097                         cr = ca + t16;
4098
4099                         remove_client(ca, cr, &ra->client_idx[0]);
4100
4101                         cr->restart_lsn = 0;
4102                         cr->oldest_lsn = cpu_to_le64(log->oldest_lsn);
4103                         cr->name_bytes = cpu_to_le32(8);
4104                         cr->name[0] = cpu_to_le16('N');
4105                         cr->name[1] = cpu_to_le16('T');
4106                         cr->name[2] = cpu_to_le16('F');
4107                         cr->name[3] = cpu_to_le16('S');
4108
4109                         add_client(ca, t16, &ra->client_idx[1]);
4110                         break;
4111                 }
4112
4113                 cr = ca + le16_to_cpu(client);
4114
4115                 if (cpu_to_le32(8) == cr->name_bytes &&
4116                     cpu_to_le16('N') == cr->name[0] &&
4117                     cpu_to_le16('T') == cr->name[1] &&
4118                     cpu_to_le16('F') == cr->name[2] &&
4119                     cpu_to_le16('S') == cr->name[3])
4120                         break;
4121         }
4122
4123         /* Update the client handle with the client block information. */
4124         log->client_id.seq_num = cr->seq_num;
4125         log->client_id.client_idx = client;
4126
4127         err = read_rst_area(log, &rst, &ra_lsn);
4128         if (err)
4129                 goto out;
4130
4131         if (!rst)
4132                 goto out;
4133
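        /*
         * Version 0 restart data uses 0x2C-byte open attribute entries
         * (struct OPEN_ATTR_ENRTY_32); version 1 uses 0x28-byte entries.
         */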
4134         bytes_per_attr_entry = !rst->major_ver ? 0x2C : 0x28;
4135
4136         checkpt_lsn = le64_to_cpu(rst->check_point_start);
4137         if (!checkpt_lsn)
4138                 checkpt_lsn = ra_lsn;
4139
4140         /* Allocate and Read the Transaction Table. */
4141         if (!rst->transact_table_len)
4142                 goto check_dirty_page_table;
4143
4144         t64 = le64_to_cpu(rst->transact_table_lsn);
4145         err = read_log_rec_lcb(log, t64, lcb_ctx_prev, &lcb);
4146         if (err)
4147                 goto out;
4148
4149         lrh = lcb->log_rec;
4150         frh = lcb->lrh;
4151         rec_len = le32_to_cpu(frh->client_data_len);
4152
4153         if (!check_log_rec(lrh, rec_len, le32_to_cpu(frh->transact_id),
4154                            bytes_per_attr_entry)) {
4155                 err = -EINVAL;
4156                 goto out;
4157         }
4158
4159         t16 = le16_to_cpu(lrh->redo_off);
4160
4161         rt = Add2Ptr(lrh, t16);
4162         t32 = rec_len - t16;
4163
4164         /* Now check that this is a valid restart table. */
4165         if (!check_rstbl(rt, t32)) {
4166                 err = -EINVAL;
4167                 goto out;
4168         }
4169
4170         trtbl = kmemdup(rt, t32, GFP_NOFS);
4171         if (!trtbl) {
4172                 err = -ENOMEM;
4173                 goto out;
4174         }
4175
4176         lcb_put(lcb);
4177         lcb = NULL;
4178
4179 check_dirty_page_table:
4180         /* The next record back should be the Dirty Pages Table. */
4181         if (!rst->dirty_pages_len)
4182                 goto check_attribute_names;
4183
4184         t64 = le64_to_cpu(rst->dirty_pages_table_lsn);
4185         err = read_log_rec_lcb(log, t64, lcb_ctx_prev, &lcb);
4186         if (err)
4187                 goto out;
4188
4189         lrh = lcb->log_rec;
4190         frh = lcb->lrh;
4191         rec_len = le32_to_cpu(frh->client_data_len);
4192
4193         if (!check_log_rec(lrh, rec_len, le32_to_cpu(frh->transact_id),
4194                            bytes_per_attr_entry)) {
4195                 err = -EINVAL;
4196                 goto out;
4197         }
4198
4199         t16 = le16_to_cpu(lrh->redo_off);
4200
4201         rt = Add2Ptr(lrh, t16);
4202         t32 = rec_len - t16;
4203
4204         /* Now check that this is a valid restart table. */
4205         if (!check_rstbl(rt, t32)) {
4206                 err = -EINVAL;
4207                 goto out;
4208         }
4209
4210         dptbl = kmemdup(rt, t32, GFP_NOFS);
4211         if (!dptbl) {
4212                 err = -ENOMEM;
4213                 goto out;
4214         }
4215
4216         /* Convert Ra version '0' into version '1'. */
4217         if (rst->major_ver)
4218                 goto end_conv_1;
4219
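        /*
         * A version 0 entry (struct DIR_PAGE_ENTRY_32) keeps the vcn and
         * the lcn array at a different offset, so slide them in place into
         * the 64-bit DIR_PAGE_ENTRY layout.
         */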
4220         dp = NULL;
4221         while ((dp = enum_rstbl(dptbl, dp))) {
4222                 struct DIR_PAGE_ENTRY_32 *dp0 = (struct DIR_PAGE_ENTRY_32 *)dp;
4223                 // NOTE: Danger. Check for out-of-bounds access.
4224                 memmove(&dp->vcn, &dp0->vcn_low,
4225                         2 * sizeof(u64) +
4226                                 le32_to_cpu(dp->lcns_follow) * sizeof(u64));
4227         }
4228
4229 end_conv_1:
4230         lcb_put(lcb);
4231         lcb = NULL;
4232
4233         /*
4234          * Go through the table and remove the duplicates,
4235          * remembering the oldest lsn values.
4236          */
4237         if (sbi->cluster_size <= log->page_size)
4238                 goto trace_dp_table;
4239
4240         dp = NULL;
4241         while ((dp = enum_rstbl(dptbl, dp))) {
4242                 struct DIR_PAGE_ENTRY *next = dp;
4243
4244                 while ((next = enum_rstbl(dptbl, next))) {
4245                         if (next->target_attr == dp->target_attr &&
4246                             next->vcn == dp->vcn) {
4247                                 if (le64_to_cpu(next->oldest_lsn) <
4248                                     le64_to_cpu(dp->oldest_lsn)) {
4249                                         dp->oldest_lsn = next->oldest_lsn;
4250                                 }
4251
4252                                 free_rsttbl_idx(dptbl, PtrOffset(dptbl, next));
4253                         }
4254                 }
4255         }
4256 trace_dp_table:
4257 check_attribute_names:
4258         /* The next record should be the Attribute Names. */
4259         if (!rst->attr_names_len)
4260                 goto check_attr_table;
4261
4262         t64 = le64_to_cpu(rst->attr_names_lsn);
4263         err = read_log_rec_lcb(log, t64, lcb_ctx_prev, &lcb);
4264         if (err)
4265                 goto out;
4266
4267         lrh = lcb->log_rec;
4268         frh = lcb->lrh;
4269         rec_len = le32_to_cpu(frh->client_data_len);
4270
4271         if (!check_log_rec(lrh, rec_len, le32_to_cpu(frh->transact_id),
4272                            bytes_per_attr_entry)) {
4273                 err = -EINVAL;
4274                 goto out;
4275         }
4276
4277         t32 = lrh_length(lrh);
4278         rec_len -= t32;
4279
4280         attr_names = kmemdup(Add2Ptr(lrh, t32), rec_len, GFP_NOFS);
             if (!attr_names) {
                     err = -ENOMEM;
                     goto out;
             }
4281
4282         lcb_put(lcb);
4283         lcb = NULL;
4284
4285 check_attr_table:
4286         /* The next record should be the attribute Table. */
4287         if (!rst->open_attr_len)
4288                 goto check_attribute_names2;
4289
4290         t64 = le64_to_cpu(rst->open_attr_table_lsn);
4291         err = read_log_rec_lcb(log, t64, lcb_ctx_prev, &lcb);
4292         if (err)
4293                 goto out;
4294
4295         lrh = lcb->log_rec;
4296         frh = lcb->lrh;
4297         rec_len = le32_to_cpu(frh->client_data_len);
4298
4299         if (!check_log_rec(lrh, rec_len, le32_to_cpu(frh->transact_id),
4300                            bytes_per_attr_entry)) {
4301                 err = -EINVAL;
4302                 goto out;
4303         }
4304
4305         t16 = le16_to_cpu(lrh->redo_off);
4306
4307         rt = Add2Ptr(lrh, t16);
4308         t32 = rec_len - t16;
4309
4310         if (!check_rstbl(rt, t32)) {
4311                 err = -EINVAL;
4312                 goto out;
4313         }
4314
4315         oatbl = kmemdup(rt, t32, GFP_NOFS);
4316         if (!oatbl) {
4317                 err = -ENOMEM;
4318                 goto out;
4319         }
4320
4321         log->open_attr_tbl = oatbl;
4322
4323         /* Clear all of the Attr pointers. */
4324         oe = NULL;
4325         while ((oe = enum_rstbl(oatbl, oe))) {
4326                 if (!rst->major_ver) {
4327                         struct OPEN_ATTR_ENRTY_32 oe0;
4328
4329                         /* Really 'oe' points to OPEN_ATTR_ENRTY_32. */
4330                         memcpy(&oe0, oe, SIZEOF_OPENATTRIBUTEENTRY0);
4331
4332                         oe->bytes_per_index = oe0.bytes_per_index;
4333                         oe->type = oe0.type;
4334                         oe->is_dirty_pages = oe0.is_dirty_pages;
4335                         oe->name_len = 0;
4336                         oe->ref = oe0.ref;
4337                         oe->open_record_lsn = oe0.open_record_lsn;
4338                 }
4339
4340                 oe->is_attr_name = 0;
4341                 oe->ptr = NULL;
4342         }
4343
4344         lcb_put(lcb);
4345         lcb = NULL;
4346
4347 check_attribute_names2:
4348         if (!rst->attr_names_len)
4349                 goto trace_attribute_table;
4350
4351         ane = attr_names;
4352         if (!oatbl)
4353                 goto trace_attribute_table;
4354         while (ane->off) {
4355                 /* TODO: Clear table on exit! */
4356                 oe = Add2Ptr(oatbl, le16_to_cpu(ane->off));
4357                 t16 = le16_to_cpu(ane->name_bytes);
4358                 oe->name_len = t16 / sizeof(short);
4359                 oe->ptr = ane->name;
4360                 oe->is_attr_name = 2;
4361                 ane = Add2Ptr(ane, sizeof(struct ATTR_NAME_ENTRY) + t16);
4362         }
4363
4364 trace_attribute_table:
4365         /*
4366          * If the checkpt_lsn is zero, then this is a freshly
4367          * formatted disk and we have no work to do.
4368          */
4369         if (!checkpt_lsn) {
4370                 err = 0;
4371                 goto out;
4372         }
4373
4374         if (!oatbl) {
4375                 oatbl = init_rsttbl(bytes_per_attr_entry, 8);
4376                 if (!oatbl) {
4377                         err = -ENOMEM;
4378                         goto out;
4379                 }
4380         }
4381
4382         log->open_attr_tbl = oatbl;
4383
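        /*
         * The analysis pass walks the client records from the checkpoint to
         * the end of the log, filling in the transaction, dirty page and
         * open attribute tables that the later passes rely on.
         */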
4384         /* Start the analysis pass from the Checkpoint lsn. */
4385         rec_lsn = checkpt_lsn;
4386
4387         /* Read the first lsn. */
4388         err = read_log_rec_lcb(log, checkpt_lsn, lcb_ctx_next, &lcb);
4389         if (err)
4390                 goto out;
4391
4392         /* Loop to read all subsequent records to the end of the log file. */
4393 next_log_record_analyze:
4394         err = read_next_log_rec(log, lcb, &rec_lsn);
4395         if (err)
4396                 goto out;
4397
4398         if (!rec_lsn)
4399                 goto end_log_records_enumerate;
4400
4401         frh = lcb->lrh;
4402         transact_id = le32_to_cpu(frh->transact_id);
4403         rec_len = le32_to_cpu(frh->client_data_len);
4404         lrh = lcb->log_rec;
4405
4406         if (!check_log_rec(lrh, rec_len, transact_id, bytes_per_attr_entry)) {
4407                 err = -EINVAL;
4408                 goto out;
4409         }
4410
4411         /*
4412          * The first lsn after the previous lsn remembered by
4413          * the checkpoint is the first candidate for the rlsn.
4414          */
4415         if (!rlsn)
4416                 rlsn = rec_lsn;
4417
4418         if (LfsClientRecord != frh->record_type)
4419                 goto next_log_record_analyze;
4420
4421         /*
4422          * Now update the Transaction Table for this transaction. If there
4423          * is no entry present or it is unallocated we allocate the entry.
4424          */
4425         if (!trtbl) {
4426                 trtbl = init_rsttbl(sizeof(struct TRANSACTION_ENTRY),
4427                                     INITIAL_NUMBER_TRANSACTIONS);
4428                 if (!trtbl) {
4429                         err = -ENOMEM;
4430                         goto out;
4431                 }
4432         }
4433
4434         tr = Add2Ptr(trtbl, transact_id);
4435
4436         if (transact_id >= bytes_per_rt(trtbl) ||
4437             tr->next != RESTART_ENTRY_ALLOCATED_LE) {
4438                 tr = alloc_rsttbl_from_idx(&trtbl, transact_id);
4439                 if (!tr) {
4440                         err = -ENOMEM;
4441                         goto out;
4442                 }
4443                 tr->transact_state = TransactionActive;
4444                 tr->first_lsn = cpu_to_le64(rec_lsn);
4445         }
4446
4447         tr->prev_lsn = tr->undo_next_lsn = cpu_to_le64(rec_lsn);
4448
4449         /*
4450          * If this is a compensation log record, then change
4451          * the undo_next_lsn to be the undo_next_lsn of this record.
4452          */
4453         if (lrh->undo_op == cpu_to_le16(CompensationLogRecord))
4454                 tr->undo_next_lsn = frh->client_undo_next_lsn;
4455
4456         /* Dispatch to handle log record depending on type. */
4457         switch (le16_to_cpu(lrh->redo_op)) {
4458         case InitializeFileRecordSegment:
4459         case DeallocateFileRecordSegment:
4460         case WriteEndOfFileRecordSegment:
4461         case CreateAttribute:
4462         case DeleteAttribute:
4463         case UpdateResidentValue:
4464         case UpdateNonresidentValue:
4465         case UpdateMappingPairs:
4466         case SetNewAttributeSizes:
4467         case AddIndexEntryRoot:
4468         case DeleteIndexEntryRoot:
4469         case AddIndexEntryAllocation:
4470         case DeleteIndexEntryAllocation:
4471         case WriteEndOfIndexBuffer:
4472         case SetIndexEntryVcnRoot:
4473         case SetIndexEntryVcnAllocation:
4474         case UpdateFileNameRoot:
4475         case UpdateFileNameAllocation:
4476         case SetBitsInNonresidentBitMap:
4477         case ClearBitsInNonresidentBitMap:
4478         case UpdateRecordDataRoot:
4479         case UpdateRecordDataAllocation:
4480         case ZeroEndOfFileRecord:
4481                 t16 = le16_to_cpu(lrh->target_attr);
4482                 t64 = le64_to_cpu(lrh->target_vcn);
4483                 dp = find_dp(dptbl, t16, t64);
4484
4485                 if (dp)
4486                         goto copy_lcns;
4487
4488                 /*
4489                  * Calculate the number of clusters per page for the system
4490                  * which wrote the checkpoint, possibly creating the table.
4491                  */
4492                 if (dptbl) {
4493                         t32 = (le16_to_cpu(dptbl->size) -
4494                                sizeof(struct DIR_PAGE_ENTRY)) /
4495                               sizeof(u64);
4496                 } else {
4497                         t32 = log->clst_per_page;
4498                         kfree(dptbl);
4499                         dptbl = init_rsttbl(struct_size(dp, page_lcns, t32),
4500                                             32);
4501                         if (!dptbl) {
4502                                 err = -ENOMEM;
4503                                 goto out;
4504                         }
4505                 }
4506
4507                 dp = alloc_rsttbl_idx(&dptbl);
4508                 if (!dp) {
4509                         err = -ENOMEM;
4510                         goto out;
4511                 }
4512                 dp->target_attr = cpu_to_le32(t16);
4513                 dp->transfer_len = cpu_to_le32(t32 << sbi->cluster_bits);
4514                 dp->lcns_follow = cpu_to_le32(t32);
4515                 dp->vcn = cpu_to_le64(t64 & ~((u64)t32 - 1));
4516                 dp->oldest_lsn = cpu_to_le64(rec_lsn);
4517
4518 copy_lcns:
4519                 /*
4520                  * Copy the Lcns from the log record into the Dirty Page Entry.
4521                  * TODO: For different page size support, must somehow make the
4522                  * whole routine a loop, in case the Lcns do not fit below.
4523                  */
4524                 t16 = le16_to_cpu(lrh->lcns_follow);
4525                 for (i = 0; i < t16; i++) {
4526                         size_t j = (size_t)(le64_to_cpu(lrh->target_vcn) -
4527                                             le64_to_cpu(dp->vcn));
4528                         dp->page_lcns[j + i] = lrh->page_lcns[i];
4529                 }
4530
4531                 goto next_log_record_analyze;
4532
4533         case DeleteDirtyClusters: {
4534                 u32 range_count =
4535                         le16_to_cpu(lrh->redo_len) / sizeof(struct LCN_RANGE);
4536                 const struct LCN_RANGE *r =
4537                         Add2Ptr(lrh, le16_to_cpu(lrh->redo_off));
4538
4539                 /* Loop through all of the Lcn ranges in this log record. */
4540                 for (i = 0; i < range_count; i++, r++) {
4541                         u64 lcn0 = le64_to_cpu(r->lcn);
4542                         u64 lcn_e = lcn0 + le64_to_cpu(r->len) - 1;
4543
4544                         dp = NULL;
4545                         while ((dp = enum_rstbl(dptbl, dp))) {
4546                                 u32 j;
4547
4548                                 t32 = le32_to_cpu(dp->lcns_follow);
4549                                 for (j = 0; j < t32; j++) {
4550                                         t64 = le64_to_cpu(dp->page_lcns[j]);
4551                                         if (t64 >= lcn0 && t64 <= lcn_e)
4552                                                 dp->page_lcns[j] = 0;
4553                                 }
4554                         }
4555                 }
4556                 goto next_log_record_analyze;
4558         }
4559
4560         case OpenNonresidentAttribute:
4561                 t16 = le16_to_cpu(lrh->target_attr);
4562                 if (t16 >= bytes_per_rt(oatbl)) {
4563                         /*
4564                          * Compute how big the table needs to be.
4565                          * Add 10 extra entries for some cushion.
4566                          */
4567                         u32 new_e = t16 / le16_to_cpu(oatbl->size);
4568
4569                         new_e += 10 - le16_to_cpu(oatbl->used);
4570
4571                         oatbl = extend_rsttbl(oatbl, new_e, ~0u);
4572                         log->open_attr_tbl = oatbl;
4573                         if (!oatbl) {
4574                                 err = -ENOMEM;
4575                                 goto out;
4576                         }
4577                 }
4578
4579                 /* Point to the entry being opened. */
4580                 oe = alloc_rsttbl_from_idx(&oatbl, t16);
4581                 log->open_attr_tbl = oatbl;
4582                 if (!oe) {
4583                         err = -ENOMEM;
4584                         goto out;
4585                 }
4586
4587                 /* Initialize this entry from the log record. */
4588                 t16 = le16_to_cpu(lrh->redo_off);
4589                 if (!rst->major_ver) {
4590                         /* Convert version '0' into version '1'. */
4591                         struct OPEN_ATTR_ENRTY_32 *oe0 = Add2Ptr(lrh, t16);
4592
4593                         oe->bytes_per_index = oe0->bytes_per_index;
4594                         oe->type = oe0->type;
4595                         oe->is_dirty_pages = oe0->is_dirty_pages;
4596                         oe->name_len = 0; //oe0.name_len;
4597                         oe->ref = oe0->ref;
4598                         oe->open_record_lsn = oe0->open_record_lsn;
4599                 } else {
4600                         memcpy(oe, Add2Ptr(lrh, t16), bytes_per_attr_entry);
4601                 }
4602
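                     /*
                      * If the record carries undo data, it is the Unicode name
                      * of the attribute being opened; keep a private copy of it
                      * in oe->ptr and mark the entry accordingly.
                      */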
4603                 t16 = le16_to_cpu(lrh->undo_len);
4604                 if (t16) {
4605                         oe->ptr = kmalloc(t16, GFP_NOFS);
4606                         if (!oe->ptr) {
4607                                 err = -ENOMEM;
4608                                 goto out;
4609                         }
4610                         oe->name_len = t16 / sizeof(short);
4611                         memcpy(oe->ptr,
4612                                Add2Ptr(lrh, le16_to_cpu(lrh->undo_off)), t16);
4613                         oe->is_attr_name = 1;
4614                 } else {
4615                         oe->ptr = NULL;
4616                         oe->is_attr_name = 0;
4617                 }
4618
4619                 goto next_log_record_analyze;
4620
4621         case HotFix:
4622                 t16 = le16_to_cpu(lrh->target_attr);
4623                 t64 = le64_to_cpu(lrh->target_vcn);
4624                 dp = find_dp(dptbl, t16, t64);
4625                 if (dp) {
4626                         size_t j = le64_to_cpu(lrh->target_vcn) -
4627                                    le64_to_cpu(dp->vcn);
4628                         if (dp->page_lcns[j])
4629                                 dp->page_lcns[j] = lrh->page_lcns[0];
4630                 }
4631                 goto next_log_record_analyze;
4632
4633         case EndTopLevelAction:
4634                 tr = Add2Ptr(trtbl, transact_id);
4635                 tr->prev_lsn = cpu_to_le64(rec_lsn);
4636                 tr->undo_next_lsn = frh->client_undo_next_lsn;
4637                 goto next_log_record_analyze;
4638
4639         case PrepareTransaction:
4640                 tr = Add2Ptr(trtbl, transact_id);
4641                 tr->transact_state = TransactionPrepared;
4642                 goto next_log_record_analyze;
4643
4644         case CommitTransaction:
4645                 tr = Add2Ptr(trtbl, transact_id);
4646                 tr->transact_state = TransactionCommitted;
4647                 goto next_log_record_analyze;
4648
4649         case ForgetTransaction:
4650                 free_rsttbl_idx(trtbl, transact_id);
4651                 goto next_log_record_analyze;
4652
4653         case Noop:
4654         case OpenAttributeTableDump:
4655         case AttributeNamesDump:
4656         case DirtyPageTableDump:
4657         case TransactionTableDump:
4658                 /* The following cases require no action in the Analysis Pass. */
4659                 goto next_log_record_analyze;
4660
4661         default:
4662                 /*
4663                  * All codes will be explicitly handled.
4664                  * If we see a code we do not expect, then we are in trouble.
4665                  */
4666                 goto next_log_record_analyze;
4667         }
4668
4669 end_log_records_enumerate:
4670         lcb_put(lcb);
4671         lcb = NULL;
4672
4673         /*
4674          * Scan the Dirty Page Table and Transaction Table for
4675          * the lowest lsn, and return it as the Redo lsn.
4676          */
4677         dp = NULL;
4678         while ((dp = enum_rstbl(dptbl, dp))) {
4679                 t64 = le64_to_cpu(dp->oldest_lsn);
4680                 if (t64 && t64 < rlsn)
4681                         rlsn = t64;
4682         }
4683
4684         tr = NULL;
4685         while ((tr = enum_rstbl(trtbl, tr))) {
4686                 t64 = le64_to_cpu(tr->first_lsn);
4687                 if (t64 && t64 < rlsn)
4688                         rlsn = t64;
4689         }
4690
4691         /*
4692          * Only proceed if the Dirty Page Table or the Transaction
4693          * Table is not empty.
4694          */
4695         if ((!dptbl || !dptbl->total) && (!trtbl || !trtbl->total))
4696                 goto end_reply;
4697
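             /*
              * There is work to do: remember that the volume needs replay.
              * On a read-only mount we stop here, since the log cannot be
              * applied.
              */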
4698         sbi->flags |= NTFS_FLAGS_NEED_REPLAY;
4699         if (is_ro)
4700                 goto out;
4701
4702         /* Reopen all of the attributes with dirty pages. */
4703         oe = NULL;
4704 next_open_attribute:
4705
4706         oe = enum_rstbl(oatbl, oe);
4707         if (!oe) {
4708                 err = 0;
4709                 dp = NULL;
4710                 goto next_dirty_page;
4711         }
4712
4713         oa = kzalloc(sizeof(struct OpenAttr), GFP_NOFS);
4714         if (!oa) {
4715                 err = -ENOMEM;
4716                 goto out;
4717         }
4718
4719         inode = ntfs_iget5(sbi->sb, &oe->ref, NULL);
4720         if (IS_ERR(inode))
4721                 goto fake_attr;
4722
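             /*
              * If the inode or its attribute cannot be used, fall back to a
              * synthetic non-resident attribute (fake_attr) so that replay
              * can still track a run for this open attribute entry.
              */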
4723         if (is_bad_inode(inode)) {
4724                 iput(inode);
4725 fake_attr:
4726                 if (oa->ni) {
4727                         iput(&oa->ni->vfs_inode);
4728                         oa->ni = NULL;
4729                 }
4730
4731                 attr = attr_create_nonres_log(sbi, oe->type, 0, oe->ptr,
4732                                               oe->name_len, 0);
4733                 if (!attr) {
4734                         kfree(oa);
4735                         err = -ENOMEM;
4736                         goto out;
4737                 }
4738                 oa->attr = attr;
4739                 oa->run1 = &oa->run0;
4740                 goto final_oe;
4741         }
4742
4743         ni_oe = ntfs_i(inode);
4744         oa->ni = ni_oe;
4745
4746         attr = ni_find_attr(ni_oe, NULL, NULL, oe->type, oe->ptr, oe->name_len,
4747                             NULL, NULL);
4748
4749         if (!attr)
4750                 goto fake_attr;
4751
4752         t32 = le32_to_cpu(attr->size);
4753         oa->attr = kmemdup(attr, t32, GFP_NOFS);
4754         if (!oa->attr)
4755                 goto fake_attr;
4756
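             /*
              * For the unnamed $DATA attribute of a regular file, or the $I30
              * index allocation attribute of a directory, reuse the run map
              * already cached in the in-memory inode; any other attribute gets
              * a private run unpacked from its mapping pairs below.
              */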
4757         if (!S_ISDIR(inode->i_mode)) {
4758                 if (attr->type == ATTR_DATA && !attr->name_len) {
4759                         oa->run1 = &ni_oe->file.run;
4760                         goto final_oe;
4761                 }
4762         } else {
4763                 if (attr->type == ATTR_ALLOC &&
4764                     attr->name_len == ARRAY_SIZE(I30_NAME) &&
4765                     !memcmp(attr_name(attr), I30_NAME, sizeof(I30_NAME))) {
4766                         oa->run1 = &ni_oe->dir.alloc_run;
4767                         goto final_oe;
4768                 }
4769         }
4770
4771         if (attr->non_res) {
4772                 u16 roff = le16_to_cpu(attr->nres.run_off);
4773                 CLST svcn = le64_to_cpu(attr->nres.svcn);
4774
4775                 if (roff > t32) {
4776                         kfree(oa->attr);
4777                         oa->attr = NULL;
4778                         goto fake_attr;
4779                 }
4780
4781                 err = run_unpack(&oa->run0, sbi, inode->i_ino, svcn,
4782                                  le64_to_cpu(attr->nres.evcn), svcn,
4783                                  Add2Ptr(attr, roff), t32 - roff);
4784                 if (err < 0) {
4785                         kfree(oa->attr);
4786                         oa->attr = NULL;
4787                         goto fake_attr;
4788                 }
4789                 err = 0;
4790         }
4791         oa->run1 = &oa->run0;
4792         attr = oa->attr;
4793
4794 final_oe:
4795         if (oe->is_attr_name == 1)
4796                 kfree(oe->ptr);
4797         oe->is_attr_name = 0;
4798         oe->ptr = oa;
4799         oe->name_len = attr->name_len;
4800
4801         goto next_open_attribute;
4802
4803         /*
4804          * Now loop through the dirty page table to extract all of the Vcn/Lcn
4805          * mappings that we have, and insert them into the appropriate run.
4806          */
4807 next_dirty_page:
4808         dp = enum_rstbl(dptbl, dp);
4809         if (!dp)
4810                 goto do_redo_1;
4811
4812         oe = Add2Ptr(oatbl, le32_to_cpu(dp->target_attr));
4813
4814         if (oe->next != RESTART_ENTRY_ALLOCATED_LE)
4815                 goto next_dirty_page;
4816
4817         oa = oe->ptr;
4818         if (!oa)
4819                 goto next_dirty_page;
4820
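             /* Walk every Lcn recorded in this dirty page entry. */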
4821         i = -1;
4822 next_dirty_page_vcn:
4823         i += 1;
4824         if (i >= le32_to_cpu(dp->lcns_follow))
4825                 goto next_dirty_page;
4826
4827         vcn = le64_to_cpu(dp->vcn) + i;
4828         size = (vcn + 1) << sbi->cluster_bits;
4829
4830         if (!dp->page_lcns[i])
4831                 goto next_dirty_page_vcn;
4832
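             /*
              * Leave alone the part of $MFT/$MFTMirr DATA that covers the
              * first system file records (up to $Volume); presumably these
              * must stay as they were read at mount time.
              */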
4833         rno = ino_get(&oe->ref);
4834         if (rno <= MFT_REC_MIRR &&
4835             size < (MFT_REC_VOL + 1) * sbi->record_size &&
4836             oe->type == ATTR_DATA) {
4837                 goto next_dirty_page_vcn;
4838         }
4839
4840         lcn = le64_to_cpu(dp->page_lcns[i]);
4841
4842         if ((!run_lookup_entry(oa->run1, vcn, &lcn0, &len0, NULL) ||
4843              lcn0 != lcn) &&
4844             !run_add_entry(oa->run1, vcn, lcn, 1, false)) {
4845                 err = -ENOMEM;
4846                 goto out;
4847         }
4848         attr = oa->attr;
4849         t64 = le64_to_cpu(attr->nres.alloc_size);
4850         if (size > t64) {
4851                 attr->nres.valid_size = attr->nres.data_size =
4852                         attr->nres.alloc_size = cpu_to_le64(size);
4853         }
4854         goto next_dirty_page_vcn;
4855
4856 do_redo_1:
4857         /*
4858          * Perform the Redo Pass, to restore all of the dirty pages to the same
4859          * contents that they had immediately before the crash. If the dirty
4860          * page table is empty, then we can skip the entire Redo Pass.
4861          */
4862         if (!dptbl || !dptbl->total)
4863                 goto do_undo_action;
4864
4865         rec_lsn = rlsn;
4866
4867         /*
4868          * Read the record at the Redo lsn, before falling
4869          * into common code to handle each record.
4870          */
4871         err = read_log_rec_lcb(log, rlsn, lcb_ctx_next, &lcb);
4872         if (err)
4873                 goto out;
4874
4875         /*
4876          * Now loop to read all of our log records forwards, until
4877          * we hit the end of the file, cleaning up at the end.
4878          */
4879 do_action_next:
4880         frh = lcb->lrh;
4881
4882         if (LfsClientRecord != frh->record_type)
4883                 goto read_next_log_do_action;
4884
4885         transact_id = le32_to_cpu(frh->transact_id);
4886         rec_len = le32_to_cpu(frh->client_data_len);
4887         lrh = lcb->log_rec;
4888
4889         if (!check_log_rec(lrh, rec_len, transact_id, bytes_per_attr_entry)) {
4890                 err = -EINVAL;
4891                 goto out;
4892         }
4893
4894         /* Ignore log records that do not update pages. */
4895         if (lrh->lcns_follow)
4896                 goto find_dirty_page;
4897
4898         goto read_next_log_do_action;
4899
4900 find_dirty_page:
4901         t16 = le16_to_cpu(lrh->target_attr);
4902         t64 = le64_to_cpu(lrh->target_vcn);
4903         dp = find_dp(dptbl, t16, t64);
4904
4905         if (!dp)
4906                 goto read_next_log_do_action;
4907
4908         if (rec_lsn < le64_to_cpu(dp->oldest_lsn))
4909                 goto read_next_log_do_action;
4910
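             /*
              * target_attr is a byte offset into the Open Attribute Table;
              * validate it before using it to locate the open attribute entry.
              */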
4911         t16 = le16_to_cpu(lrh->target_attr);
4912         if (t16 >= bytes_per_rt(oatbl)) {
4913                 err = -EINVAL;
4914                 goto out;
4915         }
4916
4917         oe = Add2Ptr(oatbl, t16);
4918
4919         if (oe->next != RESTART_ENTRY_ALLOCATED_LE) {
4920                 err = -EINVAL;
4921                 goto out;
4922         }
4923
4924         oa = oe->ptr;
4925
4926         if (!oa) {
4927                 err = -EINVAL;
4928                 goto out;
4929         }
4930         attr = oa->attr;
4931
4932         vcn = le64_to_cpu(lrh->target_vcn);
4933
4934         if (!run_lookup_entry(oa->run1, vcn, &lcn, NULL, NULL) ||
4935             lcn == SPARSE_LCN) {
4936                 goto read_next_log_do_action;
4937         }
4938
4939         /* Point to the Redo data and get its length. */
4940         data = Add2Ptr(lrh, le16_to_cpu(lrh->redo_off));
4941         dlen = le16_to_cpu(lrh->redo_len);
4942
4943         /* Shorten length by any Lcns which were deleted. */
4944         saved_len = dlen;
4945
4946         for (i = le16_to_cpu(lrh->lcns_follow); i; i--) {
4947                 size_t j;
4948                 u32 alen, voff;
4949
4950                 voff = le16_to_cpu(lrh->record_off) +
4951                        le16_to_cpu(lrh->attr_off);
4952                 voff += le16_to_cpu(lrh->cluster_off) << SECTOR_SHIFT;
4953
4954                 /* If the Vcn in question is allocated, we can just get out. */
4955                 j = le64_to_cpu(lrh->target_vcn) - le64_to_cpu(dp->vcn);
4956                 if (dp->page_lcns[j + i - 1])
4957                         break;
4958
4959                 if (!saved_len)
4960                         saved_len = 1;
4961
4962                 /*
4963                  * Calculate the allocated space left relative to the
4964                  * log record Vcn, after removing this unallocated Vcn.
4965                  */
4966                 alen = (i - 1) << sbi->cluster_bits;
4967
4968                 /*
4969                  * If the update described by this log record goes beyond
4970                  * the allocated space, then we will have to reduce the length.
4971                  */
4972                 if (voff >= alen)
4973                         dlen = 0;
4974                 else if (voff + dlen > alen)
4975                         dlen = alen - voff;
4976         }
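             /*
              * An illustrative example of the clipping above (not taken from
              * on-disk data), assuming 4K clusters: if the last of three Lcns
              * is unallocated, alen = 2 * 4096 = 8192, so an update at
              * voff 7000 with dlen 2000 would be clipped to dlen = 1192.
              */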
4977
4978         /*
4979          * If the resulting dlen from above is now zero,
4980          * we can skip this log record.
4981          */
4982         if (!dlen && saved_len)
4983                 goto read_next_log_do_action;
4984
4985         t16 = le16_to_cpu(lrh->redo_op);
4986         if (can_skip_action(t16))
4987                 goto read_next_log_do_action;
4988
4989         /* Apply the Redo operation in a common routine. */
4990         err = do_action(log, oe, lrh, t16, data, dlen, rec_len, &rec_lsn);
4991         if (err)
4992                 goto out;
4993
4994         /* Keep reading and looping back until end of file. */
4995 read_next_log_do_action:
4996         err = read_next_log_rec(log, lcb, &rec_lsn);
4997         if (!err && rec_lsn)
4998                 goto do_action_next;
4999
5000         lcb_put(lcb);
5001         lcb = NULL;
5002
5003 do_undo_action:
5004         /* Scan Transaction Table. */
5005         tr = NULL;
5006 transaction_table_next:
5007         tr = enum_rstbl(trtbl, tr);
5008         if (!tr)
5009                 goto undo_action_done;
5010
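             /*
              * Discard transactions that are no longer active or have nothing
              * to undo; only active transactions with an undo_next_lsn need
              * the Undo Pass below.
              */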
5011         if (TransactionActive != tr->transact_state || !tr->undo_next_lsn) {
5012                 free_rsttbl_idx(trtbl, PtrOffset(trtbl, tr));
5013                 goto transaction_table_next;
5014         }
5015
5016         log->transaction_id = PtrOffset(trtbl, tr);
5017         undo_next_lsn = le64_to_cpu(tr->undo_next_lsn);
5018
5019         /*
5020          * We only have to do anything if the transaction has
5021          * something in its undo_next_lsn field.
5022          */
5023         if (!undo_next_lsn)
5024                 goto commit_undo;
5025
5026         /* Read the first record to be undone by this transaction. */
5027         err = read_log_rec_lcb(log, undo_next_lsn, lcb_ctx_undo_next, &lcb);
5028         if (err)
5029                 goto out;
5030
5031         /*
5032          * Now loop to read all of our log records forwards,
5033          * until we hit the end of the file, cleaning up at the end.
5034          */
5035 undo_action_next:
5036
5037         lrh = lcb->log_rec;
5038         frh = lcb->lrh;
5039         transact_id = le32_to_cpu(frh->transact_id);
5040         rec_len = le32_to_cpu(frh->client_data_len);
5041
5042         if (!check_log_rec(lrh, rec_len, transact_id, bytes_per_attr_entry)) {
5043                 err = -EINVAL;
5044                 goto out;
5045         }
5046
5047         if (lrh->undo_op == cpu_to_le16(Noop))
5048                 goto read_next_log_undo_action;
5049
5050         oe = Add2Ptr(oatbl, le16_to_cpu(lrh->target_attr));
5051         oa = oe->ptr;
5052
5053         t16 = le16_to_cpu(lrh->lcns_follow);
5054         if (!t16)
5055                 goto add_allocated_vcns;
5056
5057         is_mapped = run_lookup_entry(oa->run1, le64_to_cpu(lrh->target_vcn),
5058                                      &lcn, &clen, NULL);
5059
5060         /*
5061          * If the mapping isn't already in the table or the mapping
5062          * corresponds to a hole, we need to make sure there is no
5063          * partial page already in memory.
5064          */
5065         if (is_mapped && lcn != SPARSE_LCN && clen >= t16)
5066                 goto add_allocated_vcns;
5067
5068         vcn = le64_to_cpu(lrh->target_vcn);
5069         vcn &= ~(u64)(log->clst_per_page - 1);
5070
5071 add_allocated_vcns:
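             /*
              * Walk every cluster touched by this undo record and, if needed,
              * grow the attribute sizes so the undo data fits inside the
              * attribute.
              */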
5072         for (i = 0, vcn = le64_to_cpu(lrh->target_vcn),
5073             size = (vcn + 1) << sbi->cluster_bits;
5074              i < t16; i++, vcn += 1, size += sbi->cluster_size) {
5075                 attr = oa->attr;
5076                 if (!attr->non_res) {
5077                         if (size > le32_to_cpu(attr->res.data_size))
5078                                 attr->res.data_size = cpu_to_le32(size);
5079                 } else {
5080                         if (size > le64_to_cpu(attr->nres.data_size))
5081                                 attr->nres.valid_size = attr->nres.data_size =
5082                                         attr->nres.alloc_size =
5083                                                 cpu_to_le64(size);
5084                 }
5085         }
5086
5087         t16 = le16_to_cpu(lrh->undo_op);
5088         if (can_skip_action(t16))
5089                 goto read_next_log_undo_action;
5090
5091         /* Point to the Undo data and get its length. */
5092         data = Add2Ptr(lrh, le16_to_cpu(lrh->undo_off));
5093         dlen = le16_to_cpu(lrh->undo_len);
5094
5095         /* It is time to apply the undo action. */
5096         err = do_action(log, oe, lrh, t16, data, dlen, rec_len, NULL);
5097
5098 read_next_log_undo_action:
5099         /*
5100          * Keep reading and looping back until we have read the
5101          * last record for this transaction.
5102          */
5103         err = read_next_log_rec(log, lcb, &rec_lsn);
5104         if (err)
5105                 goto out;
5106
5107         if (rec_lsn)
5108                 goto undo_action_next;
5109
5110         lcb_put(lcb);
5111         lcb = NULL;
5112
5113 commit_undo:
5114         free_rsttbl_idx(trtbl, log->transaction_id);
5115
5116         log->transaction_id = 0;
5117
5118         goto transaction_table_next;
5119
5120 undo_action_done:
5121
5122         ntfs_update_mftmirr(sbi, 0);
5123
5124         sbi->flags &= ~NTFS_FLAGS_NEED_REPLAY;
5125
5126 end_reply:
5127
5128         err = 0;
5129         if (is_ro)
5130                 goto out;
5131
5132         rh = kzalloc(log->page_size, GFP_NOFS);
5133         if (!rh) {
5134                 err = -ENOMEM;
5135                 goto out;
5136         }
5137
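             /*
              * Rebuild a clean restart page: the fixup (update sequence) array
              * has one entry per 512-byte sector of the page plus one for the
              * update sequence number itself.
              */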
5138         rh->rhdr.sign = NTFS_RSTR_SIGNATURE;
5139         rh->rhdr.fix_off = cpu_to_le16(offsetof(struct RESTART_HDR, fixups));
5140         t16 = (log->page_size >> SECTOR_SHIFT) + 1;
5141         rh->rhdr.fix_num = cpu_to_le16(t16);
5142         rh->sys_page_size = cpu_to_le32(log->page_size);
5143         rh->page_size = cpu_to_le32(log->page_size);
5144
5145         t16 = ALIGN(offsetof(struct RESTART_HDR, fixups) + sizeof(short) * t16,
5146                     8);
5147         rh->ra_off = cpu_to_le16(t16);
5148         rh->minor_ver = cpu_to_le16(1); // 0x1A:
5149         rh->major_ver = cpu_to_le16(1); // 0x1C:
5150
5151         ra2 = Add2Ptr(rh, t16);
5152         memcpy(ra2, ra, sizeof(struct RESTART_AREA));
5153
5154         ra2->client_idx[0] = 0;
5155         ra2->client_idx[1] = LFS_NO_CLIENT_LE;
5156         ra2->flags = cpu_to_le16(2);
5157
5158         le32_add_cpu(&ra2->open_log_count, 1);
5159
5160         ntfs_fix_pre_write(&rh->rhdr, log->page_size);
5161
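             /*
              * Write the rebuilt restart page to both restart areas, at byte
              * offsets 0 and page_size of the log file.
              */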
5162         err = ntfs_sb_write_run(sbi, &ni->file.run, 0, rh, log->page_size, 0);
5163         if (!err)
5164                 err = ntfs_sb_write_run(sbi, &log->ni->file.run, log->page_size,
5165                                         rh, log->page_size, 0);
5166
5167         kfree(rh);
5168         if (err)
5169                 goto out;
5170
5171 out:
5172         kfree(rst);
5173         if (lcb)
5174                 lcb_put(lcb);
5175
5176         /*
5177          * Scan the Open Attribute Table to close all of
5178          * the open attributes.
5179          */
5180         oe = NULL;
5181         while ((oe = enum_rstbl(oatbl, oe))) {
5182                 rno = ino_get(&oe->ref);
5183
5184                 if (oe->is_attr_name == 1) {
5185                         kfree(oe->ptr);
5186                         oe->ptr = NULL;
5187                         continue;
5188                 }
5189
5190                 if (oe->is_attr_name)
5191                         continue;
5192
5193                 oa = oe->ptr;
5194                 if (!oa)
5195                         continue;
5196
5197                 run_close(&oa->run0);
5198                 kfree(oa->attr);
5199                 if (oa->ni)
5200                         iput(&oa->ni->vfs_inode);
5201                 kfree(oa);
5202         }
5203
5204         kfree(trtbl);
5205         kfree(oatbl);
5206         kfree(dptbl);
5207         kfree(attr_names);
5208         kfree(rst_info.r_page);
5209
5210         kfree(ra);
5211         kfree(log->one_page_buf);
5212
5213         if (err)
5214                 sbi->flags |= NTFS_FLAGS_NEED_REPLAY;
5215
5216         if (err == -EROFS)
5217                 err = 0;
5218         else if (log->set_dirty)
5219                 ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
5220
5221         kfree(log);
5222
5223         return err;
5224 }