/*
 *  linux/fs/hpfs/anode.c
 *
 *  Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
 *
 *  handling HPFS anode tree that contains file allocation info
 */

#include "hpfs_fn.h"
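
/*
 * Annotation (not from the original source): every fnode and anode embeds
 * a struct bplus_header (defined in hpfs.h) followed by an array of
 * entries.  Roughly:
 *
 *	internal entry (8 bytes):  { file_secno, down }
 *		"file sectors below file_secno live in the subtree rooted
 *		 at anode 'down'"
 *	external entry (12 bytes): { file_secno, disk_secno, length }
 *		"'length' sectors starting at file sector 'file_secno' are
 *		 stored contiguously from disk sector 'disk_secno'"
 *
 * The header carries the internal/fnode_parent flags, n_used_nodes,
 * n_free_nodes and first_free (byte offset of the first unused entry),
 * which is why the code below bumps first_free by 8 or 12 per entry.
 */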

/* Find a sector in allocation tree */

secno hpfs_bplus_lookup(struct super_block *s, struct inode *inode,
		   struct bplus_header *btree, unsigned sec,
		   struct buffer_head *bh)
{
	anode_secno a = -1;
	struct anode *anode;
	int i;
	int c1, c2 = 0;
	go_down:
	if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, a, &c1, &c2, "hpfs_bplus_lookup")) return -1;
	if (btree->internal) {
		for (i = 0; i < btree->n_used_nodes; i++)
			if (le32_to_cpu(btree->u.internal[i].file_secno) > sec) {
				a = le32_to_cpu(btree->u.internal[i].down);
				brelse(bh);
				if (!(anode = hpfs_map_anode(s, a, &bh))) return -1;
				btree = &anode->btree;
				goto go_down;
			}
		hpfs_error(s, "sector %08x not found in internal anode %08x", sec, a);
		brelse(bh);
		return -1;
	}
	for (i = 0; i < btree->n_used_nodes; i++)
		if (le32_to_cpu(btree->u.external[i].file_secno) <= sec &&
		    le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) > sec) {
			a = le32_to_cpu(btree->u.external[i].disk_secno) + sec - le32_to_cpu(btree->u.external[i].file_secno);
			if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, a, 1, "data")) {
				brelse(bh);
				return -1;
			}
			if (inode) {
				struct hpfs_inode_info *hpfs_inode = hpfs_i(inode);
				hpfs_inode->i_file_sec = le32_to_cpu(btree->u.external[i].file_secno);
				hpfs_inode->i_disk_sec = le32_to_cpu(btree->u.external[i].disk_secno);
				hpfs_inode->i_n_secs = le32_to_cpu(btree->u.external[i].length);
			}
			brelse(bh);
			return a;
		}
	hpfs_error(s, "sector %08x not found in external anode %08x", sec, a);
	brelse(bh);
	return -1;
}
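
/*
 * Illustrative caller sketch (annotation, roughly what fs/hpfs/file.c does;
 * not part of this file): map a file-relative sector to a disk sector.
 * The caller maps the fnode and hands over its buffer_head;
 * hpfs_bplus_lookup() releases whatever buffer it ends up holding, so the
 * caller must not brelse() bh again.
 *
 *	struct buffer_head *bh;
 *	struct fnode *fnode;
 *	secno disk_secno;
 *
 *	if (!(fnode = hpfs_map_fnode(inode->i_sb, inode->i_ino, &bh)))
 *		return 0;
 *	disk_secno = hpfs_bplus_lookup(inode->i_sb, inode, &fnode->btree,
 *				       file_secno, bh);
 *	if (disk_secno == (secno)-1)
 *		return 0;
 */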

/* Add a sector to tree */

secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsigned fsecno)
{
	struct bplus_header *btree;
	struct anode *anode = NULL, *ranode = NULL;
	struct fnode *fnode;
	anode_secno a, na = -1, ra, up = -1;
	secno se;
	struct buffer_head *bh, *bh1, *bh2;
	int n;
	unsigned fs;
	int c1, c2 = 0;
	if (fnod) {
		if (!(fnode = hpfs_map_fnode(s, node, &bh))) return -1;
		btree = &fnode->btree;
	} else {
		if (!(anode = hpfs_map_anode(s, node, &bh))) return -1;
		btree = &anode->btree;
	}
	a = node;
	/* walk down the rightmost path to the leaf where the new sector goes */
	go_down:
	if ((n = btree->n_used_nodes - 1) < -!!fnod) {
		hpfs_error(s, "anode %08x has no entries", a);
		brelse(bh);
		return -1;
	}
	if (btree->internal) {
		a = le32_to_cpu(btree->u.internal[n].down);
		btree->u.internal[n].file_secno = cpu_to_le32(-1);
		mark_buffer_dirty(bh);
		brelse(bh);
		if (hpfs_sb(s)->sb_chk)
			if (hpfs_stop_cycles(s, a, &c1, &c2, "hpfs_add_sector_to_btree #1")) return -1;
		if (!(anode = hpfs_map_anode(s, a, &bh))) return -1;
		btree = &anode->btree;
		goto go_down;
	}
	if (n >= 0) {
		if (le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length) != fsecno) {
			hpfs_error(s, "allocated size %08x, trying to add sector %08x, %cnode %08x",
				le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length), fsecno,
				fnod?'f':'a', node);
			brelse(bh);
			return -1;
		}
		/* try to extend the last extent in place first */
		if (hpfs_alloc_if_possible(s, se = le32_to_cpu(btree->u.external[n].disk_secno) + le32_to_cpu(btree->u.external[n].length))) {
			btree->u.external[n].length = cpu_to_le32(le32_to_cpu(btree->u.external[n].length) + 1);
			mark_buffer_dirty(bh);
			brelse(bh);
			return se;
		}
	} else {
		if (fsecno) {
			hpfs_error(s, "empty file %08x, trying to add sector %08x", node, fsecno);
			brelse(bh);
			return -1;
		}
		se = !fnod ? node : (node + 16384) & ~16383;
	}
	if (!(se = hpfs_alloc_sector(s, se, 1, fsecno*ALLOC_M>ALLOC_FWD_MAX ? ALLOC_FWD_MAX : fsecno*ALLOC_M<ALLOC_FWD_MIN ? ALLOC_FWD_MIN : fsecno*ALLOC_M))) {
		brelse(bh);
		return -1;
	}
	fs = n < 0 ? 0 : le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length);
	if (!btree->n_free_nodes) {
		/* the leaf is full: allocate a new anode to take the entries */
		up = a != node ? le32_to_cpu(anode->up) : -1;
		if (!(anode = hpfs_alloc_anode(s, a, &na, &bh1))) {
			brelse(bh);
			hpfs_free_sectors(s, se, 1);
			return -1;
		}
		if (a == node && fnod) {
			/* the root is an fnode: move its leaves into the new anode
			   and turn the fnode's btree into an internal node */
			anode->up = cpu_to_le32(node);
			anode->btree.fnode_parent = 1;
			anode->btree.n_used_nodes = btree->n_used_nodes;
			anode->btree.first_free = btree->first_free;
			anode->btree.n_free_nodes = 40 - anode->btree.n_used_nodes;
			memcpy(&anode->u, &btree->u, btree->n_used_nodes * 12);
			btree->internal = 1;
			btree->n_free_nodes = 11;
			btree->n_used_nodes = 1;
			btree->first_free = cpu_to_le16((char *)&(btree->u.internal[1]) - (char *)btree);
			btree->u.internal[0].file_secno = cpu_to_le32(-1);
			btree->u.internal[0].down = cpu_to_le32(na);
			mark_buffer_dirty(bh);
		} else if (!(ranode = hpfs_alloc_anode(s, /*a*/0, &ra, &bh2))) {
			brelse(bh);
			brelse(bh1);
			hpfs_free_sectors(s, se, 1);
			hpfs_free_sectors(s, na, 1);
			return -1;
		}
		brelse(bh);
		bh = bh1;
		btree = &anode->btree;
	}
	btree->n_free_nodes--; n = btree->n_used_nodes++;
	btree->first_free = cpu_to_le16(le16_to_cpu(btree->first_free) + 12);
	btree->u.external[n].disk_secno = cpu_to_le32(se);
	btree->u.external[n].file_secno = cpu_to_le32(fs);
	btree->u.external[n].length = cpu_to_le32(1);
	mark_buffer_dirty(bh);
	brelse(bh);
	if ((a == node && fnod) || na == -1) return se;
	c2 = 0;
	/* a new leaf anode was created: hook it into the first ancestor that
	   still has a free internal entry */
	while (up != (anode_secno)-1) {
		struct anode *new_anode;
		if (hpfs_sb(s)->sb_chk)
			if (hpfs_stop_cycles(s, up, &c1, &c2, "hpfs_add_sector_to_btree #2")) return -1;
		if (up != node || !fnod) {
			if (!(anode = hpfs_map_anode(s, up, &bh))) return -1;
			btree = &anode->btree;
		} else {
			if (!(fnode = hpfs_map_fnode(s, up, &bh))) return -1;
			btree = &fnode->btree;
		}
		if (btree->n_free_nodes) {
			btree->n_free_nodes--; n = btree->n_used_nodes++;
			btree->first_free = cpu_to_le16(le16_to_cpu(btree->first_free) + 8);
			btree->u.internal[n].file_secno = cpu_to_le32(-1);
			btree->u.internal[n].down = cpu_to_le32(na);
			btree->u.internal[n-1].file_secno = cpu_to_le32(fs);
			mark_buffer_dirty(bh);
			brelse(bh);
			brelse(bh2);
			hpfs_free_sectors(s, ra, 1);
			if ((anode = hpfs_map_anode(s, na, &bh))) {
				anode->up = cpu_to_le32(up);
				anode->btree.fnode_parent = up == node && fnod;
				mark_buffer_dirty(bh);
				brelse(bh);
			}
			return se;
		}
		up = up != node ? le32_to_cpu(anode->up) : -1;
		btree->u.internal[btree->n_used_nodes - 1].file_secno = cpu_to_le32(/*fs*/-1);
		mark_buffer_dirty(bh);
		brelse(bh);
		a = na;
		if ((new_anode = hpfs_alloc_anode(s, a, &na, &bh))) {
			/* no free entry in this ancestor: interpose a new internal
			   anode between it and the node we came from */
			anode = new_anode;
			/*anode->up = cpu_to_le32(up != -1 ? up : ra);*/
			anode->btree.internal = 1;
			anode->btree.n_used_nodes = 1;
			anode->btree.n_free_nodes = 59;
			anode->btree.first_free = cpu_to_le16(16);
			anode->btree.u.internal[0].down = cpu_to_le32(a);
			anode->btree.u.internal[0].file_secno = cpu_to_le32(-1);
			mark_buffer_dirty(bh);
			brelse(bh);
			if ((anode = hpfs_map_anode(s, a, &bh))) {
				anode->up = cpu_to_le32(na);
				mark_buffer_dirty(bh);
				brelse(bh);
			}
		} else na = a;
	}
	if ((anode = hpfs_map_anode(s, na, &bh))) {
		anode->up = cpu_to_le32(node);
		if (fnod) anode->btree.fnode_parent = 1;
		mark_buffer_dirty(bh);
		brelse(bh);
	}
	/* no ancestor had room: the old root's contents move into ranode and
	   the root is rebuilt with two entries, ranode and the new chain */
	if (!fnod) {
		if (!(anode = hpfs_map_anode(s, node, &bh))) {
			brelse(bh2);
			return -1;
		}
		btree = &anode->btree;
	} else {
		if (!(fnode = hpfs_map_fnode(s, node, &bh))) {
			brelse(bh2);
			return -1;
		}
		btree = &fnode->btree;
	}
	ranode->up = cpu_to_le32(node);
	memcpy(&ranode->btree, btree, le16_to_cpu(btree->first_free));
	if (fnod) ranode->btree.fnode_parent = 1;
	ranode->btree.n_free_nodes = (ranode->btree.internal ? 60 : 40) - ranode->btree.n_used_nodes;
	if (ranode->btree.internal) for (n = 0; n < ranode->btree.n_used_nodes; n++) {
		struct anode *unode;
		if ((unode = hpfs_map_anode(s, le32_to_cpu(ranode->u.internal[n].down), &bh1))) {
			unode->up = cpu_to_le32(ra);
			unode->btree.fnode_parent = 0;
			mark_buffer_dirty(bh1);
			brelse(bh1);
		}
	}
	btree->internal = 1;
	btree->n_free_nodes = fnod ? 10 : 58;
	btree->n_used_nodes = 2;
	btree->first_free = cpu_to_le16((char *)&btree->u.internal[2] - (char *)btree);
	btree->u.internal[0].file_secno = cpu_to_le32(fs);
	btree->u.internal[0].down = cpu_to_le32(ra);
	btree->u.internal[1].file_secno = cpu_to_le32(-1);
	btree->u.internal[1].down = cpu_to_le32(na);
	mark_buffer_dirty(bh);
	brelse(bh);
	mark_buffer_dirty(bh2);
	brelse(bh2);
	return se;
}
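
/*
 * Illustrative caller sketch (annotation, approximating the get_block path
 * in fs/hpfs/file.c; not part of this file): files grow one sector at a
 * time and only at the end of the current allocation (fsecno must equal
 * the number of sectors already allocated, see the "allocated size" check
 * above); on failure the caller rolls back with hpfs_truncate_btree():
 *
 *	secno s;
 *	s = hpfs_add_sector_to_btree(inode->i_sb, inode->i_ino, 1,
 *				     inode->i_blocks - 1);
 *	if (s == (secno)-1) {
 *		hpfs_truncate_btree(inode->i_sb, inode->i_ino, 1,
 *				    inode->i_blocks - 1);
 *		return -ENOSPC;
 *	}
 *	inode->i_blocks++;
 */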

/*
 * Remove allocation tree. Recursion would look much nicer but
 * I want to avoid it because it can cause stack overflow.
 */

void hpfs_remove_btree(struct super_block *s, struct bplus_header *btree)
{
	struct bplus_header *btree1 = btree;
	struct anode *anode = NULL;
	anode_secno ano = 0, oano;
	struct buffer_head *bh;
	int level = 0;
	int pos = 0;
	int i;
	int c1, c2 = 0;
	int d1, d2;
	go_down:
	d2 = 0;
	while (btree1->internal) {
		ano = le32_to_cpu(btree1->u.internal[pos].down);
		if (level) brelse(bh);
		if (hpfs_sb(s)->sb_chk)
			if (hpfs_stop_cycles(s, ano, &d1, &d2, "hpfs_remove_btree #1"))
				return;
		if (!(anode = hpfs_map_anode(s, ano, &bh))) return;
		btree1 = &anode->btree;
		level++;
		pos = 0;
	}
	for (i = 0; i < btree1->n_used_nodes; i++)
		hpfs_free_sectors(s, le32_to_cpu(btree1->u.external[i].disk_secno), le32_to_cpu(btree1->u.external[i].length));
	go_up:
	if (!level) return;
	brelse(bh);
	if (hpfs_sb(s)->sb_chk)
		if (hpfs_stop_cycles(s, ano, &c1, &c2, "hpfs_remove_btree #2")) return;
	hpfs_free_sectors(s, ano, 1);
	oano = ano;
	ano = le32_to_cpu(anode->up);
	if (--level) {
		if (!(anode = hpfs_map_anode(s, ano, &bh))) return;
		btree1 = &anode->btree;
	} else btree1 = btree;
	for (i = 0; i < btree1->n_used_nodes; i++) {
		if (le32_to_cpu(btree1->u.internal[i].down) == oano) {
			if ((pos = i + 1) < btree1->n_used_nodes)
				goto go_down;
			else
				goto go_up;
		}
	}
	hpfs_error(s,
		   "reference to anode %08x not found in anode %08x "
		   "(probably bad up pointer)",
		   oano, level ? ano : -1);
	if (level)
		brelse(bh);
}
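
/*
 * For comparison (annotation, hypothetical helper, not in the driver): the
 * recursion avoided above would look roughly like this; the real code
 * flattens it with the go_down/go_up labels and the anodes' 'up' pointers
 * so that a deep tree cannot overflow the kernel stack.
 *
 *	static void remove_subtree(struct super_block *s, anode_secno ano)
 *	{
 *		struct buffer_head *bh;
 *		struct anode *anode;
 *		int i;
 *		if (!(anode = hpfs_map_anode(s, ano, &bh))) return;
 *		if (anode->btree.internal)
 *			for (i = 0; i < anode->btree.n_used_nodes; i++)
 *				remove_subtree(s, le32_to_cpu(anode->btree.u.internal[i].down));
 *		else
 *			for (i = 0; i < anode->btree.n_used_nodes; i++)
 *				hpfs_free_sectors(s, le32_to_cpu(anode->btree.u.external[i].disk_secno),
 *						  le32_to_cpu(anode->btree.u.external[i].length));
 *		brelse(bh);
 *		hpfs_free_sectors(s, ano, 1);
 *	}
 */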

/* Just a wrapper around hpfs_bplus_lookup .. used for reading eas */

static secno anode_lookup(struct super_block *s, anode_secno a, unsigned sec)
{
	struct anode *anode;
	struct buffer_head *bh;
	if (!(anode = hpfs_map_anode(s, a, &bh))) return -1;
	return hpfs_bplus_lookup(s, NULL, &anode->btree, sec, bh);
}

int hpfs_ea_read(struct super_block *s, secno a, int ano, unsigned pos,
	    unsigned len, char *buf)
{
	struct buffer_head *bh;
	char *data;
	secno sec;
	unsigned l;
	while (len) {
		if (ano) {
			if ((sec = anode_lookup(s, a, pos >> 9)) == -1)
				return -1;
		} else sec = a + (pos >> 9);
		if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, sec, 1, "ea #1")) return -1;
		if (!(data = hpfs_map_sector(s, sec, &bh, (len - 1) >> 9)))
			return -1;
		l = 0x200 - (pos & 0x1ff); if (l > len) l = len;
		memcpy(buf, data + (pos & 0x1ff), l);
		brelse(bh);
		buf += l; pos += l; len -= l;
	}
	return 0;
}

int hpfs_ea_write(struct super_block *s, secno a, int ano, unsigned pos,
	     unsigned len, const char *buf)
{
	struct buffer_head *bh;
	char *data;
	secno sec;
	unsigned l;
	while (len) {
		if (ano) {
			if ((sec = anode_lookup(s, a, pos >> 9)) == -1)
				return -1;
		} else sec = a + (pos >> 9);
		if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, sec, 1, "ea #2")) return -1;
		if (!(data = hpfs_map_sector(s, sec, &bh, (len - 1) >> 9)))
			return -1;
		l = 0x200 - (pos & 0x1ff); if (l > len) l = len;
		memcpy(data + (pos & 0x1ff), buf, l);
		mark_buffer_dirty(bh);
		brelse(bh);
		buf += l; pos += l; len -= l;
	}
	return 0;
}
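
/*
 * Illustrative sketch (annotation, roughly how fs/hpfs/ea.c uses the two
 * helpers above; not part of this file): EA data is addressed by byte
 * position, pos >> 9 selects the sector, either through an anode tree
 * (ano != 0) or from a contiguous run starting at sector 'a' (ano == 0).
 * Reading an indirectly stored EA value looks roughly like:
 *
 *	char *ret;
 *	if (!(ret = kmalloc(ea_len(ea) + 1, GFP_NOFS)))
 *		return NULL;
 *	if (hpfs_ea_read(s, ea_sec(ea), ea->anode, 0, ea_len(ea), ret)) {
 *		kfree(ret);
 *		return NULL;
 *	}
 *	ret[ea_len(ea)] = 0;
 *	return ret;
 */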

void hpfs_ea_remove(struct super_block *s, secno a, int ano, unsigned len)
{
	struct anode *anode;
	struct buffer_head *bh;
	if (ano) {
		if (!(anode = hpfs_map_anode(s, a, &bh))) return;
		hpfs_remove_btree(s, &anode->btree);
		brelse(bh);
		hpfs_free_sectors(s, a, 1);
	} else hpfs_free_sectors(s, a, (len + 511) >> 9);
}

/* Truncate allocation tree. Doesn't join anodes - I hope it doesn't matter */

void hpfs_truncate_btree(struct super_block *s, secno f, int fno, unsigned secs)
{
	struct fnode *fnode;
	struct anode *anode;
	struct buffer_head *bh;
	struct bplus_header *btree;
	anode_secno node = f;
	int i, j, nodes;
	int c1, c2 = 0;
	if (fno) {
		if (!(fnode = hpfs_map_fnode(s, f, &bh))) return;
		btree = &fnode->btree;
	} else {
		if (!(anode = hpfs_map_anode(s, f, &bh))) return;
		btree = &anode->btree;
	}
	if (!secs) {
		/* truncating to zero: drop the whole tree and reset the root */
		hpfs_remove_btree(s, btree);
		if (fno) {
			btree->n_free_nodes = 8;
			btree->n_used_nodes = 0;
			btree->first_free = cpu_to_le16(8);
			btree->internal = 0;
			mark_buffer_dirty(bh);
		} else hpfs_free_sectors(s, f, 1);
		brelse(bh);
		return;
	}
	while (btree->internal) {
		nodes = btree->n_used_nodes + btree->n_free_nodes;
		for (i = 0; i < btree->n_used_nodes; i++)
			if (le32_to_cpu(btree->u.internal[i].file_secno) >= secs) goto f;
		brelse(bh);
		hpfs_error(s, "internal btree %08x doesn't end with -1", node);
		return;
		f:
		/* drop whole subtrees that lie entirely past the new size */
		for (j = i + 1; j < btree->n_used_nodes; j++)
			hpfs_ea_remove(s, le32_to_cpu(btree->u.internal[j].down), 1, 0);
		btree->n_used_nodes = i + 1;
		btree->n_free_nodes = nodes - btree->n_used_nodes;
		btree->first_free = cpu_to_le16(8 + 8 * btree->n_used_nodes);
		mark_buffer_dirty(bh);
		if (btree->u.internal[i].file_secno == cpu_to_le32(secs)) {
			brelse(bh);
			return;
		}
		node = le32_to_cpu(btree->u.internal[i].down);
		brelse(bh);
		if (hpfs_sb(s)->sb_chk)
			if (hpfs_stop_cycles(s, node, &c1, &c2, "hpfs_truncate_btree"))
				return;
		if (!(anode = hpfs_map_anode(s, node, &bh))) return;
		btree = &anode->btree;
	}
	nodes = btree->n_used_nodes + btree->n_free_nodes;
	for (i = 0; i < btree->n_used_nodes; i++)
		if (le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) >= secs) goto ff;
	brelse(bh);
	return;
	ff:
	if (secs <= le32_to_cpu(btree->u.external[i].file_secno)) {
		hpfs_error(s, "there is an allocation error in file %08x, sector %08x", f, secs);
		if (i) i--;
	}
	else if (le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) > secs) {
		/* the new size falls inside this extent: free its tail and shrink it */
		hpfs_free_sectors(s, le32_to_cpu(btree->u.external[i].disk_secno) + secs -
			le32_to_cpu(btree->u.external[i].file_secno), le32_to_cpu(btree->u.external[i].length)
			- secs + le32_to_cpu(btree->u.external[i].file_secno)); /* I hope gcc optimizes this :-) */
		btree->u.external[i].length = cpu_to_le32(secs - le32_to_cpu(btree->u.external[i].file_secno));
	}
	for (j = i + 1; j < btree->n_used_nodes; j++)
		hpfs_free_sectors(s, le32_to_cpu(btree->u.external[j].disk_secno), le32_to_cpu(btree->u.external[j].length));
	btree->n_used_nodes = i + 1;
	btree->n_free_nodes = nodes - btree->n_used_nodes;
	btree->first_free = cpu_to_le16(8 + 12 * btree->n_used_nodes);
	mark_buffer_dirty(bh);
	brelse(bh);
}
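
/*
 * Illustrative caller sketch (annotation, approximating hpfs_truncate() in
 * fs/hpfs/file.c; not part of this file): the tree is cut back to the new
 * size expressed in 512-byte sectors, rounded up; secs == 0 removes the
 * whole tree and resets the root header (or frees the root anode when
 * fno == 0).
 *
 *	hpfs_truncate_btree(inode->i_sb, inode->i_ino, 1,
 *			    (inode->i_size + 511) >> 9);
 */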

/* Remove file or directory and its eas - note that directory must
   be empty when this is called. */

void hpfs_remove_fnode(struct super_block *s, fnode_secno fno)
{
	struct buffer_head *bh;
	struct fnode *fnode;
	struct extended_attribute *ea;
	struct extended_attribute *ea_end;
	if (!(fnode = hpfs_map_fnode(s, fno, &bh))) return;
	if (!fnode->dirflag) hpfs_remove_btree(s, &fnode->btree);
	else hpfs_remove_dtree(s, le32_to_cpu(fnode->u.external[0].disk_secno));
	ea_end = fnode_end_ea(fnode);
	for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea))
		if (ea->indirect)
			hpfs_ea_remove(s, ea_sec(ea), ea->anode, ea_len(ea));
	hpfs_ea_ext_remove(s, le32_to_cpu(fnode->ea_secno), fnode->ea_anode, le32_to_cpu(fnode->ea_size_l));
	brelse(bh);
	hpfs_free_sectors(s, fno, 1);
}
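
/*
 * Note (annotation, not from the original source): this is the final step
 * of unlink/rmdir.  The directory entry is removed first; the call shape
 * from the dirent-removal path is roughly
 *
 *	hpfs_remove_fnode(s, le32_to_cpu(de->fnode));
 *
 * which tears down everything the fnode owns: its allocation b+tree (or
 * the dnode tree for a directory), any indirectly stored extended
 * attributes, the external EA area and finally the fnode sector itself.
 */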