/*
 * Block allocation handling routines for the OSTA-UDF(tm) filesystem.
 *
 * This file is distributed under the terms of the GNU General Public
 * License (GPL). Copies of the GPL can be obtained from:
 *		ftp://prep.ai.mit.edu/pub/gnu/GPL
 * Each contributing author retains all rights to their own work.
 *
 * (C) 1999-2001 Ben Fennema
 * (C) 1999 Stelias Computing Inc
 *
 * 02/24/99 blf  Created.
 */

#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/bitops.h>

#define udf_clear_bit(nr, addr) ext2_clear_bit(nr, addr)
#define udf_set_bit(nr, addr) ext2_set_bit(nr, addr)
#define udf_test_bit(nr, addr) ext2_test_bit(nr, addr)
#define udf_find_first_one_bit(addr, size) find_first_one_bit(addr, size)
#define udf_find_next_one_bit(addr, size, offset) \
		find_next_one_bit(addr, size, offset)

#define leBPL_to_cpup(x) leNUM_to_cpup(BITS_PER_LONG, x)
#define leNUM_to_cpup(x, y) xleNUM_to_cpup(x, y)
#define xleNUM_to_cpup(x, y) (le ## x ## _to_cpup(y))
#define uintBPL_t uint(BITS_PER_LONG)
#define uint(x) xuint(x)
#define xuint(x) __le ## x
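
/*
 * Scan a little-endian bitmap, one BITS_PER_LONG-sized word at a time, for
 * the next set bit at or after @offset within the first @size bits.
 */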
static inline int find_next_one_bit(void *addr, int size, int offset)
        uintBPL_t *p = ((uintBPL_t *) addr) + (offset / BITS_PER_LONG);
        int result = offset & ~(BITS_PER_LONG - 1);

        offset &= (BITS_PER_LONG - 1);
                tmp = leBPL_to_cpup(p++);
                tmp &= ~0UL << offset;
                if (size < BITS_PER_LONG)
                size -= BITS_PER_LONG;
                result += BITS_PER_LONG;
        while (size & ~(BITS_PER_LONG - 1)) {
                tmp = leBPL_to_cpup(p++);
                result += BITS_PER_LONG;
                size -= BITS_PER_LONG;
        tmp = leBPL_to_cpup(p);
        tmp &= ~0UL >> (BITS_PER_LONG - size);
        return result + ffz(~tmp);

#define find_first_one_bit(addr, size)\
        find_next_one_bit((addr), (size), 0)
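
/*
 * Read the block of the on-disk space bitmap that covers @block and cache
 * its buffer_head in bitmap->s_block_bitmap[bitmap_nr].
 */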
static int read_block_bitmap(struct super_block *sb,
                             struct udf_bitmap *bitmap, unsigned int block,
                             unsigned long bitmap_nr)
        struct buffer_head *bh = NULL;

        loc.logicalBlockNum = bitmap->s_extPosition;
        loc.partitionReferenceNum = UDF_SB(sb)->s_partition;

        bh = udf_tread(sb, udf_get_lb_pblock(sb, loc, block));

        bitmap->s_block_bitmap[bitmap_nr] = bh;
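
/*
 * Make sure the bitmap block for @block_group is in memory, reading it from
 * disk if it has not been cached yet.
 */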
static int __load_block_bitmap(struct super_block *sb,
                               struct udf_bitmap *bitmap,
                               unsigned int block_group)
        int nr_groups = bitmap->s_nr_groups;

        if (block_group >= nr_groups) {
                udf_debug("block_group (%d) >= nr_groups (%d)\n", block_group,

        if (bitmap->s_block_bitmap[block_group]) {
                retval = read_block_bitmap(sb, bitmap, block_group,
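
/*
 * Wrapper around __load_block_bitmap() that also verifies that the returned
 * slot really has a bitmap buffer cached before callers dereference it.
 */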
static inline int load_block_bitmap(struct super_block *sb,
                                    struct udf_bitmap *bitmap,
                                    unsigned int block_group)
        slot = __load_block_bitmap(sb, bitmap, block_group);

        if (!bitmap->s_block_bitmap[slot])
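
/*
 * Free @count blocks starting at @bloc + @offset by setting the matching bits
 * in the partition's space bitmap, returning them to the quota and to the
 * free-space count kept in the logical volume integrity descriptor.
 */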
static void udf_bitmap_free_blocks(struct super_block *sb,
                                   struct udf_bitmap *bitmap,
                                   kernel_lb_addr bloc, uint32_t offset,
        struct udf_sb_info *sbi = UDF_SB(sb);
        struct buffer_head *bh = NULL;
        unsigned long block_group;
        unsigned long overflow;

        mutex_lock(&sbi->s_alloc_mutex);
        if (bloc.logicalBlockNum < 0 ||
            (bloc.logicalBlockNum + count) >
            sbi->s_partmaps[bloc.partitionReferenceNum].s_partition_len) {
                udf_debug("%d < %d || %d + %d > %d\n",
                          bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
                          sbi->s_partmaps[bloc.partitionReferenceNum].

        block = bloc.logicalBlockNum + offset +
                (sizeof(struct spaceBitmapDesc) << 3);

        block_group = block >> (sb->s_blocksize_bits + 3);
        bit = block % (sb->s_blocksize << 3);

        /*
         * Check to see if we are freeing blocks across a group boundary.
         */
        if (bit + count > (sb->s_blocksize << 3)) {
                overflow = bit + count - (sb->s_blocksize << 3);

        bitmap_nr = load_block_bitmap(sb, bitmap, block_group);

        bh = bitmap->s_block_bitmap[bitmap_nr];
        for (i = 0; i < count; i++) {
                if (udf_set_bit(bit + i, bh->b_data)) {
                        udf_debug("bit %ld already set\n", bit + i);
                        udf_debug("byte=%2x\n",
                                  ((char *)bh->b_data)[(bit + i) >> 3]);
                                DQUOT_FREE_BLOCK(inode, 1);
                        if (sbi->s_lvid_bh) {
                                struct logicalVolIntegrityDesc *lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
                                lvid->freeSpaceTable[sbi->s_partition] =
                                        cpu_to_le32(le32_to_cpu(lvid->freeSpaceTable[sbi->s_partition]) + 1);

        mark_buffer_dirty(bh);
                mark_buffer_dirty(sbi->s_lvid_bh);
        mutex_unlock(&sbi->s_alloc_mutex);
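
/*
 * Reserve up to @block_count blocks starting at @first_block by clearing
 * consecutive bits in the space bitmap; returns how many blocks were
 * actually preallocated.
 */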
static int udf_bitmap_prealloc_blocks(struct super_block *sb,
                                      struct udf_bitmap *bitmap,
                                      uint16_t partition, uint32_t first_block,
                                      uint32_t block_count)
        struct udf_sb_info *sbi = UDF_SB(sb);
        int bit, block, block_group, group_start;
        int nr_groups, bitmap_nr;
        struct buffer_head *bh;

        mutex_lock(&sbi->s_alloc_mutex);
        part_len = sbi->s_partmaps[partition].s_partition_len;
        if (first_block < 0 || first_block >= part_len)

        if (first_block + block_count > part_len)
                block_count = part_len - first_block;

        nr_groups = udf_compute_nr_groups(sb, partition);
        block = first_block + (sizeof(struct spaceBitmapDesc) << 3);
        block_group = block >> (sb->s_blocksize_bits + 3);
        group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

        bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
        bh = bitmap->s_block_bitmap[bitmap_nr];

        bit = block % (sb->s_blocksize << 3);

        while (bit < (sb->s_blocksize << 3) && block_count > 0) {
                if (!udf_test_bit(bit, bh->b_data)) {
                } else if (DQUOT_PREALLOC_BLOCK(inode, 1)) {
                } else if (!udf_clear_bit(bit, bh->b_data)) {
                        udf_debug("bit already cleared for block %d\n", bit);
                        DQUOT_FREE_BLOCK(inode, 1);

        mark_buffer_dirty(bh);

        if (sbi->s_lvid_bh) {
                struct logicalVolIntegrityDesc *lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
                lvid->freeSpaceTable[partition] =
                        cpu_to_le32(le32_to_cpu(lvid->freeSpaceTable[partition]) - alloc_count);
                mark_buffer_dirty(sbi->s_lvid_bh);

        mutex_unlock(&sbi->s_alloc_mutex);
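
/*
 * Allocate one block, trying the @goal block first, then a free bit near it
 * in the same block group, and finally scanning the remaining block groups.
 */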
static int udf_bitmap_new_block(struct super_block *sb,
                                struct udf_bitmap *bitmap, uint16_t partition,
                                uint32_t goal, int *err)
        struct udf_sb_info *sbi = UDF_SB(sb);
        int newbit, bit = 0, block, block_group, group_start;
        int end_goal, nr_groups, bitmap_nr, i;
        struct buffer_head *bh = NULL;

        mutex_lock(&sbi->s_alloc_mutex);

        if (goal < 0 || goal >= sbi->s_partmaps[partition].s_partition_len)

        nr_groups = bitmap->s_nr_groups;
        block = goal + (sizeof(struct spaceBitmapDesc) << 3);
        block_group = block >> (sb->s_blocksize_bits + 3);
        group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

        bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
        bh = bitmap->s_block_bitmap[bitmap_nr];
        ptr = memscan((char *)bh->b_data + group_start, 0xFF,
                      sb->s_blocksize - group_start);

        if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
                bit = block % (sb->s_blocksize << 3);
                if (udf_test_bit(bit, bh->b_data))

                end_goal = (bit + 63) & ~63;
                bit = udf_find_next_one_bit(bh->b_data, end_goal, bit);

                ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF,
                              sb->s_blocksize - ((bit + 7) >> 3));
                newbit = (ptr - ((char *)bh->b_data)) << 3;
                if (newbit < sb->s_blocksize << 3) {

                newbit = udf_find_next_one_bit(bh->b_data,
                                               sb->s_blocksize << 3, bit);
                if (newbit < sb->s_blocksize << 3) {

        for (i = 0; i < (nr_groups * 2); i++) {
                if (block_group >= nr_groups)
                group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

                bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
                bh = bitmap->s_block_bitmap[bitmap_nr];
                        ptr = memscan((char *)bh->b_data + group_start, 0xFF,
                                      sb->s_blocksize - group_start);
                        if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
                                bit = (ptr - ((char *)bh->b_data)) << 3;
                        bit = udf_find_next_one_bit((char *)bh->b_data,
                                                    sb->s_blocksize << 3,
                        if (bit < sb->s_blocksize << 3)

        if (i >= (nr_groups * 2)) {
                mutex_unlock(&sbi->s_alloc_mutex);

        if (bit < sb->s_blocksize << 3)
                bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3,
        if (bit >= sb->s_blocksize << 3) {
                mutex_unlock(&sbi->s_alloc_mutex);

        while (i < 7 && bit > (group_start << 3) &&
               udf_test_bit(bit - 1, bh->b_data)) {

        /*
         * Check quota for allocation of this block.
         */
        if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) {
                mutex_unlock(&sbi->s_alloc_mutex);

        newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) -
                (sizeof(struct spaceBitmapDesc) << 3);

        if (!udf_clear_bit(bit, bh->b_data)) {
                udf_debug("bit already cleared for block %d\n", bit);

        mark_buffer_dirty(bh);

        if (sbi->s_lvid_bh) {
                struct logicalVolIntegrityDesc *lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
                lvid->freeSpaceTable[partition] =
                        cpu_to_le32(le32_to_cpu(lvid->freeSpaceTable[partition]) - 1);
                mark_buffer_dirty(sbi->s_lvid_bh);

        mutex_unlock(&sbi->s_alloc_mutex);

        mutex_unlock(&sbi->s_alloc_mutex);
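
/*
 * Free @count blocks on a partition whose free space is tracked in an
 * unallocated space table (an inode holding extents of free blocks): the
 * freed range is merged into an adjacent extent when possible, otherwise it
 * is added as a new extent.
 */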
static void udf_table_free_blocks(struct super_block *sb,
                                  kernel_lb_addr bloc, uint32_t offset,
        struct udf_sb_info *sbi = UDF_SB(sb);
        struct extent_position oepos, epos;

        mutex_lock(&sbi->s_alloc_mutex);
        if (bloc.logicalBlockNum < 0 ||
            (bloc.logicalBlockNum + count) >
            sbi->s_partmaps[bloc.partitionReferenceNum].s_partition_len) {
                udf_debug("%d < %d || %d + %d > %d\n",
                          bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
                          sbi->s_partmaps[bloc.partitionReferenceNum].

        /* We do this up front - there are some error conditions that
           could occur, but... oh well */
                DQUOT_FREE_BLOCK(inode, count);
        if (sbi->s_lvid_bh) {
                struct logicalVolIntegrityDesc *lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
                lvid->freeSpaceTable[sbi->s_partition] =
                        cpu_to_le32(le32_to_cpu(lvid->freeSpaceTable[sbi->s_partition]) + count);
                mark_buffer_dirty(sbi->s_lvid_bh);

        start = bloc.logicalBlockNum + offset;
        end = bloc.logicalBlockNum + offset + count - 1;

        epos.offset = oepos.offset = sizeof(struct unallocSpaceEntry);
        epos.block = oepos.block = UDF_I_LOCATION(table);
        epos.bh = oepos.bh = NULL;

               (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
                if (((eloc.logicalBlockNum +
                        (elen >> sb->s_blocksize_bits)) == start)) {
                        if ((0x3FFFFFFF - elen) <
                                        (count << sb->s_blocksize_bits)) {
                                uint32_t tmp = ((0x3FFFFFFF - elen) >>
                                                sb->s_blocksize_bits);
                                elen = (etype << 30) |
                                        (0x40000000 - sb->s_blocksize);
                                elen = (etype << 30) |
                                        (count << sb->s_blocksize_bits));
                        udf_write_aext(table, &oepos, eloc, elen, 1);
                } else if (eloc.logicalBlockNum == (end + 1)) {
                        if ((0x3FFFFFFF - elen) <
                                        (count << sb->s_blocksize_bits)) {
                                uint32_t tmp = ((0x3FFFFFFF - elen) >>
                                                sb->s_blocksize_bits);
                                eloc.logicalBlockNum -= tmp;
                                elen = (etype << 30) |
                                        (0x40000000 - sb->s_blocksize);
                                eloc.logicalBlockNum = start;
                                elen = (etype << 30) |
                                        (count << sb->s_blocksize_bits));
                        udf_write_aext(table, &oepos, eloc, elen, 1);

                if (epos.bh != oepos.bh) {
                        oepos.block = epos.block;
                        oepos.offset = epos.offset;

                /*
                 * NOTE: we CANNOT use udf_add_aext here, as it can try to
                 * allocate a new block, and since we already hold the super
                 * block lock, very bad things would happen :)
                 *
                 * We copy the behavior of udf_add_aext, but instead of
                 * trying to allocate a new block close to the existing one,
                 * we just steal a block from the extent we are trying to add.
                 *
                 * It would be nice if the blocks were close together, but it
                 * isn't required.
                 */

                short_ad *sad = NULL;
                struct allocExtDesc *aed;

                eloc.logicalBlockNum = start;
                elen = EXT_RECORDED_ALLOCATED |
                        (count << sb->s_blocksize_bits);

                if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT) {
                        adsize = sizeof(short_ad);
                } else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG) {
                        adsize = sizeof(long_ad);

                if (epos.offset + (2 * adsize) > sb->s_blocksize) {
                        /* Steal a block from the extent being freed */
                        epos.block.logicalBlockNum = eloc.logicalBlockNum;
                        eloc.logicalBlockNum++;
                        elen -= sb->s_blocksize;

                        epos.bh = udf_tread(sb,
                                        udf_get_lb_pblock(sb, epos.block, 0));

                        aed = (struct allocExtDesc *)(epos.bh->b_data);
                        aed->previousAllocExtLocation =
                                cpu_to_le32(oepos.block.logicalBlockNum);
                        if (epos.offset + adsize > sb->s_blocksize) {
                                loffset = epos.offset;
                                aed->lengthAllocDescs = cpu_to_le32(adsize);
                                sptr = UDF_I_DATA(table) + epos.offset - adsize;
                                dptr = epos.bh->b_data +
                                        sizeof(struct allocExtDesc);
                                memcpy(dptr, sptr, adsize);
                                epos.offset = sizeof(struct allocExtDesc) +
                                loffset = epos.offset + adsize;
                                aed->lengthAllocDescs = cpu_to_le32(0);
                                        sptr = oepos.bh->b_data + epos.offset;
                                        aed = (struct allocExtDesc *)
                                        aed->lengthAllocDescs =
                                                cpu_to_le32(le32_to_cpu(
                                                        aed->lengthAllocDescs) +
                                        sptr = UDF_I_DATA(table) + epos.offset;
                                        UDF_I_LENALLOC(table) += adsize;
                                        mark_inode_dirty(table);
                                epos.offset = sizeof(struct allocExtDesc);
                        if (sbi->s_udfrev >= 0x0200)
                                udf_new_tag(epos.bh->b_data, TAG_IDENT_AED,
                                            3, 1, epos.block.logicalBlockNum,
                                udf_new_tag(epos.bh->b_data, TAG_IDENT_AED,
                                            2, 1, epos.block.logicalBlockNum,

                        switch (UDF_I_ALLOCTYPE(table)) {
                        case ICBTAG_FLAG_AD_SHORT:
                                sad = (short_ad *)sptr;
                                sad->extLength = cpu_to_le32(
                                        EXT_NEXT_EXTENT_ALLOCDECS |
                                        cpu_to_le32(epos.block.logicalBlockNum);
                        case ICBTAG_FLAG_AD_LONG:
                                lad = (long_ad *)sptr;
                                lad->extLength = cpu_to_le32(
                                        EXT_NEXT_EXTENT_ALLOCDECS |
                                        cpu_to_lelb(epos.block);

                                udf_update_tag(oepos.bh->b_data, loffset);
                                mark_buffer_dirty(oepos.bh);
                                mark_inode_dirty(table);

                /* It's possible that stealing the block emptied the extent */
                        udf_write_aext(table, &epos, eloc, elen, 1);

                                UDF_I_LENALLOC(table) += adsize;
                                mark_inode_dirty(table);
                                aed = (struct allocExtDesc *)epos.bh->b_data;
                                aed->lengthAllocDescs =
                                        cpu_to_le32(le32_to_cpu(
                                                aed->lengthAllocDescs) + adsize);
                                udf_update_tag(epos.bh->b_data, epos.offset);
                                mark_buffer_dirty(epos.bh);

        mutex_unlock(&sbi->s_alloc_mutex);
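
/*
 * Preallocate up to @block_count blocks from the free-space table by carving
 * them off the front of the extent that starts at @first_block, if one exists.
 */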
static int udf_table_prealloc_blocks(struct super_block *sb,
                                     struct inode *table, uint16_t partition,
                                     uint32_t first_block, uint32_t block_count)
        struct udf_sb_info *sbi = UDF_SB(sb);
        uint32_t elen, adsize;
        struct extent_position epos;

        if (first_block < 0 ||
            first_block >= sbi->s_partmaps[partition].s_partition_len)

        if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT)
                adsize = sizeof(short_ad);
        else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG)
                adsize = sizeof(long_ad);

        mutex_lock(&sbi->s_alloc_mutex);
        epos.offset = sizeof(struct unallocSpaceEntry);
        epos.block = UDF_I_LOCATION(table);
        eloc.logicalBlockNum = 0xFFFFFFFF;

        while (first_block != eloc.logicalBlockNum &&
               (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
                udf_debug("eloc=%d, elen=%d, first_block=%d\n",
                          eloc.logicalBlockNum, elen, first_block);
                ; /* empty loop body */

        if (first_block == eloc.logicalBlockNum) {
                epos.offset -= adsize;

                alloc_count = (elen >> sb->s_blocksize_bits);
                if (inode && DQUOT_PREALLOC_BLOCK(inode,
                        alloc_count > block_count ? block_count : alloc_count))
                else if (alloc_count > block_count) {
                        alloc_count = block_count;
                        eloc.logicalBlockNum += alloc_count;
                        elen -= (alloc_count << sb->s_blocksize_bits);
                        udf_write_aext(table, &epos, eloc,
                                       (etype << 30) | elen, 1);
                        udf_delete_aext(table, epos, eloc,
                                        (etype << 30) | elen);

        if (alloc_count && sbi->s_lvid_bh) {
                struct logicalVolIntegrityDesc *lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
                lvid->freeSpaceTable[partition] =
                        cpu_to_le32(le32_to_cpu(lvid->freeSpaceTable[partition]) - alloc_count);
                mark_buffer_dirty(sbi->s_lvid_bh);

        mutex_unlock(&sbi->s_alloc_mutex);
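
/*
 * Allocate a single block from the free-space table: find the extent whose
 * start is closest to @goal, take its first block, and shrink (or delete)
 * that extent.
 */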
static int udf_table_new_block(struct super_block *sb,
                               struct inode *table, uint16_t partition,
                               uint32_t goal, int *err)
        struct udf_sb_info *sbi = UDF_SB(sb);
        uint32_t spread = 0xFFFFFFFF, nspread = 0xFFFFFFFF;
        uint32_t newblock = 0, adsize;
        uint32_t elen, goal_elen = 0;
        kernel_lb_addr eloc, uninitialized_var(goal_eloc);
        struct extent_position epos, goal_epos;

        if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT)
                adsize = sizeof(short_ad);
        else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG)
                adsize = sizeof(long_ad);

        mutex_lock(&sbi->s_alloc_mutex);
        if (goal < 0 || goal >= sbi->s_partmaps[partition].s_partition_len)

        /* We search for the closest matching block to goal. If we find
           an exact hit, we stop. Otherwise we keep going till we run out
           of extents. We store the buffer_head, bloc, and extoffset
           of the current closest match and use that when we are done.
         */
        epos.offset = sizeof(struct unallocSpaceEntry);
        epos.block = UDF_I_LOCATION(table);
        epos.bh = goal_epos.bh = NULL;

               (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
                if (goal >= eloc.logicalBlockNum) {
                        if (goal < eloc.logicalBlockNum +
                                (elen >> sb->s_blocksize_bits))
                                nspread = goal - eloc.logicalBlockNum -
                                          (elen >> sb->s_blocksize_bits);
                        nspread = eloc.logicalBlockNum - goal;

                if (nspread < spread) {
                        if (goal_epos.bh != epos.bh) {
                                brelse(goal_epos.bh);
                                goal_epos.bh = epos.bh;
                                get_bh(goal_epos.bh);
                        goal_epos.block = epos.block;
                        goal_epos.offset = epos.offset - adsize;
                        goal_elen = (etype << 30) | elen;

        if (spread == 0xFFFFFFFF) {
                brelse(goal_epos.bh);
                mutex_unlock(&sbi->s_alloc_mutex);

        /* Only allocate blocks from the beginning of the extent.
           That way, we only delete (empty) extents and never have to insert
           an extent because of splitting. */
        /* This works, but very poorly.... */

        newblock = goal_eloc.logicalBlockNum;
        goal_eloc.logicalBlockNum++;
        goal_elen -= sb->s_blocksize;

        if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) {
                brelse(goal_epos.bh);
                mutex_unlock(&sbi->s_alloc_mutex);

                udf_write_aext(table, &goal_epos, goal_eloc, goal_elen, 1);
                udf_delete_aext(table, goal_epos, goal_eloc, goal_elen);
        brelse(goal_epos.bh);

        if (sbi->s_lvid_bh) {
                struct logicalVolIntegrityDesc *lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
                lvid->freeSpaceTable[partition] =
                        cpu_to_le32(le32_to_cpu(lvid->freeSpaceTable[partition]) - 1);
                mark_buffer_dirty(sbi->s_lvid_bh);

        mutex_unlock(&sbi->s_alloc_mutex);
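
/*
 * Dispatch a free request to the bitmap or table implementation, depending
 * on how this partition tracks its unallocated/freed space.
 */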
inline void udf_free_blocks(struct super_block *sb,
                            kernel_lb_addr bloc, uint32_t offset,
        uint16_t partition = bloc.partitionReferenceNum;
        struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

        if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
                return udf_bitmap_free_blocks(sb, inode,
                                              map->s_uspace.s_bitmap,
                                              bloc, offset, count);
        } else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
                return udf_table_free_blocks(sb, inode,
                                             map->s_uspace.s_table,
                                             bloc, offset, count);
        } else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) {
                return udf_bitmap_free_blocks(sb, inode,
                                              map->s_fspace.s_bitmap,
                                              bloc, offset, count);
        } else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) {
                return udf_table_free_blocks(sb, inode,
                                             map->s_fspace.s_table,
                                             bloc, offset, count);
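
/*
 * Dispatch preallocation to the bitmap or table implementation for this
 * partition.
 */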
inline int udf_prealloc_blocks(struct super_block *sb,
                               uint16_t partition, uint32_t first_block,
                               uint32_t block_count)
        struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

        if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
                return udf_bitmap_prealloc_blocks(sb, inode,
                                                  map->s_uspace.s_bitmap,
                                                  partition, first_block,
        else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
                return udf_table_prealloc_blocks(sb, inode,
                                                 map->s_uspace.s_table,
                                                 partition, first_block,
        else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
                return udf_bitmap_prealloc_blocks(sb, inode,
                                                  map->s_fspace.s_bitmap,
                                                  partition, first_block,
        else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
                return udf_table_prealloc_blocks(sb, inode,
                                                 map->s_fspace.s_table,
                                                 partition, first_block,
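
/*
 * Dispatch single-block allocation to the bitmap or table implementation
 * for this partition.
 */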
inline int udf_new_block(struct super_block *sb,
                         uint16_t partition, uint32_t goal, int *err)
        struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

        if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
                return udf_bitmap_new_block(sb, inode,
                                            map->s_uspace.s_bitmap,
                                            partition, goal, err);
        else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
                return udf_table_new_block(sb, inode,
                                           map->s_uspace.s_table,
                                           partition, goal, err);
        else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
                return udf_bitmap_new_block(sb, inode,
                                            map->s_fspace.s_bitmap,
                                            partition, goal, err);
        else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
                return udf_table_new_block(sb, inode,
                                           map->s_fspace.s_table,
                                           partition, goal, err);