fs/udf/balloc.c
/*
 * balloc.c
 *
 * PURPOSE
 *      Block allocation handling routines for the OSTA-UDF(tm) filesystem.
 *
 * COPYRIGHT
 *      This file is distributed under the terms of the GNU General Public
 *      License (GPL). Copies of the GPL can be obtained from:
 *              ftp://prep.ai.mit.edu/pub/gnu/GPL
 *      Each contributing author retains all rights to their own work.
 *
 *  (C) 1999-2001 Ben Fennema
 *  (C) 1999 Stelias Computing Inc
 *
 * HISTORY
 *
 *  02/24/99 blf  Created.
 *
 */

#include "udfdecl.h"

#include <linux/buffer_head.h>
#include <linux/bitops.h>

#include "udf_i.h"
#include "udf_sb.h"

#define udf_clear_bit   __test_and_clear_bit_le
#define udf_set_bit     __test_and_set_bit_le
#define udf_test_bit    test_bit_le
#define udf_find_next_one_bit   find_next_bit_le

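/*
 * Read one block of the partition's space bitmap from disk and cache the
 * buffer_head in slot 'bitmap_nr' of bitmap->s_block_bitmap.  Returns 0 on
 * success or -EIO if the block could not be read (the slot is then NULL).
 */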
static int read_block_bitmap(struct super_block *sb,
                             struct udf_bitmap *bitmap, unsigned int block,
                             unsigned long bitmap_nr)
{
        struct buffer_head *bh = NULL;
        int retval = 0;
        struct kernel_lb_addr loc;

        loc.logicalBlockNum = bitmap->s_extPosition;
        loc.partitionReferenceNum = UDF_SB(sb)->s_partition;

        bh = udf_tread(sb, udf_get_lb_pblock(sb, &loc, block));
        if (!bh)
                retval = -EIO;

        bitmap->s_block_bitmap[bitmap_nr] = bh;
        return retval;
}

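/*
 * Make sure the bitmap block for 'block_group' is cached, reading it from
 * disk if necessary.  Returns the slot number (equal to block_group) on
 * success or the negative error code from read_block_bitmap().
 */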
static int __load_block_bitmap(struct super_block *sb,
                               struct udf_bitmap *bitmap,
                               unsigned int block_group)
{
        int retval = 0;
        int nr_groups = bitmap->s_nr_groups;

        if (block_group >= nr_groups) {
                udf_debug("block_group (%d) >= nr_groups (%d)\n",
                          block_group, nr_groups);
        }

        if (bitmap->s_block_bitmap[block_group]) {
                return block_group;
        } else {
                retval = read_block_bitmap(sb, bitmap, block_group,
                                           block_group);
                if (retval < 0)
                        return retval;
                return block_group;
        }
}

static inline int load_block_bitmap(struct super_block *sb,
                                    struct udf_bitmap *bitmap,
                                    unsigned int block_group)
{
        int slot;

        slot = __load_block_bitmap(sb, bitmap, block_group);

        if (slot < 0)
                return slot;

        if (!bitmap->s_block_bitmap[slot])
                return -EIO;

        return slot;
}

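/*
 * Adjust the free-space count for 'partition' in the Logical Volume
 * Integrity Descriptor.  Callers pass a negated count when blocks are being
 * allocated, relying on the modular arithmetic of le32_add_cpu().
 */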
static void udf_add_free_space(struct super_block *sb, u16 partition, u32 cnt)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        struct logicalVolIntegrityDesc *lvid;

        if (!sbi->s_lvid_bh)
                return;

        lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
        le32_add_cpu(&lvid->freeSpaceTable[partition], cnt);
        udf_updated_lvid(sb);
}

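/*
 * Free 'count' blocks starting at bloc+offset in a bitmap-managed partition
 * by setting the corresponding bits in the space bitmap.  Runs that cross a
 * bitmap-block boundary are handled one group at a time via 'overflow'.
 */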
static void udf_bitmap_free_blocks(struct super_block *sb,
                                   struct udf_bitmap *bitmap,
                                   struct kernel_lb_addr *bloc,
                                   uint32_t offset,
                                   uint32_t count)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        struct buffer_head *bh = NULL;
        struct udf_part_map *partmap;
        unsigned long block;
        unsigned long block_group;
        unsigned long bit;
        unsigned long i;
        int bitmap_nr;
        unsigned long overflow;

        mutex_lock(&sbi->s_alloc_mutex);
        partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
        if (bloc->logicalBlockNum + count < count ||
            (bloc->logicalBlockNum + count) > partmap->s_partition_len) {
                udf_debug("%d < %d || %d + %d > %d\n",
                          bloc->logicalBlockNum, 0,
                          bloc->logicalBlockNum, count,
                          partmap->s_partition_len);
                goto error_return;
        }

        block = bloc->logicalBlockNum + offset +
                (sizeof(struct spaceBitmapDesc) << 3);

        do {
                overflow = 0;
                block_group = block >> (sb->s_blocksize_bits + 3);
                bit = block % (sb->s_blocksize << 3);

                /*
                 * Check to see if we are freeing blocks across a group boundary.
                 */
                if (bit + count > (sb->s_blocksize << 3)) {
                        overflow = bit + count - (sb->s_blocksize << 3);
                        count -= overflow;
                }
                bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
                if (bitmap_nr < 0)
                        goto error_return;

                bh = bitmap->s_block_bitmap[bitmap_nr];
                for (i = 0; i < count; i++) {
                        if (udf_set_bit(bit + i, bh->b_data)) {
                                udf_debug("bit %ld already set\n", bit + i);
                                udf_debug("byte=%2x\n",
                                          ((char *)bh->b_data)[(bit + i) >> 3]);
                        }
                }
                udf_add_free_space(sb, sbi->s_partition, count);
                mark_buffer_dirty(bh);
                if (overflow) {
                        block += count;
                        count = overflow;
                }
        } while (overflow);

error_return:
        mutex_unlock(&sbi->s_alloc_mutex);
}

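/*
 * Grab up to 'block_count' contiguous blocks starting at 'first_block' by
 * clearing their bits in the space bitmap.  Stops at the first bit that is
 * already clear (already allocated) and returns the number of blocks
 * actually claimed.
 */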
static int udf_bitmap_prealloc_blocks(struct super_block *sb,
                                      struct udf_bitmap *bitmap,
                                      uint16_t partition, uint32_t first_block,
                                      uint32_t block_count)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        int alloc_count = 0;
        int bit, block, block_group, group_start;
        int nr_groups, bitmap_nr;
        struct buffer_head *bh;
        __u32 part_len;

        mutex_lock(&sbi->s_alloc_mutex);
        part_len = sbi->s_partmaps[partition].s_partition_len;
        if (first_block >= part_len)
                goto out;

        if (first_block + block_count > part_len)
                block_count = part_len - first_block;

        do {
                nr_groups = udf_compute_nr_groups(sb, partition);
                block = first_block + (sizeof(struct spaceBitmapDesc) << 3);
                block_group = block >> (sb->s_blocksize_bits + 3);
                group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

                bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
                if (bitmap_nr < 0)
                        goto out;
                bh = bitmap->s_block_bitmap[bitmap_nr];

                bit = block % (sb->s_blocksize << 3);

                while (bit < (sb->s_blocksize << 3) && block_count > 0) {
                        if (!udf_clear_bit(bit, bh->b_data))
                                goto out;
                        block_count--;
                        alloc_count++;
                        bit++;
                        block++;
                }
                mark_buffer_dirty(bh);
        } while (block_count > 0);

out:
        udf_add_free_space(sb, partition, -alloc_count);
        mutex_unlock(&sbi->s_alloc_mutex);
        return alloc_count;
}

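/*
 * Allocate a single block, trying to stay close to 'goal'.  The search first
 * tests the goal bit itself, then scans the rest of the goal's bitmap block,
 * and finally falls back to scanning all bitmap blocks.  On success the bit
 * is cleared, the free-space count is updated and the new block number is
 * returned; on failure 0 is returned with *err set.
 */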
static int udf_bitmap_new_block(struct super_block *sb,
                                struct udf_bitmap *bitmap, uint16_t partition,
                                uint32_t goal, int *err)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        int newbit, bit = 0, block, block_group, group_start;
        int end_goal, nr_groups, bitmap_nr, i;
        struct buffer_head *bh = NULL;
        char *ptr;
        int newblock = 0;

        *err = -ENOSPC;
        mutex_lock(&sbi->s_alloc_mutex);

repeat:
        if (goal >= sbi->s_partmaps[partition].s_partition_len)
                goal = 0;

        nr_groups = bitmap->s_nr_groups;
        block = goal + (sizeof(struct spaceBitmapDesc) << 3);
        block_group = block >> (sb->s_blocksize_bits + 3);
        group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

        bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
        if (bitmap_nr < 0)
                goto error_return;
        bh = bitmap->s_block_bitmap[bitmap_nr];
        ptr = memscan((char *)bh->b_data + group_start, 0xFF,
                      sb->s_blocksize - group_start);

        if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
                bit = block % (sb->s_blocksize << 3);
                if (udf_test_bit(bit, bh->b_data))
                        goto got_block;

                end_goal = (bit + 63) & ~63;
                bit = udf_find_next_one_bit(bh->b_data, end_goal, bit);
                if (bit < end_goal)
                        goto got_block;

                ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF,
                              sb->s_blocksize - ((bit + 7) >> 3));
                newbit = (ptr - ((char *)bh->b_data)) << 3;
                if (newbit < sb->s_blocksize << 3) {
                        bit = newbit;
                        goto search_back;
                }

                newbit = udf_find_next_one_bit(bh->b_data,
                                               sb->s_blocksize << 3, bit);
                if (newbit < sb->s_blocksize << 3) {
                        bit = newbit;
                        goto got_block;
                }
        }

        for (i = 0; i < (nr_groups * 2); i++) {
                block_group++;
                if (block_group >= nr_groups)
                        block_group = 0;
                group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

                bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
                if (bitmap_nr < 0)
                        goto error_return;
                bh = bitmap->s_block_bitmap[bitmap_nr];
                if (i < nr_groups) {
                        ptr = memscan((char *)bh->b_data + group_start, 0xFF,
                                      sb->s_blocksize - group_start);
                        if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
                                bit = (ptr - ((char *)bh->b_data)) << 3;
                                break;
                        }
                } else {
                        bit = udf_find_next_one_bit(bh->b_data,
                                                    sb->s_blocksize << 3,
                                                    group_start << 3);
                        if (bit < sb->s_blocksize << 3)
                                break;
                }
        }
        if (i >= (nr_groups * 2)) {
                mutex_unlock(&sbi->s_alloc_mutex);
                return newblock;
        }
        if (bit < sb->s_blocksize << 3)
                goto search_back;
        else
                bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3,
                                            group_start << 3);
        if (bit >= sb->s_blocksize << 3) {
                mutex_unlock(&sbi->s_alloc_mutex);
                return 0;
        }

search_back:
        i = 0;
        while (i < 7 && bit > (group_start << 3) &&
               udf_test_bit(bit - 1, bh->b_data)) {
                ++i;
                --bit;
        }

got_block:
        newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) -
                (sizeof(struct spaceBitmapDesc) << 3);

        if (!udf_clear_bit(bit, bh->b_data)) {
                udf_debug("bit already cleared for block %d\n", bit);
                goto repeat;
        }

        mark_buffer_dirty(bh);

        udf_add_free_space(sb, partition, -1);
        mutex_unlock(&sbi->s_alloc_mutex);
        *err = 0;
        return newblock;

error_return:
        *err = -EIO;
        mutex_unlock(&sbi->s_alloc_mutex);
        return 0;
}

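/*
 * Free 'count' blocks in a table-managed partition by recording them in the
 * unallocated-space table: the range is merged into an adjacent extent when
 * one exists, otherwise a new extent is appended.  If appending would need a
 * new allocation extent descriptor block, one block is stolen from the range
 * being freed (see the NOTE comment below).
 */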
static void udf_table_free_blocks(struct super_block *sb,
                                  struct inode *table,
                                  struct kernel_lb_addr *bloc,
                                  uint32_t offset,
                                  uint32_t count)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        struct udf_part_map *partmap;
        uint32_t start, end;
        uint32_t elen;
        struct kernel_lb_addr eloc;
        struct extent_position oepos, epos;
        int8_t etype;
        int i;
        struct udf_inode_info *iinfo;

        mutex_lock(&sbi->s_alloc_mutex);
        partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
        if (bloc->logicalBlockNum + count < count ||
            (bloc->logicalBlockNum + count) > partmap->s_partition_len) {
                udf_debug("%d < %d || %d + %d > %d\n",
                          bloc->logicalBlockNum, 0,
                          bloc->logicalBlockNum, count,
                          partmap->s_partition_len);
                goto error_return;
        }

        iinfo = UDF_I(table);
        udf_add_free_space(sb, sbi->s_partition, count);

        start = bloc->logicalBlockNum + offset;
        end = bloc->logicalBlockNum + offset + count - 1;

        epos.offset = oepos.offset = sizeof(struct unallocSpaceEntry);
        elen = 0;
        epos.block = oepos.block = iinfo->i_location;
        epos.bh = oepos.bh = NULL;

        while (count &&
               (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
                if (((eloc.logicalBlockNum +
                        (elen >> sb->s_blocksize_bits)) == start)) {
                        if ((0x3FFFFFFF - elen) <
                                        (count << sb->s_blocksize_bits)) {
                                uint32_t tmp = ((0x3FFFFFFF - elen) >>
                                                        sb->s_blocksize_bits);
                                count -= tmp;
                                start += tmp;
                                elen = (etype << 30) |
                                        (0x40000000 - sb->s_blocksize);
                        } else {
                                elen = (etype << 30) |
                                        (elen +
                                        (count << sb->s_blocksize_bits));
                                start += count;
                                count = 0;
                        }
                        udf_write_aext(table, &oepos, &eloc, elen, 1);
                } else if (eloc.logicalBlockNum == (end + 1)) {
                        if ((0x3FFFFFFF - elen) <
                                        (count << sb->s_blocksize_bits)) {
                                uint32_t tmp = ((0x3FFFFFFF - elen) >>
                                                sb->s_blocksize_bits);
                                count -= tmp;
                                end -= tmp;
                                eloc.logicalBlockNum -= tmp;
                                elen = (etype << 30) |
                                        (0x40000000 - sb->s_blocksize);
                        } else {
                                eloc.logicalBlockNum = start;
                                elen = (etype << 30) |
                                        (elen +
                                        (count << sb->s_blocksize_bits));
                                end -= count;
                                count = 0;
                        }
                        udf_write_aext(table, &oepos, &eloc, elen, 1);
                }

                if (epos.bh != oepos.bh) {
                        i = -1;
                        oepos.block = epos.block;
                        brelse(oepos.bh);
                        get_bh(epos.bh);
                        oepos.bh = epos.bh;
                        oepos.offset = 0;
                } else {
                        oepos.offset = epos.offset;
                }
        }

        if (count) {
                /*
                 * NOTE: we CANNOT use udf_add_aext here, as it can try to
                 * allocate a new block, and since we hold the super block
                 * lock already very bad things would happen :)
                 *
                 * We copy the behavior of udf_add_aext, but instead of
                 * trying to allocate a new block close to the existing one,
                 * we just steal a block from the extent we are trying to add.
                 *
                 * It would be nice if the blocks were close together, but it
                 * isn't required.
                 */

                int adsize;
                struct short_ad *sad = NULL;
                struct long_ad *lad = NULL;
                struct allocExtDesc *aed;

                eloc.logicalBlockNum = start;
                elen = EXT_RECORDED_ALLOCATED |
                        (count << sb->s_blocksize_bits);

                if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
                        adsize = sizeof(struct short_ad);
                else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
                        adsize = sizeof(struct long_ad);
                else {
                        brelse(oepos.bh);
                        brelse(epos.bh);
                        goto error_return;
                }

                if (epos.offset + (2 * adsize) > sb->s_blocksize) {
                        unsigned char *sptr, *dptr;
                        int loffset;

                        brelse(oepos.bh);
                        oepos = epos;

                        /* Steal a block from the extent being free'd */
                        epos.block.logicalBlockNum = eloc.logicalBlockNum;
                        eloc.logicalBlockNum++;
                        elen -= sb->s_blocksize;

                        epos.bh = udf_tread(sb,
                                        udf_get_lb_pblock(sb, &epos.block, 0));
                        if (!epos.bh) {
                                brelse(oepos.bh);
                                goto error_return;
                        }
                        aed = (struct allocExtDesc *)(epos.bh->b_data);
                        aed->previousAllocExtLocation =
                                cpu_to_le32(oepos.block.logicalBlockNum);
                        if (epos.offset + adsize > sb->s_blocksize) {
                                loffset = epos.offset;
                                aed->lengthAllocDescs = cpu_to_le32(adsize);
                                sptr = iinfo->i_ext.i_data + epos.offset
                                                                - adsize;
                                dptr = epos.bh->b_data +
                                        sizeof(struct allocExtDesc);
                                memcpy(dptr, sptr, adsize);
                                epos.offset = sizeof(struct allocExtDesc) +
                                                adsize;
                        } else {
                                loffset = epos.offset + adsize;
                                aed->lengthAllocDescs = cpu_to_le32(0);
                                if (oepos.bh) {
                                        sptr = oepos.bh->b_data + epos.offset;
                                        aed = (struct allocExtDesc *)
                                                oepos.bh->b_data;
                                        le32_add_cpu(&aed->lengthAllocDescs,
                                                        adsize);
                                } else {
                                        sptr = iinfo->i_ext.i_data +
                                                                epos.offset;
                                        iinfo->i_lenAlloc += adsize;
                                        mark_inode_dirty(table);
                                }
                                epos.offset = sizeof(struct allocExtDesc);
                        }
                        if (sbi->s_udfrev >= 0x0200)
                                udf_new_tag(epos.bh->b_data, TAG_IDENT_AED,
                                            3, 1, epos.block.logicalBlockNum,
                                            sizeof(struct tag));
                        else
                                udf_new_tag(epos.bh->b_data, TAG_IDENT_AED,
                                            2, 1, epos.block.logicalBlockNum,
                                            sizeof(struct tag));

                        switch (iinfo->i_alloc_type) {
                        case ICBTAG_FLAG_AD_SHORT:
                                sad = (struct short_ad *)sptr;
                                sad->extLength = cpu_to_le32(
                                        EXT_NEXT_EXTENT_ALLOCDECS |
                                        sb->s_blocksize);
                                sad->extPosition =
                                        cpu_to_le32(epos.block.logicalBlockNum);
                                break;
                        case ICBTAG_FLAG_AD_LONG:
                                lad = (struct long_ad *)sptr;
                                lad->extLength = cpu_to_le32(
                                        EXT_NEXT_EXTENT_ALLOCDECS |
                                        sb->s_blocksize);
                                lad->extLocation =
                                        cpu_to_lelb(epos.block);
                                break;
                        }
                        if (oepos.bh) {
                                udf_update_tag(oepos.bh->b_data, loffset);
                                mark_buffer_dirty(oepos.bh);
                        } else {
                                mark_inode_dirty(table);
                        }
                }

                /* It's possible that stealing the block emptied the extent */
                if (elen) {
                        udf_write_aext(table, &epos, &eloc, elen, 1);

                        if (!epos.bh) {
                                iinfo->i_lenAlloc += adsize;
                                mark_inode_dirty(table);
                        } else {
                                aed = (struct allocExtDesc *)epos.bh->b_data;
                                le32_add_cpu(&aed->lengthAllocDescs, adsize);
                                udf_update_tag(epos.bh->b_data, epos.offset);
                                mark_buffer_dirty(epos.bh);
                        }
                }
        }

        brelse(epos.bh);
        brelse(oepos.bh);

error_return:
        mutex_unlock(&sbi->s_alloc_mutex);
        return;
}

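/*
 * Claim up to 'block_count' blocks from the unallocated-space table.  Only
 * the extent that starts exactly at 'first_block' is considered: it is
 * shrunk (or deleted if fully consumed) and the number of blocks claimed is
 * returned.
 */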
static int udf_table_prealloc_blocks(struct super_block *sb,
                                     struct inode *table, uint16_t partition,
                                     uint32_t first_block, uint32_t block_count)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        int alloc_count = 0;
        uint32_t elen, adsize;
        struct kernel_lb_addr eloc;
        struct extent_position epos;
        int8_t etype = -1;
        struct udf_inode_info *iinfo;

        if (first_block >= sbi->s_partmaps[partition].s_partition_len)
                return 0;

        iinfo = UDF_I(table);
        if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
                adsize = sizeof(struct short_ad);
        else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
                adsize = sizeof(struct long_ad);
        else
                return 0;

        mutex_lock(&sbi->s_alloc_mutex);
        epos.offset = sizeof(struct unallocSpaceEntry);
        epos.block = iinfo->i_location;
        epos.bh = NULL;
        eloc.logicalBlockNum = 0xFFFFFFFF;

        /* Walk the extents until we reach the one starting at first_block */
        while (first_block != eloc.logicalBlockNum &&
               (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
                udf_debug("eloc=%d, elen=%d, first_block=%d\n",
                          eloc.logicalBlockNum, elen, first_block);
        }

        if (first_block == eloc.logicalBlockNum) {
                epos.offset -= adsize;

                alloc_count = (elen >> sb->s_blocksize_bits);
                if (alloc_count > block_count) {
                        alloc_count = block_count;
                        eloc.logicalBlockNum += alloc_count;
                        elen -= (alloc_count << sb->s_blocksize_bits);
                        udf_write_aext(table, &epos, &eloc,
                                        (etype << 30) | elen, 1);
                } else
                        udf_delete_aext(table, epos, eloc,
                                        (etype << 30) | elen);
        } else {
                alloc_count = 0;
        }

        brelse(epos.bh);

        if (alloc_count)
                udf_add_free_space(sb, partition, -alloc_count);
        mutex_unlock(&sbi->s_alloc_mutex);
        return alloc_count;
}

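/*
 * Allocate one block from the unallocated-space table, choosing the extent
 * whose start is closest to 'goal'.  The block is always taken from the
 * beginning of that extent, which is then shrunk or deleted.
 */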
static int udf_table_new_block(struct super_block *sb,
                               struct inode *table, uint16_t partition,
                               uint32_t goal, int *err)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        uint32_t spread = 0xFFFFFFFF, nspread = 0xFFFFFFFF;
        uint32_t newblock = 0, adsize;
        uint32_t elen, goal_elen = 0;
        struct kernel_lb_addr eloc, uninitialized_var(goal_eloc);
        struct extent_position epos, goal_epos;
        int8_t etype;
        struct udf_inode_info *iinfo = UDF_I(table);

        *err = -ENOSPC;

        if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
                adsize = sizeof(struct short_ad);
        else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
                adsize = sizeof(struct long_ad);
        else
                return newblock;

        mutex_lock(&sbi->s_alloc_mutex);
        if (goal >= sbi->s_partmaps[partition].s_partition_len)
                goal = 0;

        /*
         * We search for the closest matching block to goal.  If we find an
         * exact hit, we stop.  Otherwise we keep going till we run out of
         * extents.  We store the buffer_head, bloc, and extoffset of the
         * current closest match and use that when we are done.
         */
        epos.offset = sizeof(struct unallocSpaceEntry);
        epos.block = iinfo->i_location;
        epos.bh = goal_epos.bh = NULL;

        while (spread &&
               (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
                if (goal >= eloc.logicalBlockNum) {
                        if (goal < eloc.logicalBlockNum +
                                        (elen >> sb->s_blocksize_bits))
                                nspread = 0;
                        else
                                nspread = goal - eloc.logicalBlockNum -
                                        (elen >> sb->s_blocksize_bits);
                } else {
                        nspread = eloc.logicalBlockNum - goal;
                }

                if (nspread < spread) {
                        spread = nspread;
                        if (goal_epos.bh != epos.bh) {
                                brelse(goal_epos.bh);
                                goal_epos.bh = epos.bh;
                                get_bh(goal_epos.bh);
                        }
                        goal_epos.block = epos.block;
                        goal_epos.offset = epos.offset - adsize;
                        goal_eloc = eloc;
                        goal_elen = (etype << 30) | elen;
                }
        }

        brelse(epos.bh);

        if (spread == 0xFFFFFFFF) {
                brelse(goal_epos.bh);
                mutex_unlock(&sbi->s_alloc_mutex);
                return 0;
        }

        /* Only allocate blocks from the beginning of the extent.
           That way, we only delete (empty) extents, never have to insert an
           extent because of splitting */
        /* This works, but very poorly.... */

        newblock = goal_eloc.logicalBlockNum;
        goal_eloc.logicalBlockNum++;
        goal_elen -= sb->s_blocksize;

        if (goal_elen)
                udf_write_aext(table, &goal_epos, &goal_eloc, goal_elen, 1);
        else
                udf_delete_aext(table, goal_epos, goal_eloc, goal_elen);
        brelse(goal_epos.bh);

        udf_add_free_space(sb, partition, -1);

        mutex_unlock(&sbi->s_alloc_mutex);
        *err = 0;
        return newblock;
}

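/*
 * Free a run of blocks, dispatching to the bitmap or table implementation
 * according to the partition's space-management flags, and subtract the
 * freed bytes from the owning inode when one is given.
 */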
void udf_free_blocks(struct super_block *sb, struct inode *inode,
                     struct kernel_lb_addr *bloc, uint32_t offset,
                     uint32_t count)
{
        uint16_t partition = bloc->partitionReferenceNum;
        struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

        if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
                udf_bitmap_free_blocks(sb, map->s_uspace.s_bitmap,
                                       bloc, offset, count);
        } else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
                udf_table_free_blocks(sb, map->s_uspace.s_table,
                                      bloc, offset, count);
        } else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) {
                udf_bitmap_free_blocks(sb, map->s_fspace.s_bitmap,
                                       bloc, offset, count);
        } else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) {
                udf_table_free_blocks(sb, map->s_fspace.s_table,
                                      bloc, offset, count);
        }

        if (inode) {
                inode_sub_bytes(inode,
                                ((sector_t)count) << sb->s_blocksize_bits);
        }
}

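/*
 * Preallocate up to 'block_count' blocks starting at 'first_block' using the
 * partition's bitmap or table allocator; returns the number of blocks
 * obtained and charges them to the inode when one is given.
 */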
inline int udf_prealloc_blocks(struct super_block *sb,
                               struct inode *inode,
                               uint16_t partition, uint32_t first_block,
                               uint32_t block_count)
{
        struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
        sector_t allocated;

        if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
                allocated = udf_bitmap_prealloc_blocks(sb,
                                                       map->s_uspace.s_bitmap,
                                                       partition, first_block,
                                                       block_count);
        else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
                allocated = udf_table_prealloc_blocks(sb,
                                                      map->s_uspace.s_table,
                                                      partition, first_block,
                                                      block_count);
        else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
                allocated = udf_bitmap_prealloc_blocks(sb,
                                                       map->s_fspace.s_bitmap,
                                                       partition, first_block,
                                                       block_count);
        else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
                allocated = udf_table_prealloc_blocks(sb,
                                                      map->s_fspace.s_table,
                                                      partition, first_block,
                                                      block_count);
        else
                return 0;

        if (inode && allocated > 0)
                inode_add_bytes(inode, allocated << sb->s_blocksize_bits);
        return allocated;
}

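/*
 * Allocate a single block near 'goal' using the partition's bitmap or table
 * allocator.  Returns the block number (0 on failure, with *err set) and
 * charges the block to the inode when one is given.
 */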
inline int udf_new_block(struct super_block *sb,
                         struct inode *inode,
                         uint16_t partition, uint32_t goal, int *err)
{
        struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
        int block;

        if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
                block = udf_bitmap_new_block(sb,
                                             map->s_uspace.s_bitmap,
                                             partition, goal, err);
        else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
                block = udf_table_new_block(sb,
                                            map->s_uspace.s_table,
                                            partition, goal, err);
        else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
                block = udf_bitmap_new_block(sb,
                                             map->s_fspace.s_bitmap,
                                             partition, goal, err);
        else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
                block = udf_table_new_block(sb,
                                            map->s_fspace.s_table,
                                            partition, goal, err);
        else {
                *err = -EIO;
                return 0;
        }
        if (inode && block)
                inode_add_bytes(inode, sb->s_blocksize);
        return block;
}