/*
 *  linux/fs/ufs/balloc.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 *
 * UFS2 write support Evgeniy Dushistov <dushistov@mail.ru>, 2007
 */

#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/capability.h>
#include <linux/bitops.h>
#include <asm/byteorder.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"

#define INVBLOCK ((u64)-1L)

static u64 ufs_add_fragments(struct inode *, u64, unsigned, unsigned, int *);
static u64 ufs_alloc_fragments(struct inode *, unsigned, u64, unsigned, int *);
static u64 ufs_alloccg_block(struct inode *, struct ufs_cg_private_info *, u64, int *);
static u64 ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, u64, unsigned);
static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);

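/*
 * Overview: UFS divides the device into cylinder groups, each with its own
 * free-fragment bitmap and summary counters.  A full block is made up of
 * uspi->s_fpb fragments (e.g. eight 1 KiB fragments for an 8 KiB block), and
 * free space is accounted both per fragment (cs_nffree) and per whole block
 * (cs_nbfree) in the cylinder group, the superblock totals and the per-group
 * csum array.
 */
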
/*
 * Free 'count' fragments from fragment number 'fragment'
 */
void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
{
        struct super_block * sb;
        struct ufs_sb_private_info * uspi;
        struct ufs_super_block_first * usb1;
        struct ufs_cg_private_info * ucpi;
        struct ufs_cylinder_group * ucg;
        unsigned cgno, bit, end_bit, bbase, blkmap, i;
        u64 blkno;

        sb = inode->i_sb;
        uspi = UFS_SB(sb)->s_uspi;
        usb1 = ubh_get_usb_first(uspi);

        UFSD("ENTER, fragment %llu, count %u\n",
             (unsigned long long)fragment, count);

        if (ufs_fragnum(fragment) + count > uspi->s_fpg)
                ufs_error (sb, "ufs_free_fragments", "internal error");

        mutex_lock(&UFS_SB(sb)->s_lock);

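        /*
         * Translate the absolute fragment number into a cylinder group
         * number and an offset inside that group's free map: ufs_dtog()
         * divides by the fragments per group, ufs_dtogd() keeps the
         * remainder.  (Purely as an illustration, with s_fpg == 2048,
         * fragment 5000 would land in cg 2 at bit 904.)
         */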
        cgno = ufs_dtog(uspi, fragment);
        bit = ufs_dtogd(uspi, fragment);
        if (cgno >= uspi->s_ncg) {
                ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
                goto failed;
        }

        ucpi = ufs_load_cylinder (sb, cgno);
        if (!ucpi)
                goto failed;
        ucg = ubh_get_ucg (UCPI_UBH(ucpi));
        if (!ufs_cg_chkmagic(sb, ucg)) {
                ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
                goto failed;
        }

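        /*
         * The cg_frsum[] array counts free runs by size.  Before touching
         * the bitmap, withdraw the old accounting for this block's fragment
         * map (ufs_fragacct with -1), set the bits being freed, and then
         * re-add the accounting for the updated map (+1).
         */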
        end_bit = bit + count;
        bbase = ufs_blknum (bit);
        blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
        ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
        for (i = bit; i < end_bit; i++) {
                if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
                        ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
                else
                        ufs_error (sb, "ufs_free_fragments",
                                   "bit already cleared for fragment %u", i);
        }

        fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
        uspi->cs_total.cs_nffree += count;
        fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
        blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
        ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);

        /*
         * Try to reassemble the free fragments into a whole block: if every
         * fragment of the base block is now free, convert the per-fragment
         * accounting into a free-block count.
         */
        blkno = ufs_fragstoblks (bbase);
        if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
                fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
                uspi->cs_total.cs_nffree -= uspi->s_fpb;
                fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
                if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
                        ufs_clusteracct (sb, ucpi, blkno, 1);
                fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
                uspi->cs_total.cs_nbfree++;
                fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
                if (uspi->fs_magic != UFS2_MAGIC) {
                        unsigned cylno = ufs_cbtocylno (bbase);

                        fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
                                                  ufs_cbtorpos(bbase)), 1);
                        fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
                }
        }

        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
        if (sb->s_flags & MS_SYNCHRONOUS)
                ubh_sync_block(UCPI_UBH(ucpi));
        ufs_mark_sb_dirty(sb);

        mutex_unlock(&UFS_SB(sb)->s_lock);
        UFSD("EXIT\n");
        return;

failed:
        mutex_unlock(&UFS_SB(sb)->s_lock);
        UFSD("EXIT (FAILED)\n");
        return;
}

/*
 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
 */
void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
{
        struct super_block * sb;
        struct ufs_sb_private_info * uspi;
        struct ufs_super_block_first * usb1;
        struct ufs_cg_private_info * ucpi;
        struct ufs_cylinder_group * ucg;
        unsigned overflow, cgno, bit, end_bit, i;
        u64 blkno;

        sb = inode->i_sb;
        uspi = UFS_SB(sb)->s_uspi;
        usb1 = ubh_get_usb_first(uspi);

        UFSD("ENTER, fragment %llu, count %u\n",
             (unsigned long long)fragment, count);

        if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
                ufs_error (sb, "ufs_free_blocks", "internal error, "
                           "fragment %llu, count %u\n",
                           (unsigned long long)fragment, count);
                goto failed;
        }

        mutex_lock(&UFS_SB(sb)->s_lock);

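        /*
         * The range to free may extend past the end of the current cylinder
         * group.  Each pass below frees the part that fits in one group;
         * whatever overflows is carried into the next iteration via the
         * do_more label.
         */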
do_more:
        overflow = 0;
        cgno = ufs_dtog(uspi, fragment);
        bit = ufs_dtogd(uspi, fragment);
        if (cgno >= uspi->s_ncg) {
                ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
                goto failed_unlock;
        }
        end_bit = bit + count;
        if (end_bit > uspi->s_fpg) {
                overflow = bit + count - uspi->s_fpg;
                count -= overflow;
                end_bit -= overflow;
        }

        ucpi = ufs_load_cylinder (sb, cgno);
        if (!ucpi)
                goto failed_unlock;
        ucg = ubh_get_ucg (UCPI_UBH(ucpi));
        if (!ufs_cg_chkmagic(sb, ucg)) {
                ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
                goto failed_unlock;
        }

        for (i = bit; i < end_bit; i += uspi->s_fpb) {
                blkno = ufs_fragstoblks(i);
                if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
                        ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
                }
                ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
                if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
                        ufs_clusteracct (sb, ucpi, blkno, 1);

                fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
                uspi->cs_total.cs_nbfree++;
                fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);

                if (uspi->fs_magic != UFS2_MAGIC) {
                        unsigned cylno = ufs_cbtocylno(i);

                        fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
                                                  ufs_cbtorpos(i)), 1);
                        fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
                }
        }

        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
        if (sb->s_flags & MS_SYNCHRONOUS)
                ubh_sync_block(UCPI_UBH(ucpi));

        if (overflow) {
                fragment += count;
                count = overflow;
                goto do_more;
        }

        ufs_mark_sb_dirty(sb);
        mutex_unlock(&UFS_SB(sb)->s_lock);
        UFSD("EXIT\n");
        return;

failed_unlock:
        mutex_unlock(&UFS_SB(sb)->s_lock);
failed:
        UFSD("EXIT (FAILED)\n");
        return;
}

/*
 * Modify the inode page cache so that buffers which currently have
 * b_blocknr equal to oldb...oldb+count-1 end up with b_blocknr equal to
 * newb...newb+count-1.  We assume that the blocks oldb...oldb+count-1 are
 * situated at the end of the file.
 *
 * We can get here from ufs_writepage or ufs_prepare_write; locked_page is
 * an argument of those functions, so it is already locked.
 */
static void ufs_change_blocknr(struct inode *inode, sector_t beg,
                               unsigned int count, sector_t oldb,
                               sector_t newb, struct page *locked_page)
{
        const unsigned blks_per_page =
                1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);
        const unsigned mask = blks_per_page - 1;
        struct address_space * const mapping = inode->i_mapping;
        pgoff_t index, cur_index, last_index;
        unsigned pos, j, lblock;
        sector_t end, i;
        struct page *page;
        struct buffer_head *head, *bh;

        UFSD("ENTER, ino %lu, count %u, oldb %llu, newb %llu\n",
              inode->i_ino, count,
             (unsigned long long)oldb, (unsigned long long)newb);

        BUG_ON(!locked_page);
        BUG_ON(!PageLocked(locked_page));

        cur_index = locked_page->index;
        end = count + beg;
        last_index = end >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
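        /*
         * Walk the affected range one page at a time: (i | mask) + 1 jumps
         * from anywhere inside a page straight to the first block of the
         * next page, so each iteration handles the buffers of one page.
         */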
        for (i = beg; i < end; i = (i | mask) + 1) {
                index = i >> (PAGE_CACHE_SHIFT - inode->i_blkbits);

                if (likely(cur_index != index)) {
                        page = ufs_get_locked_page(mapping, index);
                        if (!page) /* it was truncated */
                                continue;
                        if (IS_ERR(page)) { /* or EIO */
                                ufs_error(inode->i_sb, __func__,
                                          "read of page %llu failed\n",
                                          (unsigned long long)index);
                                continue;
                        }
                } else
                        page = locked_page;

                head = page_buffers(page);
                bh = head;
                pos = i & mask;
                for (j = 0; j < pos; ++j)
                        bh = bh->b_this_page;

                if (unlikely(index == last_index))
                        lblock = end & mask;
                else
                        lblock = blks_per_page;

                do {
                        if (j >= lblock)
                                break;
                        pos = (i - beg) + j;

                        if (!buffer_mapped(bh))
                                map_bh(bh, inode->i_sb, oldb + pos);
                        if (!buffer_uptodate(bh)) {
                                ll_rw_block(READ, 1, &bh);
                                wait_on_buffer(bh);
                                if (!buffer_uptodate(bh)) {
                                        ufs_error(inode->i_sb, __func__,
                                                  "read of block failed\n");
                                        break;
                                }
                        }

                        UFSD(" change from %llu to %llu, pos %u\n",
                             (unsigned long long)(pos + oldb),
                             (unsigned long long)(pos + newb), pos);

                        bh->b_blocknr = newb + pos;
                        unmap_underlying_metadata(bh->b_bdev,
                                                  bh->b_blocknr);
                        mark_buffer_dirty(bh);
                        ++j;
                        bh = bh->b_this_page;
                } while (bh != head);

                if (likely(cur_index != index))
                        ufs_put_locked_page(page);
        }
        UFSD("EXIT\n");
}

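/*
 * Zero the newly allocated fragments beg...beg+n-1 through the buffer
 * cache, marking each buffer up to date and dirty; write them out
 * immediately when the inode is synchronous or 'sync' is set.
 */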
static void ufs_clear_frags(struct inode *inode, sector_t beg, unsigned int n,
                            int sync)
{
        struct buffer_head *bh;
        sector_t end = beg + n;

        for (; beg < end; ++beg) {
                bh = sb_getblk(inode->i_sb, beg);
                lock_buffer(bh);
                memset(bh->b_data, 0, inode->i_sb->s_blocksize);
                set_buffer_uptodate(bh);
                mark_buffer_dirty(bh);
                unlock_buffer(bh);
                if (IS_SYNC(inode) || sync)
                        sync_dirty_buffer(bh);
                brelse(bh);
        }
}

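/*
 * Allocate 'count' fragments for the data pointer 'p' of the inode, starting
 * at fragment number 'fragment' and preferably near 'goal'.  Three cases are
 * handled below: a brand new allocation (oldcount == 0), extending an
 * existing tail of fragments in place, and, failing that, allocating a new
 * run elsewhere and moving the old data over with ufs_change_blocknr().
 * Returns the first allocated fragment, 0 on failure, or INVBLOCK on an
 * internal inconsistency; *err is set accordingly.
 */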
u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
                           u64 goal, unsigned count, int *err,
                           struct page *locked_page)
{
        struct super_block * sb;
        struct ufs_sb_private_info * uspi;
        struct ufs_super_block_first * usb1;
        unsigned cgno, oldcount, newcount;
        u64 tmp, request, result;

        UFSD("ENTER, ino %lu, fragment %llu, goal %llu, count %u\n",
             inode->i_ino, (unsigned long long)fragment,
             (unsigned long long)goal, count);

        sb = inode->i_sb;
        uspi = UFS_SB(sb)->s_uspi;
        usb1 = ubh_get_usb_first(uspi);
        *err = -ENOSPC;

        mutex_lock(&UFS_SB(sb)->s_lock);
        tmp = ufs_data_ptr_to_cpu(sb, p);

        if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
                ufs_warning(sb, "ufs_new_fragments", "internal warning"
                            " fragment %llu, count %u",
                            (unsigned long long)fragment, count);
                count = uspi->s_fpb - ufs_fragnum(fragment);
        }
        oldcount = ufs_fragnum (fragment);
        newcount = oldcount + count;

        /*
         * Somebody else has just allocated our fragments
         */
        if (oldcount) {
                if (!tmp) {
                        ufs_error(sb, "ufs_new_fragments", "internal error, "
                                  "fragment %llu, tmp %llu\n",
                                  (unsigned long long)fragment,
                                  (unsigned long long)tmp);
                        mutex_unlock(&UFS_SB(sb)->s_lock);
                        return INVBLOCK;
                }
                if (fragment < UFS_I(inode)->i_lastfrag) {
                        UFSD("EXIT (ALREADY ALLOCATED)\n");
                        mutex_unlock(&UFS_SB(sb)->s_lock);
                        return 0;
                }
        }
        else {
                if (tmp) {
                        UFSD("EXIT (ALREADY ALLOCATED)\n");
                        mutex_unlock(&UFS_SB(sb)->s_lock);
                        return 0;
                }
        }

        /*
         * There is not enough space left for an unprivileged user on the
         * device (only the reserved minfree space remains).
         */
        if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) {
                mutex_unlock(&UFS_SB(sb)->s_lock);
                UFSD("EXIT (FAILED)\n");
                return 0;
        }

        if (goal >= uspi->s_size)
                goal = 0;
        if (goal == 0)
                cgno = ufs_inotocg (inode->i_ino);
        else
                cgno = ufs_dtog(uspi, goal);

        /*
         * allocate new fragment
         */
        if (oldcount == 0) {
                result = ufs_alloc_fragments (inode, cgno, goal, count, err);
                if (result) {
                        ufs_cpu_to_data_ptr(sb, p, result);
                        *err = 0;
                        UFS_I(inode)->i_lastfrag =
                                max(UFS_I(inode)->i_lastfrag, fragment + count);
                        ufs_clear_frags(inode, result + oldcount,
                                        newcount - oldcount, locked_page != NULL);
                }
                mutex_unlock(&UFS_SB(sb)->s_lock);
                UFSD("EXIT, result %llu\n", (unsigned long long)result);
                return result;
        }

        /*
         * resize block
         */
        result = ufs_add_fragments (inode, tmp, oldcount, newcount, err);
        if (result) {
                *err = 0;
                UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
                                                fragment + count);
                ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
                                locked_page != NULL);
                mutex_unlock(&UFS_SB(sb)->s_lock);
                UFSD("EXIT, result %llu\n", (unsigned long long)result);
                return result;
        }

        /*
         * allocate new block and move data
         */
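        /*
         * Pick the request size according to the filesystem's optimization
         * preference: UFS_OPTSPACE asks for exactly the fragments needed at
         * the cost of more copying later, UFS_OPTTIME asks for a whole block
         * so the tail will not have to move again soon.  The preference
         * flips automatically depending on how much free space is left.
         */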
        switch (fs32_to_cpu(sb, usb1->fs_optim)) {
            case UFS_OPTSPACE:
                request = newcount;
                if (uspi->s_minfree < 5 || uspi->cs_total.cs_nffree
                    > uspi->s_dsize * uspi->s_minfree / (2 * 100))
                        break;
                usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
                break;
            default:
                usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
                /* fall through */
            case UFS_OPTTIME:
                request = uspi->s_fpb;
                if (uspi->cs_total.cs_nffree < uspi->s_dsize *
                    (uspi->s_minfree - 2) / 100)
                        break;
                /* plenty of free space again: switch back to space optimization */
                usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTSPACE);
                break;
        }
        result = ufs_alloc_fragments (inode, cgno, goal, request, err);
        if (result) {
                ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
                                locked_page != NULL);
                ufs_change_blocknr(inode, fragment - oldcount, oldcount,
                                   uspi->s_sbbase + tmp,
                                   uspi->s_sbbase + result, locked_page);
                ufs_cpu_to_data_ptr(sb, p, result);
                *err = 0;
                UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
                                                fragment + count);
                mutex_unlock(&UFS_SB(sb)->s_lock);
                if (newcount < request)
                        ufs_free_fragments (inode, result + newcount, request - newcount);
                ufs_free_fragments (inode, tmp, oldcount);
                UFSD("EXIT, result %llu\n", (unsigned long long)result);
                return result;
        }

        mutex_unlock(&UFS_SB(sb)->s_lock);
        UFSD("EXIT (FAILED)\n");
        return 0;
}

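/*
 * Try to extend the existing run at 'fragment' in place from 'oldcount' to
 * 'newcount' fragments by claiming the immediately following free fragments
 * in the same block.  Returns 'fragment' on success and 0 if the needed
 * fragments are not free (the caller then falls back to a fresh allocation).
 */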
static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
                             unsigned oldcount, unsigned newcount, int *err)
{
        struct super_block * sb;
        struct ufs_sb_private_info * uspi;
        struct ufs_super_block_first * usb1;
        struct ufs_cg_private_info * ucpi;
        struct ufs_cylinder_group * ucg;
        unsigned cgno, fragno, fragoff, count, fragsize, i;

        UFSD("ENTER, fragment %llu, oldcount %u, newcount %u\n",
             (unsigned long long)fragment, oldcount, newcount);

        sb = inode->i_sb;
        uspi = UFS_SB(sb)->s_uspi;
        usb1 = ubh_get_usb_first (uspi);
        count = newcount - oldcount;

        cgno = ufs_dtog(uspi, fragment);
        if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
                return 0;
        if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
                return 0;
        ucpi = ufs_load_cylinder (sb, cgno);
        if (!ucpi)
                return 0;
        ucg = ubh_get_ucg (UCPI_UBH(ucpi));
        if (!ufs_cg_chkmagic(sb, ucg)) {
                ufs_panic (sb, "ufs_add_fragments",
                        "internal error, bad magic number on cg %u", cgno);
                return 0;
        }

        fragno = ufs_dtogd(uspi, fragment);
        fragoff = ufs_fragnum (fragno);
        for (i = oldcount; i < newcount; i++)
                if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
                        return 0;
        /*
         * Block can be extended
         */
        ucg->cg_time = cpu_to_fs32(sb, get_seconds());
        for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
                if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
                        break;
        fragsize = i - oldcount;
        if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
                ufs_panic (sb, "ufs_add_fragments",
                        "internal error or corrupted bitmap on cg %u", cgno);
        fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
        if (fragsize != count)
                fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
        for (i = oldcount; i < newcount; i++)
                ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);

        fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
        fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
        uspi->cs_total.cs_nffree -= count;

        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
        if (sb->s_flags & MS_SYNCHRONOUS)
                ubh_sync_block(UCPI_UBH(ucpi));
        ufs_mark_sb_dirty(sb);

        UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment);

        return fragment;
}

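/*
 * Check whether cylinder group 'cgno' can satisfy the request: either it
 * has a whole free block, or its cg_frsum[] counters show a free run of at
 * least 'count' fragments.  On success control jumps to cg_found.
 */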
#define UFS_TEST_FREE_SPACE_CG \
        ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
        if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
                goto cg_found; \
        for (k = count; k < uspi->s_fpb; k++) \
                if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
                        goto cg_found;

static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno,
                               u64 goal, unsigned count, int *err)
{
        struct super_block * sb;
        struct ufs_sb_private_info * uspi;
        struct ufs_super_block_first * usb1;
        struct ufs_cg_private_info * ucpi;
        struct ufs_cylinder_group * ucg;
        unsigned oldcg, i, j, k, allocsize;
        u64 result;

        UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n",
             inode->i_ino, cgno, (unsigned long long)goal, count);

        sb = inode->i_sb;
        uspi = UFS_SB(sb)->s_uspi;
        usb1 = ubh_get_usb_first(uspi);
        oldcg = cgno;

        /*
         * 1. search the preferred cylinder group
         */
        UFS_TEST_FREE_SPACE_CG

        /*
         * 2. quadratic rehash
         */
        for (j = 1; j < uspi->s_ncg; j *= 2) {
                cgno += j;
                if (cgno >= uspi->s_ncg)
                        cgno -= uspi->s_ncg;
                UFS_TEST_FREE_SPACE_CG
        }

        /*
         * 3. brute force search
         * We start at offset 2 from the preferred group (offset 0 was
         * checked in step 1, offset 1 in step 2).
         */
        cgno = (oldcg + 1) % uspi->s_ncg;
        for (j = 2; j < uspi->s_ncg; j++) {
                cgno++;
                if (cgno >= uspi->s_ncg)
                        cgno = 0;
                UFS_TEST_FREE_SPACE_CG
        }

        UFSD("EXIT (FAILED)\n");
        return 0;

cg_found:
        ucpi = ufs_load_cylinder (sb, cgno);
        if (!ucpi)
                return 0;
        ucg = ubh_get_ucg (UCPI_UBH(ucpi));
        if (!ufs_cg_chkmagic(sb, ucg))
                ufs_panic (sb, "ufs_alloc_fragments",
                        "internal error, bad magic number on cg %u", cgno);
        ucg->cg_time = cpu_to_fs32(sb, get_seconds());

        if (count == uspi->s_fpb) {
                result = ufs_alloccg_block (inode, ucpi, goal, err);
                if (result == INVBLOCK)
                        return 0;
                goto succed;
        }

        for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
                if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
                        break;

        if (allocsize == uspi->s_fpb) {
                result = ufs_alloccg_block (inode, ucpi, goal, err);
                if (result == INVBLOCK)
                        return 0;
                goal = ufs_dtogd(uspi, result);
                for (i = count; i < uspi->s_fpb; i++)
                        ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
                i = uspi->s_fpb - count;

                fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
                uspi->cs_total.cs_nffree += i;
                fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
                fs32_add(sb, &ucg->cg_frsum[i], 1);
                goto succed;
        }

        result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
        if (result == INVBLOCK)
                return 0;
        for (i = 0; i < count; i++)
                ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);

        fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
        uspi->cs_total.cs_nffree -= count;
        fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
        fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);

        if (count != allocsize)
                fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);

succed:
        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
        if (sb->s_flags & MS_SYNCHRONOUS)
                ubh_sync_block(UCPI_UBH(ucpi));
        ufs_mark_sb_dirty(sb);

        result += cgno * uspi->s_fpg;
        UFSD("EXIT3, result %llu\n", (unsigned long long)result);
        return result;
}

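/*
 * Allocate one whole block in the cylinder group described by 'ucpi',
 * preferably the block containing 'goal' (or continuing from the group's
 * rotor when no goal is given).  Returns the first fragment of the block,
 * or INVBLOCK if the group has no free block.
 */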
static u64 ufs_alloccg_block(struct inode *inode,
                             struct ufs_cg_private_info *ucpi,
                             u64 goal, int *err)
{
        struct super_block * sb;
        struct ufs_sb_private_info * uspi;
        struct ufs_super_block_first * usb1;
        struct ufs_cylinder_group * ucg;
        u64 result, blkno;

        UFSD("ENTER, goal %llu\n", (unsigned long long)goal);

        sb = inode->i_sb;
        uspi = UFS_SB(sb)->s_uspi;
        usb1 = ubh_get_usb_first(uspi);
        ucg = ubh_get_ucg(UCPI_UBH(ucpi));

        if (goal == 0) {
                goal = ucpi->c_rotor;
                goto norot;
        }
        goal = ufs_blknum (goal);
        goal = ufs_dtogd(uspi, goal);

        /*
         * If the requested block is available, use it.
         */
        if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
                result = goal;
                goto gotit;
        }

norot:
        result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
        if (result == INVBLOCK)
                return INVBLOCK;
        ucpi->c_rotor = result;
gotit:
        blkno = ufs_fragstoblks(result);
        ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
        if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
                ufs_clusteracct (sb, ucpi, blkno, -1);

        fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
        uspi->cs_total.cs_nbfree--;
        fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);

        if (uspi->fs_magic != UFS2_MAGIC) {
                unsigned cylno = ufs_cbtocylno((unsigned)result);

                fs16_sub(sb, &ubh_cg_blks(ucpi, cylno,
                                          ufs_cbtorpos((unsigned)result)), 1);
                fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
        }

        UFSD("EXIT, result %llu\n", (unsigned long long)result);

        return result;
}

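/*
 * Scan 'size' bytes of the bitmap starting at byte offset 'begin', looking
 * for the first byte whose table[] entry has a bit of 'mask' set (i.e. a
 * byte that contains a suitable run of free fragments).  Returns the number
 * of bytes left to scan, counting the matching byte, or 0 if no byte
 * matched; the caller turns that into a byte position.  This mirrors the
 * BSD scanc() primitive, walking the buffers of the ufs_buffer_head one
 * device block at a time.
 */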
static unsigned ubh_scanc(struct ufs_sb_private_info *uspi,
                          struct ufs_buffer_head *ubh,
                          unsigned begin, unsigned size,
                          unsigned char *table, unsigned char mask)
{
        unsigned rest, offset;
        unsigned char *cp;

        offset = begin & ~uspi->s_fmask;
        begin >>= uspi->s_fshift;
        for (;;) {
                if ((offset + size) < uspi->s_fsize)
                        rest = size;
                else
                        rest = uspi->s_fsize - offset;
                size -= rest;
                cp = ubh->bh[begin]->b_data + offset;
                while ((table[*cp++] & mask) == 0 && --rest)
                        ;
                if (rest || !size)
                        break;
                begin++;
                offset = 0;
        }
        return (size + rest);
}

/*
 * Find a free run of the specified size in the specified cylinder group.
 * @sb: pointer to the super block
 * @ucpi: pointer to the cylinder group info
 * @goal: block near which we want to find the new one
 * @count: number of fragments wanted
 */
static u64 ufs_bitmap_search(struct super_block *sb,
                             struct ufs_cg_private_info *ucpi,
                             u64 goal, unsigned count)
{
        /*
         * Bit patterns for identifying fragments in the block map
         * used as ((map & mask_arr) == want_arr)
         */
        static const int mask_arr[9] = {
                0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
        };
        static const int want_arr[9] = {
                0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
        };
        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
        struct ufs_super_block_first *usb1;
        struct ufs_cylinder_group *ucg;
        unsigned start, length, loc;
        unsigned pos, want, blockmap, mask, end;
        u64 result;

        UFSD("ENTER, cg %u, goal %llu, count %u\n", ucpi->c_cgx,
             (unsigned long long)goal, count);

        usb1 = ubh_get_usb_first (uspi);
        ucg = ubh_get_ucg(UCPI_UBH(ucpi));

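        /*
         * Two-phase search: first scan the free-fragment bitmap a byte at a
         * time (ubh_scanc with the fragment table) to find a byte that
         * contains a run of at least 'count' free fragments, starting at the
         * goal or at the group's rotor and wrapping around once; then examine
         * that byte bit by bit with the mask/want patterns above to locate
         * the exact starting fragment.
         */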
        if (goal)
                start = ufs_dtogd(uspi, goal) >> 3;
        else
                start = ucpi->c_frotor >> 3;

        length = ((uspi->s_fpg + 7) >> 3) - start;
        loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length,
                (uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
                1 << (count - 1 + (uspi->s_fpb & 7)));
        if (loc == 0) {
                length = start + 1;
                loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff, length,
                                (uspi->s_fpb == 8) ? ufs_fragtable_8fpb :
                                ufs_fragtable_other,
                                1 << (count - 1 + (uspi->s_fpb & 7)));
                if (loc == 0) {
                        ufs_error(sb, "ufs_bitmap_search",
                                  "bitmap corrupted on cg %u, start %u,"
                                  " length %u, count %u, freeoff %u\n",
                                  ucpi->c_cgx, start, length, count,
                                  ucpi->c_freeoff);
                        return INVBLOCK;
                }
                start = 0;
        }
        result = (start + length - loc) << 3;
        ucpi->c_frotor = result;

        /*
         * found the byte in the map
         */

        for (end = result + 8; result < end; result += uspi->s_fpb) {
                blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result);
                blockmap <<= 1;
                mask = mask_arr[count];
                want = want_arr[count];
                for (pos = 0; pos <= uspi->s_fpb - count; pos++) {
                        if ((blockmap & mask) == want) {
                                UFSD("EXIT, result %llu\n",
                                     (unsigned long long)result);
                                return result + pos;
                        }
                        mask <<= 1;
                        want <<= 1;
                }
        }

        ufs_error(sb, "ufs_bitmap_search", "block not in map on cg %u\n",
                  ucpi->c_cgx);
        UFSD("EXIT (FAILED)\n");
        return INVBLOCK;
}

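/*
 * Maintain the 44BSD-style cluster bookkeeping for one block: mark it free
 * (cnt == 1) or allocated (cnt == -1) in the cluster map, then measure the
 * free cluster the block now joins or leaves (scanning forward and backward
 * up to s_contigsumsize) and update the cluster-size summary counters.
 */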
static void ufs_clusteracct(struct super_block * sb,
        struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
{
        struct ufs_sb_private_info * uspi;
        int i, start, end, forw, back;

        uspi = UFS_SB(sb)->s_uspi;
        if (uspi->s_contigsumsize <= 0)
                return;

        if (cnt > 0)
                ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
        else
                ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);

        /*
         * Find the size of the cluster going forward.
         */
        start = blkno + 1;
        end = start + uspi->s_contigsumsize;
        if (end >= ucpi->c_nclusterblks)
                end = ucpi->c_nclusterblks;
        i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
        if (i > end)
                i = end;
        forw = i - start;

        /*
         * Find the size of the cluster going backward.
         */
        start = blkno - 1;
        end = start - uspi->s_contigsumsize;
        if (end < 0)
                end = -1;
        i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
        if (i < end)
                i = end;
        back = start - i;

        /*
         * Account for old cluster and the possibly new forward and
         * back clusters.
         */
        i = back + forw + 1;
        if (i > uspi->s_contigsumsize)
                i = uspi->s_contigsumsize;
        fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
        if (back > 0)
                fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
        if (forw > 0)
                fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
}

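/*
 * Per-byte free-run tables used by ubh_scanc(): ufs_fragtable_8fpb[map] has
 * bit (n - 1) set when the byte 'map' of the free-fragment bitmap contains a
 * run of exactly n free fragments (e.g. 0x03 -> 0x02, a single run of two).
 * ufs_fragtable_other packs the same information for 1, 2 and 4 fragments
 * per block into one table; the bit position is offset by s_fpb, which is
 * why ufs_bitmap_search() builds its mask as 1 << (count - 1 + (s_fpb & 7)).
 */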
static unsigned char ufs_fragtable_8fpb[] = {
        0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
        0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
        0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
        0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
        0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
        0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
        0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
        0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
        0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
        0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
        0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
        0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
        0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
        0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
        0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
        0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
};

static unsigned char ufs_fragtable_other[] = {
        0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
        0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
        0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
        0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
        0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
        0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
        0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
        0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
        0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
        0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
        0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
        0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
        0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
        0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
        0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
        0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,
};