/*
 *  linux/fs/minix/bitmap.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * Modified for 680x0 by Hamish Macdonald
 * Fixed for 680x0 by Andreas Schwab
 */

/* bitmap.c contains the code that handles the inode and block bitmaps */

#include "minix.h"
#include <linux/buffer_head.h>
#include <linux/bitops.h>
#include <linux/sched.h>

static DEFINE_SPINLOCK(bitmap_lock);

/*
 * bitmap consists of blocks filled with 16bit words
 * bit set == busy, bit clear == free
 * endianness is a mess, but for counting zero bits it really doesn't matter...
 */
static __u32 count_free(struct buffer_head *map[], unsigned blocksize, __u32 numbits)
{
	__u32 sum = 0;
	unsigned blocks = DIV_ROUND_UP(numbits, blocksize * 8);

	while (blocks--) {
		unsigned words = blocksize / 2;
		__u16 *p = (__u16 *)(*map++)->b_data;
		while (words--)
			sum += 16 - hweight16(*p++);
	}

	return sum;
}
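
/*
 * Free a data zone: map the zone number to a bit in the zone bitmap and
 * clear it under bitmap_lock, warning if it was already clear.
 */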
void minix_free_block(struct inode *inode, unsigned long block)
{
	struct super_block *sb = inode->i_sb;
	struct minix_sb_info *sbi = minix_sb(sb);
	struct buffer_head *bh;
	int k = sb->s_blocksize_bits + 3;
	unsigned long bit, zone;

	if (block < sbi->s_firstdatazone || block >= sbi->s_nzones) {
		printk("Trying to free block not in datazone\n");
		return;
	}
	zone = block - sbi->s_firstdatazone + 1;
	bit = zone & ((1<<k) - 1);
	zone >>= k;
	if (zone >= sbi->s_zmap_blocks) {
		printk("minix_free_block: nonexistent bitmap buffer\n");
		return;
	}
	bh = sbi->s_zmap[zone];
	spin_lock(&bitmap_lock);
	if (!minix_test_and_clear_bit(bit, bh->b_data))
		printk("minix_free_block (%s:%lu): bit already cleared\n",
		       sb->s_id, block);
	spin_unlock(&bitmap_lock);
	mark_buffer_dirty(bh);
}
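
/*
 * Allocate a data zone: scan the zone bitmap for the first clear bit,
 * set it, and convert it back to an absolute zone number.  Returns 0
 * when no zone is available.
 */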
int minix_new_block(struct inode * inode)
{
	struct minix_sb_info *sbi = minix_sb(inode->i_sb);
	int bits_per_zone = 8 * inode->i_sb->s_blocksize;
	int i;

	for (i = 0; i < sbi->s_zmap_blocks; i++) {
		struct buffer_head *bh = sbi->s_zmap[i];
		int j;

		spin_lock(&bitmap_lock);
		j = minix_find_first_zero_bit(bh->b_data, bits_per_zone);
		if (j < bits_per_zone) {
			minix_set_bit(j, bh->b_data);
			spin_unlock(&bitmap_lock);
			mark_buffer_dirty(bh);
			j += i * bits_per_zone + sbi->s_firstdatazone-1;
			if (j < sbi->s_firstdatazone || j >= sbi->s_nzones)
				break;
			return j;
		}
		spin_unlock(&bitmap_lock);
	}
	return 0;
}
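
/*
 * Count free blocks for statfs: sum the clear bits in the zone bitmap
 * and convert zones to blocks via the log zone size.
 */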
unsigned long minix_count_free_blocks(struct super_block *sb)
{
	struct minix_sb_info *sbi = minix_sb(sb);
	u32 bits = sbi->s_nzones - (sbi->s_firstdatazone + 1);

	return (count_free(sbi->s_zmap, sb->s_blocksize, bits)
		<< sbi->s_log_zone_size);
}
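
/*
 * Locate the on-disk V1 inode for @ino and return a pointer into its
 * block; the buffer head comes back through @bh for the caller to
 * dirty and release.
 */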
struct minix_inode *
minix_V1_raw_inode(struct super_block *sb, ino_t ino, struct buffer_head **bh)
{
	int block;
	struct minix_sb_info *sbi = minix_sb(sb);
	struct minix_inode *p;

	if (!ino || ino > sbi->s_ninodes) {
		printk("Bad inode number on dev %s: %ld is out of range\n",
		       sb->s_id, (long)ino);
		return NULL;
	}
	ino--;
	block = 2 + sbi->s_imap_blocks + sbi->s_zmap_blocks +
		 ino / MINIX_INODES_PER_BLOCK;
	*bh = sb_bread(sb, block);
	if (!*bh) {
		printk("Unable to read inode block\n");
		return NULL;
	}
	p = (void *)(*bh)->b_data;
	return p + ino % MINIX_INODES_PER_BLOCK;
}
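
/* As above, but for the larger V2 on-disk inode layout. */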
struct minix2_inode *
minix_V2_raw_inode(struct super_block *sb, ino_t ino, struct buffer_head **bh)
{
	int block;
	struct minix_sb_info *sbi = minix_sb(sb);
	struct minix2_inode *p;
	int minix2_inodes_per_block = sb->s_blocksize / sizeof(struct minix2_inode);

	*bh = NULL;
	if (!ino || ino > sbi->s_ninodes) {
		printk("Bad inode number on dev %s: %ld is out of range\n",
		       sb->s_id, (long)ino);
		return NULL;
	}
	ino--;
	block = 2 + sbi->s_imap_blocks + sbi->s_zmap_blocks +
		 ino / minix2_inodes_per_block;
	*bh = sb_bread(sb, block);
	if (!*bh) {
		printk("Unable to read inode block\n");
		return NULL;
	}
	p = (void *)(*bh)->b_data;
	return p + ino % minix2_inodes_per_block;
}

/* Clear the link count and mode of a deleted inode on disk. */

static void minix_clear_inode(struct inode *inode)
{
	struct buffer_head *bh = NULL;

	if (INODE_VERSION(inode) == MINIX_V1) {
		struct minix_inode *raw_inode;
		raw_inode = minix_V1_raw_inode(inode->i_sb, inode->i_ino, &bh);
		if (raw_inode) {
			raw_inode->i_nlinks = 0;
			raw_inode->i_mode = 0;
		}
	} else {
		struct minix2_inode *raw_inode;
		raw_inode = minix_V2_raw_inode(inode->i_sb, inode->i_ino, &bh);
		if (raw_inode) {
			raw_inode->i_nlinks = 0;
			raw_inode->i_mode = 0;
		}
	}
	if (bh) {
		mark_buffer_dirty(bh);
		brelse(bh);
	}
}
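
/*
 * Free an inode: clear its on-disk copy, then clear its bit in the
 * inode bitmap under bitmap_lock.
 */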
void minix_free_inode(struct inode * inode)
{
	struct super_block *sb = inode->i_sb;
	struct minix_sb_info *sbi = minix_sb(inode->i_sb);
	struct buffer_head *bh;
	int k = sb->s_blocksize_bits + 3;
	unsigned long ino, bit;

	ino = inode->i_ino;
	if (ino < 1 || ino > sbi->s_ninodes) {
		printk("minix_free_inode: inode 0 or nonexistent inode\n");
		return;
	}
	bit = ino & ((1<<k) - 1);
	ino >>= k;
	if (ino >= sbi->s_imap_blocks) {
		printk("minix_free_inode: nonexistent imap in superblock\n");
		return;
	}

	minix_clear_inode(inode);	/* clear on-disk copy */

	bh = sbi->s_imap[ino];
	spin_lock(&bitmap_lock);
	if (!minix_test_and_clear_bit(bit, bh->b_data))
		printk("minix_free_inode: bit %lu already cleared\n", bit);
	spin_unlock(&bitmap_lock);
	mark_buffer_dirty(bh);
}
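
/*
 * Allocate a new inode: grab a fresh in-core inode, find and set a free
 * bit in the inode bitmap, and initialise the basic fields.  On failure
 * the inode is dropped with iput() and *error reports -ENOMEM or -ENOSPC.
 */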
struct inode *minix_new_inode(const struct inode *dir, umode_t mode, int *error)
{
	struct super_block *sb = dir->i_sb;
	struct minix_sb_info *sbi = minix_sb(sb);
	struct inode *inode = new_inode(sb);
	struct buffer_head * bh;
	int bits_per_zone = 8 * sb->s_blocksize;
	unsigned long j;
	int i;

	if (!inode) {
		*error = -ENOMEM;
		return NULL;
	}
	j = bits_per_zone;
	bh = NULL;
	*error = -ENOSPC;
	spin_lock(&bitmap_lock);
	for (i = 0; i < sbi->s_imap_blocks; i++) {
		bh = sbi->s_imap[i];
		j = minix_find_first_zero_bit(bh->b_data, bits_per_zone);
		if (j < bits_per_zone)
			break;
	}
	if (!bh || j >= bits_per_zone) {
		spin_unlock(&bitmap_lock);
		iput(inode);
		return NULL;
	}
	if (minix_test_and_set_bit(j, bh->b_data)) {	/* shouldn't happen */
		spin_unlock(&bitmap_lock);
		printk("minix_new_inode: bit already set\n");
		iput(inode);
		return NULL;
	}
	spin_unlock(&bitmap_lock);
	mark_buffer_dirty(bh);
	j += i * bits_per_zone;
	if (!j || j > sbi->s_ninodes) {
		iput(inode);
		return NULL;
	}
	inode_init_owner(inode, dir, mode);
	inode->i_ino = j;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
	inode->i_blocks = 0;
	memset(&minix_i(inode)->u, 0, sizeof(minix_i(inode)->u));
	insert_inode_hash(inode);
	mark_inode_dirty(inode);

	*error = 0;
	return inode;
}
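
/* Count free inodes for statfs by summing the clear bits in the inode bitmap. */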
unsigned long minix_count_free_inodes(struct super_block *sb)
{
	struct minix_sb_info *sbi = minix_sb(sb);
	u32 bits = sbi->s_ninodes + 1;

	return count_free(sbi->s_imap, sb->s_blocksize, bits);
}