5 * Partition handling routines for the OSTA-UDF(tm) filesystem.
8 * This file is distributed under the terms of the GNU General Public
9 * License (GPL). Copies of the GPL can be obtained from:
10 * ftp://prep.ai.mit.edu/pub/gnu/GPL
11 * Each contributing author retains all rights to their own work.
13 * (C) 1998-2001 Ben Fennema
17 * 12/06/98 blf Created file.
26 #include <linux/string.h>
27 #include <linux/buffer_head.h>
28 #include <linux/mutex.h>
/*
 * udf_get_pblock - translate a partition-relative block number into a
 * physical block on the medium.
 *
 * If the partition map installed a translation hook (virtual, sparable
 * or metadata partitions), delegate to it; otherwise the mapping is a
 * plain linear offset from the partition root.
 *
 * NOTE(review): this excerpt elides several original lines (the return
 * of the invalid-partition branch and the closing braces), so the
 * error-path return value is not visible here.
 */
30 uint32_t udf_get_pblock(struct super_block *sb, uint32_t block,
31 uint16_t partition, uint32_t offset)
33 struct udf_sb_info *sbi = UDF_SB(sb);
34 struct udf_part_map *map;
/* Reject references to partitions not parsed at mount time. */
35 if (partition >= sbi->s_partitions) {
36 udf_debug("block=%d, partition=%d, offset=%d: invalid partition\n",
37 block, partition, offset);
40 map = &sbi->s_partmaps[partition];
/* Special partition types install a per-map translation function. */
41 if (map->s_partition_func)
42 return map->s_partition_func(sb, block, partition, offset);
/* Physical partition: simple linear mapping. */
44 return map->s_partition_root + block + offset;
/*
 * udf_get_pblock_virt15 - translate a block in a UDF 1.5 virtual
 * partition by looking it up in the Virtual Allocation Table (VAT)
 * carried by the VAT inode, then re-mapping the looked-up location
 * through the partition the VAT inode itself lives in.
 *
 * NOTE(review): several original lines are elided in this excerpt
 * (out-of-range/IO-error returns, the bh release, the final offset
 * argument of the closing udf_get_pblock() call), so those paths are
 * not visible here.
 */
47 uint32_t udf_get_pblock_virt15(struct super_block *sb, uint32_t block,
48 uint16_t partition, uint32_t offset)
50 struct buffer_head *bh = NULL;
54 struct udf_sb_info *sbi = UDF_SB(sb);
55 struct udf_part_map *map;
56 struct udf_virtual_data *vdata;
57 struct udf_inode_info *iinfo = UDF_I(sbi->s_vat_inode);
59 map = &sbi->s_partmaps[partition];
60 vdata = &map->s_type_specific.s_virtual;
/* Bounds check the request against the number of VAT entries. */
62 if (block > vdata->s_num_entries) {
63 udf_debug("Trying to access block beyond end of VAT (%d max %d)\n",
64 block, vdata->s_num_entries);
/*
 * VAT embedded in the ICB: the little-endian 32-bit entries live
 * directly in the inode's in-core data area, s_start_offset bytes in.
 */
68 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
69 loc = le32_to_cpu(((__le32 *)(iinfo->i_ext.i_data +
70 vdata->s_start_offset))[block]);
/* Number of entries that fit in the first (partial) VAT block. */
73 index = (sb->s_blocksize - vdata->s_start_offset) / sizeof(uint32_t);
/* Entry lives in a later, fully-packed VAT file block. */
76 newblock = 1 + (block / (sb->s_blocksize / sizeof(uint32_t)));
77 index = block % (sb->s_blocksize / sizeof(uint32_t));
/* Entry lives in the first VAT block, past the header bytes. */
80 index = vdata->s_start_offset / sizeof(uint32_t) + block;
/* Resolve the VAT file block to a disk block and read it. */
83 loc = udf_block_map(sbi->s_vat_inode, newblock);
85 bh = sb_bread(sb, loc);
87 udf_debug("get_pblock(UDF_VIRTUAL_MAP:%p,%d,%d) VAT: %d[%d]\n",
88 sb, block, partition, loc, index);
/* Pull the mapped block number out of the VAT block just read. */
92 loc = le32_to_cpu(((__le32 *)bh->b_data)[index]);
/*
 * Guard against a VAT entry that points back into this same virtual
 * partition, which would recurse without bound.
 */
97 if (iinfo->i_location.partitionReferenceNum == partition) {
98 udf_debug("recursive call to udf_get_pblock!\n");
/* Re-map through the partition the VAT inode actually resides in. */
102 return udf_get_pblock(sb, loc,
103 iinfo->i_location.partitionReferenceNum,
/*
 * udf_get_pblock_virt20 - UDF 2.0 virtual partitions use the same VAT
 * lookup mechanism as UDF 1.5, so simply delegate.
 */
107 inline uint32_t udf_get_pblock_virt20(struct super_block *sb, uint32_t block,
108 uint16_t partition, uint32_t offset)
110 return udf_get_pblock_virt15(sb, block, partition, offset);
/*
 * udf_get_pblock_spar15 - translate a block in a UDF 1.5 sparable
 * partition.  If the packet containing the block was remapped in the
 * sparing table, return the spared location; otherwise fall through to
 * the plain linear mapping.
 *
 * NOTE(review): loop-exit statements and some braces are elided in
 * this excerpt; the table scan presumably stops early once
 * origLocation exceeds the sought packet — confirm against the full
 * source.
 */
113 uint32_t udf_get_pblock_spar15(struct super_block *sb, uint32_t block,
114 uint16_t partition, uint32_t offset)
117 struct sparingTable *st = NULL;
118 struct udf_sb_info *sbi = UDF_SB(sb);
119 struct udf_part_map *map;
121 struct udf_sparing_data *sdata;
123 map = &sbi->s_partmaps[partition];
124 sdata = &map->s_type_specific.s_sparing;
/*
 * Align (block + offset) down to the start of its packet.
 * NOTE(review): the mask assumes s_packet_len is a power of two —
 * presumably guaranteed at mount time; confirm in the mount code.
 */
125 packet = (block + offset) & ~(sdata->s_packet_len - 1);
/* Use the first loaded copy of the (up to four) sparing tables. */
127 for (i = 0; i < 4; i++) {
128 if (sdata->s_spar_map[i] != NULL) {
129 st = (struct sparingTable *)
130 sdata->s_spar_map[i]->b_data;
/* Scan the reallocation entries looking for this packet. */
136 for (i = 0; i < le16_to_cpu(st->reallocationTableLen); i++) {
137 struct sparingEntry *entry = &st->mapEntry[i];
138 u32 origLoc = le32_to_cpu(entry->origLocation);
/* Values >= 0xFFFFFFF0 mark unused/reserved entries. */
139 if (origLoc >= 0xFFFFFFF0)
/*
 * Packet is spared: redirect into the mapped packet, preserving the
 * block's offset within the packet.
 */
141 else if (origLoc == packet)
142 return le32_to_cpu(entry->mappedLocation) +
144 (sdata->s_packet_len - 1));
145 else if (origLoc > packet)
/* Not spared: plain linear mapping from the partition root. */
150 return map->s_partition_root + block + offset;
/*
 * udf_relocate_blocks - remap a (presumably defective) block on a
 * sparable partition to a spare packet via the sparing table, under
 * the superblock's allocation mutex.
 *
 * On success *new_block receives the spared physical location of
 * old_block.  The table scan has three cases: claim a free entry
 * (origLocation == 0xFFFFFFFF) for this packet, reuse an existing
 * mapping (origLocation == packet), or insert a new entry in sorted
 * position (origLocation > packet) by pulling a free entry from later
 * in the table.
 *
 * NOTE(review): many original lines are elided in this excerpt —
 * return statements, goto labels, inner loops that appear to update
 * every loaded copy of the sparing table, and the value assigned to
 * origLocation.  The annotations below only describe what the visible
 * lines establish; confirm the rest against the full source.
 */
153 int udf_relocate_blocks(struct super_block *sb, long old_block, long *new_block)
155 struct udf_sparing_data *sdata;
156 struct sparingTable *st = NULL;
157 struct sparingEntry mapEntry;
160 struct udf_sb_info *sbi = UDF_SB(sb);
161 u16 reallocationTableLen;
162 struct buffer_head *bh;
/* All sparing-table updates are serialized by s_alloc_mutex. */
165 mutex_lock(&sbi->s_alloc_mutex);
/* Find the sparable partition whose extent contains old_block. */
166 for (i = 0; i < sbi->s_partitions; i++) {
167 struct udf_part_map *map = &sbi->s_partmaps[i];
168 if (old_block > map->s_partition_root &&
169 old_block < map->s_partition_root + map->s_partition_len) {
170 sdata = &map->s_type_specific.s_sparing;
/* Packet start, relative to the partition root. */
171 packet = (old_block - map->s_partition_root) &
172 ~(sdata->s_packet_len - 1);
/* Use the first loaded sparing-table copy for the scan. */
174 for (j = 0; j < 4; j++)
175 if (sdata->s_spar_map[j] != NULL) {
176 st = (struct sparingTable *)
177 sdata->s_spar_map[j]->b_data;
186 reallocationTableLen =
187 le16_to_cpu(st->reallocationTableLen);
188 for (k = 0; k < reallocationTableLen; k++) {
189 struct sparingEntry *entry = &st->mapEntry[k];
190 u32 origLoc = le32_to_cpu(entry->origLocation);
/* Case 1: free entry — claim it for this packet. */
192 if (origLoc == 0xFFFFFFFF) {
195 bh = sdata->s_spar_map[j];
199 st = (struct sparingTable *)
201 entry->origLocation =
/* Recompute the on-disk tag over the updated table and flush it. */
204 sizeof(struct sparingTable) +
205 reallocationTableLen *
206 sizeof(struct sparingEntry);
207 udf_update_tag((char *)st, len);
208 mark_buffer_dirty(bh);
/* Spared location plus the block's offset within its packet. */
210 *new_block = le32_to_cpu(
211 entry->mappedLocation) +
213 map->s_partition_root) &
214 (sdata->s_packet_len - 1));
/* Case 2: packet already spared — reuse the existing mapping. */
217 } else if (origLoc == packet) {
218 *new_block = le32_to_cpu(
219 entry->mappedLocation) +
221 map->s_partition_root) &
222 (sdata->s_packet_len - 1));
/* Case 3: passed the insertion point of the sorted table. */
225 } else if (origLoc > packet)
/* Find a free entry later in the table to repurpose. */
229 for (l = k; l < reallocationTableLen; l++) {
230 struct sparingEntry *entry = &st->mapEntry[l];
231 u32 origLoc = le32_to_cpu(entry->origLocation);
233 if (origLoc != 0xFFFFFFFF)
237 bh = sdata->s_spar_map[j];
241 st = (struct sparingTable *)bh->b_data;
242 mapEntry = st->mapEntry[l];
243 mapEntry.origLocation =
/* Shift entries up and drop the claimed one into sorted position k. */
245 memmove(&st->mapEntry[k + 1],
248 sizeof(struct sparingEntry));
249 st->mapEntry[k] = mapEntry;
250 udf_update_tag((char *)st,
251 sizeof(struct sparingTable) +
252 reallocationTableLen *
253 sizeof(struct sparingEntry));
254 mark_buffer_dirty(bh);
258 st->mapEntry[k].mappedLocation) +
259 ((old_block - map->s_partition_root) &
260 (sdata->s_packet_len - 1));
270 if (i == sbi->s_partitions) {
271 /* outside of partitions */
272 /* for now, fail =) */
277 mutex_unlock(&sbi->s_alloc_mutex);
/*
 * udf_try_read_meta - map a block of the given metadata file inode to
 * a physical block: locate the extent containing it with inode_bmap(),
 * then translate the extent's start through the underlying
 * sparable/physical partition.  Produces 0xFFFFFFFF when the extent is
 * not recorded/allocated.
 *
 * NOTE(review): the branch join between the failure assignment and the
 * successful mapping (and the epos/bh release and return) is elided in
 * this excerpt — presumably an if/else plus cleanup; confirm against
 * the full source.
 */
281 static uint32_t udf_try_read_meta(struct inode *inode, uint32_t block,
282 uint16_t partition, uint32_t offset)
284 struct super_block *sb = inode->i_sb;
285 struct udf_part_map *map;
286 struct kernel_lb_addr eloc;
289 struct extent_position epos = {};
/* Block must fall in a recorded & allocated extent of the inode. */
292 if (inode_bmap(inode, block, &epos, &eloc, &elen, &ext_offset) !=
293 (EXT_RECORDED_ALLOCATED >> 30))
294 phyblock = 0xFFFFFFFF;
296 map = &UDF_SB(sb)->s_partmaps[partition];
297 /* map to sparable/physical partition desc */
298 phyblock = udf_get_pblock(sb, eloc.logicalBlockNum,
299 map->s_partition_num, ext_offset + offset);
/*
 * udf_get_pblock_meta25 - translate a block in a UDF 2.5 metadata
 * partition.  Reads go through the metadata file; if that read fails
 * (0xFFFFFFFF), the mirror metadata file is lazily loaded and tried
 * instead.
 *
 * NOTE(review): this function continues past the end of this excerpt,
 * and several lines (returns, braces, the missing-inode guard body)
 * are elided; the annotations cover only the visible logic.
 */
306 uint32_t udf_get_pblock_meta25(struct super_block *sb, uint32_t block,
307 uint16_t partition, uint32_t offset)
309 struct udf_sb_info *sbi = UDF_SB(sb);
310 struct udf_part_map *map;
311 struct udf_meta_data *mdata;
315 udf_debug("READING from METADATA\n");
317 map = &sbi->s_partmaps[partition];
318 mdata = &map->s_type_specific.s_metadata;
/* Prefer the main metadata file; fall back to the mirror if absent. */
319 inode = mdata->s_metadata_fe ? : mdata->s_mirror_fe;
321 /* We shouldn't mount such media... */
323 retblk = udf_try_read_meta(inode, block, partition, offset);
/* Main metadata file failed: switch to the mirror copy. */
324 if (retblk == 0xFFFFFFFF && mdata->s_metadata_fe) {
325 udf_warn(sb, "error reading from METADATA, trying to read from MIRROR\n");
/* Load the mirror file entry on first use only. */
326 if (!(mdata->s_flags & MF_MIRROR_FE_LOADED)) {
327 mdata->s_mirror_fe = udf_find_metadata_inode_efe(sb,
328 mdata->s_mirror_file_loc, map->s_partition_num);
329 mdata->s_flags |= MF_MIRROR_FE_LOADED;
332 inode = mdata->s_mirror_fe;
/* Retry the lookup through the mirror metadata file. */
335 retblk = udf_try_read_meta(inode, block, partition, offset);