# Simple python program to print out all the extents of a single file
#
# Copyright Facebook 2014

import sys, os, struct, fcntl, ctypes, stat, argparse

maxu64 = (1L << 64) - 1
maxu32 = (1L << 32) - 1
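# used as "match everything" bounds when initializing the search keys below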

# the inode (like from stat)
BTRFS_INODE_ITEM_KEY = 1
# backref to the directory
BTRFS_INODE_REF_KEY = 12
# backref to the directory v2
BTRFS_INODE_EXTREF_KEY = 13
BTRFS_XATTR_ITEM_KEY = 24
# orphans for unlinked files
BTRFS_ORPHAN_ITEM_KEY = 48
# tree log items for dirs
BTRFS_DIR_LOG_ITEM_KEY = 60
BTRFS_DIR_LOG_INDEX_KEY = 72
# dir items and dir indexes both hold filenames
BTRFS_DIR_ITEM_KEY = 84
BTRFS_DIR_INDEX_KEY = 96
# these are the file extent pointers
BTRFS_EXTENT_DATA_KEY = 108
BTRFS_EXTENT_CSUM_KEY = 128
# root item for subvols and snapshots
BTRFS_ROOT_ITEM_KEY = 132
BTRFS_ROOT_BACKREF_KEY = 144
BTRFS_ROOT_REF_KEY = 156
# each allocated extent has an extent item
BTRFS_EXTENT_ITEM_KEY = 168
# optimized extents for metadata only
BTRFS_METADATA_ITEM_KEY = 169
# backrefs for extents
BTRFS_TREE_BLOCK_REF_KEY = 176
BTRFS_EXTENT_DATA_REF_KEY = 178
BTRFS_EXTENT_REF_V0_KEY = 180
BTRFS_SHARED_BLOCK_REF_KEY = 182
BTRFS_SHARED_DATA_REF_KEY = 184
# one of these for each block group
BTRFS_BLOCK_GROUP_ITEM_KEY = 192
# dev extents record which part of each device is allocated
BTRFS_DEV_EXTENT_KEY = 204
# dev items describe devs
BTRFS_DEV_ITEM_KEY = 216
BTRFS_CHUNK_ITEM_KEY = 228
BTRFS_QGROUP_STATUS_KEY = 240
BTRFS_QGROUP_INFO_KEY = 242
BTRFS_QGROUP_LIMIT_KEY = 244
BTRFS_QGROUP_RELATION_KEY = 246
# records balance progress
BTRFS_BALANCE_ITEM_KEY = 248
# stats on device errors
BTRFS_DEV_STATS_KEY = 249
BTRFS_DEV_REPLACE_KEY = 250
BTRFS_STRING_ITEM_KEY = 253

# store information about which extents are in use, and reference counts
BTRFS_EXTENT_TREE_OBJECTID = 2
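
# data block groups have bit 0 set in btrfs_block_group_item.flags
# (SYSTEM and METADATA block groups use bits 1 and 2)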
BTRFS_BLOCK_GROUP_DATA = (1 << 0)

# in the kernel sources, this is flattened into
# btrfs_ioctl_search_args_v2. It includes both the btrfs_ioctl_search_key
# and the buffer. We're using a 64K buffer size.
args_buffer_size = 65536

class btrfs_ioctl_search_args(ctypes.Structure):
    _pack_ = 1
    _fields_ = [ ("tree_id", ctypes.c_ulonglong),
                 ("min_objectid", ctypes.c_ulonglong),
                 ("max_objectid", ctypes.c_ulonglong),
                 ("min_offset", ctypes.c_ulonglong),
                 ("max_offset", ctypes.c_ulonglong),
                 ("min_transid", ctypes.c_ulonglong),
                 ("max_transid", ctypes.c_ulonglong),
                 ("min_type", ctypes.c_uint),
                 ("max_type", ctypes.c_uint),
                 ("nr_items", ctypes.c_uint),
                 ("unused", ctypes.c_uint),
                 ("unused1", ctypes.c_ulonglong),
                 ("unused2", ctypes.c_ulonglong),
                 ("unused3", ctypes.c_ulonglong),
                 ("unused4", ctypes.c_ulonglong),
                 ("buf_size", ctypes.c_ulonglong),
                 ("buf", ctypes.c_ubyte * args_buffer_size),
               ]

# the search ioctl returns one header for each item
class btrfs_ioctl_search_header(ctypes.Structure):
    _pack_ = 1
    _fields_ = [ ("transid", ctypes.c_ulonglong),
                 ("objectid", ctypes.c_ulonglong),
                 ("offset", ctypes.c_ulonglong),
                 ("type", ctypes.c_uint),
                 ("len", ctypes.c_uint),
               ]

# the type field in btrfs_file_extent_item
BTRFS_FILE_EXTENT_INLINE = 0
BTRFS_FILE_EXTENT_REG = 1
BTRFS_FILE_EXTENT_PREALLOC = 2
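
# this mirrors the on-disk struct btrfs_file_extent_item. It must be packed
# so the u64 fields after the single byte flags land at the same offsets the
# kernel uses (disk_bytenr starts 21 bytes in)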
class btrfs_file_extent_item(ctypes.LittleEndianStructure):
    _pack_ = 1
    _fields_ = [ ("generation", ctypes.c_ulonglong),
                 ("ram_bytes", ctypes.c_ulonglong),
                 ("compression", ctypes.c_ubyte),
                 ("encryption", ctypes.c_ubyte),
                 ("other_encoding", ctypes.c_ubyte * 2),
                 ("type", ctypes.c_ubyte),
                 ("disk_bytenr", ctypes.c_ulonglong),
                 ("disk_num_bytes", ctypes.c_ulonglong),
                 ("offset", ctypes.c_ulonglong),
                 ("num_bytes", ctypes.c_ulonglong),
               ]

class btrfs_block_group_item(ctypes.LittleEndianStructure):
    _pack_ = 1
    _fields_ = [ ("used", ctypes.c_ulonglong),
                 ("chunk_objectid", ctypes.c_ulonglong),
                 ("flags", ctypes.c_ulonglong),
               ]
class btrfs_ioctl_search():
    def __init__(self):
        self.args = btrfs_ioctl_search_args()
        self.args.tree_id = 0
        self.args.min_objectid = 0
        self.args.max_objectid = maxu64
        self.args.min_offset = 0
        self.args.max_offset = maxu64
        self.args.min_transid = 0
        self.args.max_transid = maxu64
        self.args.min_type = 0
        self.args.max_type = maxu32
        self.args.nr_items = 0
        self.args.buf_size = args_buffer_size

        # magic encoded for x86_64, this is the v2 search ioctl
        self.ioctl_num = 3228603409L
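        # (BTRFS_IOC_TREE_SEARCH_V2, i.e. _IOWR(0x94, 17,
        # struct btrfs_ioctl_search_args_v2) == 0xC0709411)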

    # the results of the search get stored into args.buf
    def search(self, fd, nritems=65536):
        self.args.nr_items = nritems
        fcntl.ioctl(fd, self.ioctl_num, self.args, 1)
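        # the kernel rewrites args.nr_items with the number of items it
        # actually copied into args.buf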

# this moves the search key forward by one. If the end result is
# still a valid search key (no min greater than its max), we return
# True. Otherwise False
def advance_search(search):
    if search.args.min_offset < maxu64:
        search.args.min_offset += 1
    elif search.args.min_type < 255:
        search.args.min_type += 1
    elif search.args.min_objectid < maxu64:
        search.args.min_objectid += 1
    else:
        return False

    if search.args.min_offset > search.args.max_offset:
        return False
    if search.args.min_type > search.args.max_type:
        return False
    if search.args.min_objectid > search.args.max_objectid:
        return False

    return True

# given one search_header and one file_item, print the details. This
# also tosses the [disk_bytenr,disk_num_bytes] into extent_hash to record
# which extents were used by this file
def print_one_extent(header, fi, extent_hash):
    # inline extents are stored in the btree leaf itself, so they get
    # special handling here
    if fi.type == BTRFS_FILE_EXTENT_INLINE:
        # header.len is the length of the item returned. We subtract
        # the part of the file item header that is actually used (21 bytes)
        # and we get the length of the inlined data.
        # this may or may not be compressed
        inline_len = header.len - 21
        if fi.compression:
            ram_bytes = fi.ram_bytes
        else:
            ram_bytes = inline_len
        print "(%Lu %Lu): ram %Lu disk 0 disk_size %Lu -- inline" % \
              (header.objectid, header.offset, ram_bytes, inline_len)
        extent_hash[-1] = inline_len
        return

    # a disk_bytenr of zero means this part of the file is a hole
    if fi.disk_bytenr == 0:
        tag = " -- hole"
    else:
        tag = ""

    print "(%Lu %Lu): ram %Lu disk %Lu disk_size %Lu%s" % (header.objectid,
          header.offset, fi.num_bytes, fi.disk_bytenr, fi.disk_num_bytes, tag)

    # holes don't reference a real extent on disk
    if fi.disk_bytenr:
        extent_hash[fi.disk_bytenr] = fi.disk_num_bytes
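
# a typical line of output (values are purely illustrative) looks like:
#   (257 0): ram 131072 disk 12582912 disk_size 131072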

# open 'filename' and run the search ioctl against it, printing all the
# extents we find
def print_file_extents(filename):
    extent_hash = {}

    s = btrfs_ioctl_search()
    s.args.min_type = BTRFS_EXTENT_DATA_KEY
    s.args.max_type = BTRFS_EXTENT_DATA_KEY

    try:
        fd = os.open(filename, os.O_RDONLY)
        st = os.fstat(fd)
    except Exception as e:
        sys.stderr.write("Failed to open %s (%s)\n" % (filename, e))
        return -1

    if not stat.S_ISREG(st.st_mode):
        sys.stderr.write("%s not a regular file\n" % filename)
        return 0

    # limit the search to this one inode
    s.args.min_objectid = st.st_ino
    s.args.max_objectid = st.st_ino

    while True:
        try:
            s.search(fd)
        except Exception as e:
            sys.stderr.write("Search ioctl failed for %s (%s)\n" % (filename, e))
            return -1

        if s.args.nr_items == 0:
            break

        # p is the results buffer from the kernel
        p = ctypes.addressof(s.args.buf)
        header = btrfs_ioctl_search_header()
        header_size = ctypes.sizeof(header)
        h = ctypes.addressof(header)
        p_left = args_buffer_size

        for x in xrange(0, s.args.nr_items):
            # for each item, copy the header from the buffer into
            # our local header struct
            ctypes.memmove(h, p, header_size)
            p += header_size
            p_left -= header_size

            # this would be a kernel bug, it shouldn't be sending
            # malformed items
            if p_left < header.len:
                break

            if header.type == BTRFS_EXTENT_DATA_KEY:
                fi = btrfs_file_extent_item()

                # this would also be a kernel bug
                if p_left < ctypes.sizeof(fi):
                    break

                # copy the file item out of the results buffer
                ctypes.memmove(ctypes.addressof(fi), p, ctypes.sizeof(fi))
                print_one_extent(header, fi, extent_hash)

            # skip past this item in the buffer
            p += header.len
            p_left -= header.len

            # remember the last key we saw so the next search can
            # continue from there
            s.args.min_offset = header.offset

        if not advance_search(s):
            break

    total_on_disk = 0
    total_extents = 0
    for x in extent_hash.itervalues():
        total_on_disk += x
        total_extents += 1

    # don't divide by zero
    if total_on_disk == 0:
        total_on_disk = 1

    print "file: %s extents %Lu disk size %Lu logical size %Lu ratio %.2f" % \
          (filename, total_extents, total_on_disk, st.st_size,
           float(st.st_size) / float(total_on_disk))
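
# walk every block group item in the extent tree and print a summary of the
# data block groups, plus a hint about which one a balance could empty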
def print_block_groups(mountpoint):
    s = btrfs_ioctl_search()
    s.args.min_type = BTRFS_BLOCK_GROUP_ITEM_KEY
    s.args.max_type = BTRFS_BLOCK_GROUP_ITEM_KEY
    s.args.tree_id = BTRFS_EXTENT_TREE_OBJECTID

    # track the least used data block group we find
    total_free = 0
    min_used = maxu64
    free_of_min_used = 0
    bg_of_min_used = 0

    try:
        fd = os.open(mountpoint, os.O_RDONLY)
    except Exception as e:
        sys.stderr.write("Failed to open %s (%s)\n" % (mountpoint, e))
        return -1

    while True:
        try:
            s.search(fd)
        except Exception as e:
            sys.stderr.write("Search ioctl failed for %s (%s)\n" % (mountpoint, e))
            return -1

        if s.args.nr_items == 0:
            break

        # p is the results buffer from the kernel
        p = ctypes.addressof(s.args.buf)
        header = btrfs_ioctl_search_header()
        header_size = ctypes.sizeof(header)
        h = ctypes.addressof(header)
        p_left = args_buffer_size

        for x in xrange(0, s.args.nr_items):
            # for each item, copy the header from the buffer into
            # our local header struct
            ctypes.memmove(h, p, header_size)
            p += header_size
            p_left -= header_size

            # this would be a kernel bug, it shouldn't be sending
            # malformed items
            if p_left < header.len:
                break

            if header.type == BTRFS_BLOCK_GROUP_ITEM_KEY:
                bg = btrfs_block_group_item()

                # this would be a kernel bug
                if p_left < ctypes.sizeof(bg):
                    break

                # copy the block group item out of the results buffer
                ctypes.memmove(ctypes.addressof(bg), p, ctypes.sizeof(bg))
                if bg.flags & BTRFS_BLOCK_GROUP_DATA:
                    print "block group offset %Lu len %Lu used %Lu chunk_objectid %Lu flags %Lu usage %.2f" %\
                          (header.objectid, header.offset, bg.used, bg.chunk_objectid,
                           bg.flags, float(bg.used) / float(header.offset))

                    total_free += (header.offset - bg.used)
                    if min_used >= bg.used:
                        min_used = bg.used
                        free_of_min_used = (header.offset - bg.used)
                        bg_of_min_used = header.objectid

            # skip past this item in the buffer and remember the last
            # block group we saw
            p += header.len
            p_left -= header.len
            s.args.min_objectid = header.objectid

        # move the search key past the last block group we saw
        if s.args.min_objectid < maxu64:
            s.args.min_objectid += 1
        if s.args.min_objectid > s.args.max_objectid:
            break

    print "total_free %Lu min_used %Lu free_of_min_used %Lu block_group_of_min_used %Lu" %\
          (total_free, min_used, free_of_min_used, bg_of_min_used)
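
    # if the free space in all the other data block groups is big enough to
    # absorb what is used in the least used block group, balancing just that
    # block group should let btrfs free an entire data block group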
    if (total_free - free_of_min_used) >= min_used:
        print "balancing block group (%Lu) can reduce the number of data block groups" % bg_of_min_used


parser = argparse.ArgumentParser()
parser.add_argument('path', nargs='+')
parser.add_argument('-b', '--block-group', action='store_const', const=1,
                    help='get block group information, use a mountpoint as "path"')
parser.add_argument('-f', '--file', action='store_const', const=1,
                    help='get file extent mapping, use a file path as "path"')

args = parser.parse_args()

if args.block_group:
    for i in args.path[0:]:
        print_block_groups(i)
else:
    # default to file extent mapping (this is also what -f selects)
    for f in args.path[0:]:
        print_file_extents(f)
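
# Example invocations (script name and paths are illustrative):
#   python file-extents.py -f /mnt/btrfs/some/large/file
#   python file-extents.py -b /mnt/btrfs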