// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * localalloc.c
 *
 * Node local data allocation
 *
 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/bitops.h>

#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "blockcheck.h"
#include "dlmglue.h"
#include "inode.h"
#include "journal.h"
#include "localalloc.h"
#include "suballoc.h"
#include "super.h"
#include "sysfile.h"
#include "ocfs2_trace.h"

#include "buffer_head_io.h"

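/*
 * The local alloc bitmap is embedded directly in the local alloc system
 * inode: it is the i_lab member of the on-disk dinode's id2 union, which
 * the macro below pulls out of a dinode pointer.
 */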
#define OCFS2_LOCAL_ALLOC(dinode)	(&((dinode)->id2.i_lab))

static u32 ocfs2_local_alloc_count_bits(struct ocfs2_dinode *alloc);

static int ocfs2_local_alloc_find_clear_bits(struct ocfs2_super *osb,
					     struct ocfs2_dinode *alloc,
					     u32 *numbits,
					     struct ocfs2_alloc_reservation *resv);

static void ocfs2_clear_local_alloc(struct ocfs2_dinode *alloc);

static int ocfs2_sync_local_to_main(struct ocfs2_super *osb,
				    handle_t *handle,
				    struct ocfs2_dinode *alloc,
				    struct inode *main_bm_inode,
				    struct buffer_head *main_bm_bh);

static int ocfs2_local_alloc_reserve_for_window(struct ocfs2_super *osb,
						struct ocfs2_alloc_context **ac,
						struct inode **bitmap_inode,
						struct buffer_head **bitmap_bh);

static int ocfs2_local_alloc_new_window(struct ocfs2_super *osb,
					handle_t *handle,
					struct ocfs2_alloc_context *ac);

static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
					  struct inode *local_alloc_inode);

/*
 * ocfs2_la_default_mb() - determine a default size, in megabytes of
 * the local alloc.
 *
 * Generally, we'd like to pick as large a local alloc as
 * possible. Performance on large workloads tends to scale
 * proportionally to la size. In addition to that, the reservations
 * code functions more efficiently as it can reserve more windows for
 * write.
 *
 * Some things work against us when trying to choose a large local alloc:
 *
 * - We need to ensure our sizing is picked to leave enough space in
 *   group descriptors for other allocations (such as block groups,
 *   etc). Picking default sizes which are a multiple of 4 could help
 *   - block groups are allocated in 2mb and 4mb chunks.
 *
 * - Likewise, we don't want to starve other nodes of bits on small
 *   file systems. This can easily be taken care of by limiting our
 *   default to a reasonable size (256M) on larger cluster sizes.
 *
 * - Some file systems can't support very large sizes - 4k and 8k in
 *   particular are limited to less than 128 and 256 megabytes respectively.
 *
 * The following reference table shows group descriptor and local
 * alloc maximums at various cluster sizes (4k blocksize)
 *
 * csize: 4K	group: 126M	la: 121M
 * csize: 8K	group: 252M	la: 243M
 * csize: 16K	group: 504M	la: 486M
 * csize: 32K	group: 1008M	la: 972M
 * csize: 64K	group: 2016M	la: 1944M
 * csize: 128K	group: 4032M	la: 3888M
 * csize: 256K	group: 8064M	la: 7776M
 * csize: 512K	group: 16128M	la: 15552M
 * csize: 1024K	group: 32256M	la: 31104M
 */
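/*
 * OCFS2_LA_MAX_DEFAULT_MB caps the default window computed below;
 * OCFS2_LA_OLD_DEFAULT is the legacy 8MB window, still used for the very
 * small group descriptor cases handled early in ocfs2_la_default_mb().
 */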
#define OCFS2_LA_MAX_DEFAULT_MB	256
#define OCFS2_LA_OLD_DEFAULT	8
unsigned int ocfs2_la_default_mb(struct ocfs2_super *osb)
{
	unsigned int la_mb;
	unsigned int gd_mb;
	unsigned int la_max_mb;
	unsigned int megs_per_slot;
	struct super_block *sb = osb->sb;

	gd_mb = ocfs2_clusters_to_megabytes(osb->sb,
		8 * ocfs2_group_bitmap_size(sb, 0, osb->s_feature_incompat));

	/*
	 * This takes care of file systems with very small group
	 * descriptors - 512 byte blocksize at cluster sizes lower
	 * than 16K and also 1k blocksize with 4k cluster size.
	 */
	if ((sb->s_blocksize == 512 && osb->s_clustersize <= 8192)
	    || (sb->s_blocksize == 1024 && osb->s_clustersize == 4096))
		return OCFS2_LA_OLD_DEFAULT;

	/*
	 * Leave enough room for some block groups and make the final
	 * value we work from a multiple of 4.
	 */
	gd_mb -= 16;
	gd_mb &= 0xFFFFFFFB;

	la_mb = gd_mb;

	/*
	 * Keep window sizes down to a reasonable default
	 */
	if (la_mb > OCFS2_LA_MAX_DEFAULT_MB) {
		/*
		 * Some clustersize / blocksize combinations will have
		 * given us a larger than OCFS2_LA_MAX_DEFAULT_MB
		 * default size, but get poor distribution when
		 * limited to exactly 256 megabytes.
		 *
		 * As an example, 16K clustersize at 4K blocksize
		 * gives us a cluster group size of 504M. Paring the
		 * local alloc size down to 256 however, would give us
		 * only one window and around 200MB left in the
		 * cluster group. Instead, find the first size below
		 * 256 which would give us an even distribution.
		 *
		 * Larger cluster group sizes actually work out pretty
		 * well when pared to 256, so we don't have to do this
		 * for any group that fits more than two
		 * OCFS2_LA_MAX_DEFAULT_MB windows.
		 */
		if (gd_mb > (2 * OCFS2_LA_MAX_DEFAULT_MB))
			la_mb = 256;
		else {
			unsigned int gd_mult = gd_mb;

			while (gd_mult > 256)
				gd_mult = gd_mult >> 1;

			la_mb = gd_mult;
		}
	}

	megs_per_slot = osb->osb_clusters_at_boot / osb->max_slots;
	megs_per_slot = ocfs2_clusters_to_megabytes(osb->sb, megs_per_slot);
	/* Too many nodes, too few disk clusters. */
	if (megs_per_slot < la_mb)
		la_mb = megs_per_slot;

	/* We can't store more bits than we can in a block. */
	la_max_mb = ocfs2_clusters_to_megabytes(osb->sb,
						ocfs2_local_alloc_size(sb) * 8);
	if (la_mb > la_max_mb)
		la_mb = la_max_mb;

	return la_mb;
}

void ocfs2_la_set_sizes(struct ocfs2_super *osb, int requested_mb)
{
	struct super_block *sb = osb->sb;
	unsigned int la_default_mb = ocfs2_la_default_mb(osb);
	unsigned int la_max_mb;

	la_max_mb = ocfs2_clusters_to_megabytes(sb,
						ocfs2_local_alloc_size(sb) * 8);

	trace_ocfs2_la_set_sizes(requested_mb, la_max_mb, la_default_mb);

	if (requested_mb == -1) {
		/* No user request - use defaults */
		osb->local_alloc_default_bits =
			ocfs2_megabytes_to_clusters(sb, la_default_mb);
	} else if (requested_mb > la_max_mb) {
		/* Request is too big, we give the maximum available */
		osb->local_alloc_default_bits =
			ocfs2_megabytes_to_clusters(sb, la_max_mb);
	} else {
		osb->local_alloc_default_bits =
			ocfs2_megabytes_to_clusters(sb, requested_mb);
	}

	osb->local_alloc_bits = osb->local_alloc_default_bits;
}

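/*
 * Presumably invoked at mount time: requested_mb carries the value of the
 * localalloc= mount option when one was given, or -1 when it was not, in
 * which case the computed default above is used.
 */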
static inline int ocfs2_la_state_enabled(struct ocfs2_super *osb)
{
	return (osb->local_alloc_state == OCFS2_LA_THROTTLED ||
		osb->local_alloc_state == OCFS2_LA_ENABLED);
}

void ocfs2_local_alloc_seen_free_bits(struct ocfs2_super *osb,
				      unsigned int num_clusters)
{
	spin_lock(&osb->osb_lock);
	if (osb->local_alloc_state == OCFS2_LA_DISABLED ||
	    osb->local_alloc_state == OCFS2_LA_THROTTLED)
		if (num_clusters >= osb->local_alloc_default_bits) {
			cancel_delayed_work(&osb->la_enable_wq);
			osb->local_alloc_state = OCFS2_LA_ENABLED;
		}
	spin_unlock(&osb->osb_lock);
}

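/*
 * Re-enable path: ocfs2_recalc_la_window() below queues la_enable_wq with a
 * delay of OCFS2_LA_ENABLE_INTERVAL whenever it throttles or disables the
 * local alloc. The worker simply flips the state back to ENABLED, and
 * ocfs2_local_alloc_seen_free_bits() above can cancel the delayed work and
 * re-enable early once enough free clusters are seen again.
 */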
void ocfs2_la_enable_worker(struct work_struct *work)
{
	struct ocfs2_super *osb =
		container_of(work, struct ocfs2_super,
			     la_enable_wq.work);
	spin_lock(&osb->osb_lock);
	osb->local_alloc_state = OCFS2_LA_ENABLED;
	spin_unlock(&osb->osb_lock);
}

/*
 * Tell us whether a given allocation should use the local alloc
 * file. Otherwise, it has to go to the main bitmap.
 *
 * This function does semi-dirty reads of local alloc size and state!
 * This is ok however, as the values are re-checked once under mutex.
 */
int ocfs2_alloc_should_use_local(struct ocfs2_super *osb, u64 bits)
{
	int ret = 0;
	int la_bits;

	spin_lock(&osb->osb_lock);
	la_bits = osb->local_alloc_bits;

	if (!ocfs2_la_state_enabled(osb))
		goto bail;

	/* la_bits should be at least twice the size (in clusters) of
	 * a new block group. We want to be sure block group
	 * allocations go through the local alloc, so allow an
	 * allocation to take up to half the bitmap. */
	if (bits > (la_bits / 2))
		goto bail;

	ret = 1;
bail:
	trace_ocfs2_alloc_should_use_local(
	     (unsigned long long)bits, osb->local_alloc_state, la_bits, ret);
	spin_unlock(&osb->osb_lock);
	return ret;
}

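/*
 * Illustrative caller pattern (a sketch only, not the actual suballoc.c
 * code): the cluster allocator asks the question above first and falls
 * back to a main bitmap reservation when the answer is no:
 *
 *	if (ocfs2_alloc_should_use_local(osb, bits_wanted))
 *		status = ocfs2_reserve_local_alloc_bits(osb, bits_wanted, ac);
 *	else
 *		status = ocfs2_reserve_cluster_bitmap_bits(osb, ac);
 */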
int ocfs2_load_local_alloc(struct ocfs2_super *osb)
{
	int status = 0;
	struct ocfs2_dinode *alloc = NULL;
	struct buffer_head *alloc_bh = NULL;
	u32 num_used;
	struct inode *inode = NULL;
	struct ocfs2_local_alloc *la;

	if (osb->local_alloc_bits == 0)
		goto bail;

	if (osb->local_alloc_bits >= osb->bitmap_cpg) {
		mlog(ML_NOTICE, "Requested local alloc window %d is larger "
		     "than max possible %u. Using defaults.\n",
		     osb->local_alloc_bits, (osb->bitmap_cpg - 1));
		osb->local_alloc_bits =
			ocfs2_megabytes_to_clusters(osb->sb,
						    ocfs2_la_default_mb(osb));
	}

	/* read the alloc off disk */
	inode = ocfs2_get_system_file_inode(osb, LOCAL_ALLOC_SYSTEM_INODE,
					    osb->slot_num);
	if (!inode) {
		status = -EINVAL;
		mlog_errno(status);
		goto bail;
	}

	status = ocfs2_read_inode_block_full(inode, &alloc_bh,
					     OCFS2_BH_IGNORE_CACHE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	alloc = (struct ocfs2_dinode *) alloc_bh->b_data;
	la = OCFS2_LOCAL_ALLOC(alloc);

	if (!(le32_to_cpu(alloc->i_flags) &
	    (OCFS2_LOCAL_ALLOC_FL|OCFS2_BITMAP_FL))) {
		mlog(ML_ERROR, "Invalid local alloc inode, %llu\n",
		     (unsigned long long)OCFS2_I(inode)->ip_blkno);
		status = -EINVAL;
		goto bail;
	}

	if ((la->la_size == 0) ||
	    (le16_to_cpu(la->la_size) > ocfs2_local_alloc_size(inode->i_sb))) {
		mlog(ML_ERROR, "Local alloc size is invalid (la_size = %u)\n",
		     le16_to_cpu(la->la_size));
		status = -EINVAL;
		goto bail;
	}

	/* do a little verification. */
	num_used = ocfs2_local_alloc_count_bits(alloc);

	/* hopefully the local alloc has always been recovered before
	 * we load it. */
	if (num_used
	    || alloc->id1.bitmap1.i_used
	    || alloc->id1.bitmap1.i_total
	    || la->la_bm_off) {
		mlog(ML_ERROR, "inconsistency detected, clean journal with"
		     " unrecovered local alloc, please run fsck.ocfs2!\n"
		     "found = %u, set = %u, taken = %u, off = %u\n",
		     num_used, le32_to_cpu(alloc->id1.bitmap1.i_used),
		     le32_to_cpu(alloc->id1.bitmap1.i_total),
		     OCFS2_LOCAL_ALLOC(alloc)->la_bm_off);

		status = -EINVAL;
		goto bail;
	}

	osb->local_alloc_bh = alloc_bh;
	osb->local_alloc_state = OCFS2_LA_ENABLED;

bail:
	if (status < 0)
		brelse(alloc_bh);
	if (inode)
		iput(inode);

	trace_ocfs2_load_local_alloc(osb->local_alloc_bits);

	if (status)
		mlog_errno(status);
	return status;
}

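/*
 * Note the invariant the consistency check above enforces: after a clean
 * shutdown or a completed recovery the on-disk local alloc must be empty
 * (zero i_used, zero i_total, zero la_bm_off and an all-zero bitmap).
 * Anything else means the journal was clean but the local alloc was never
 * synced back to the main bitmap.
 */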
/*
 * return any unused bits to the bitmap and write out a clean
 * local_alloc.
 *
 * local_alloc_bh is optional. If not passed, we will simply use the
 * one off osb. If you do pass it however, be warned that it *will* be
 * returned brelse'd and NULL'd out. */
void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb)
{
	int status;
	handle_t *handle;
	struct inode *local_alloc_inode = NULL;
	struct buffer_head *bh = NULL;
	struct buffer_head *main_bm_bh = NULL;
	struct inode *main_bm_inode = NULL;
	struct ocfs2_dinode *alloc_copy = NULL;
	struct ocfs2_dinode *alloc = NULL;

	cancel_delayed_work(&osb->la_enable_wq);
	flush_workqueue(osb->ocfs2_wq);

	if (osb->local_alloc_state == OCFS2_LA_UNUSED)
		goto out;

	local_alloc_inode =
		ocfs2_get_system_file_inode(osb,
					    LOCAL_ALLOC_SYSTEM_INODE,
					    osb->slot_num);
	if (!local_alloc_inode) {
		status = -ENOENT;
		mlog_errno(status);
		goto out;
	}

	osb->local_alloc_state = OCFS2_LA_DISABLED;

	ocfs2_resmap_uninit(&osb->osb_la_resmap);

	main_bm_inode = ocfs2_get_system_file_inode(osb,
						    GLOBAL_BITMAP_SYSTEM_INODE,
						    OCFS2_INVALID_SLOT);
	if (!main_bm_inode) {
		status = -EINVAL;
		mlog_errno(status);
		goto out;
	}

	inode_lock(main_bm_inode);

	status = ocfs2_inode_lock(main_bm_inode, &main_bm_bh, 1);
	if (status < 0) {
		mlog_errno(status);
		goto out_mutex;
	}

	/* WINDOW_MOVE_CREDITS is a bit heavy... */
	handle = ocfs2_start_trans(osb, OCFS2_WINDOW_MOVE_CREDITS);
	if (IS_ERR(handle)) {
		mlog_errno(PTR_ERR(handle));
		handle = NULL;
		goto out_unlock;
	}

	bh = osb->local_alloc_bh;
	alloc = (struct ocfs2_dinode *) bh->b_data;

	alloc_copy = kmemdup(alloc, bh->b_size, GFP_NOFS);
	if (!alloc_copy) {
		status = -ENOMEM;
		goto out_commit;
	}

	status = ocfs2_journal_access_di(handle, INODE_CACHE(local_alloc_inode),
					 bh, OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto out_commit;
	}

	ocfs2_clear_local_alloc(alloc);
	ocfs2_journal_dirty(handle, bh);

	brelse(bh);
	osb->local_alloc_bh = NULL;
	osb->local_alloc_state = OCFS2_LA_UNUSED;

	status = ocfs2_sync_local_to_main(osb, handle, alloc_copy,
					  main_bm_inode, main_bm_bh);
	if (status < 0)
		mlog_errno(status);

out_commit:
	ocfs2_commit_trans(osb, handle);

out_unlock:
	brelse(main_bm_bh);

	ocfs2_inode_unlock(main_bm_inode, 1);

out_mutex:
	inode_unlock(main_bm_inode);
	iput(main_bm_inode);

out:
	iput(local_alloc_inode);

	kfree(alloc_copy);
}

/*
 * We want to free the bitmap bits outside of any recovery context as
 * we'll need a cluster lock to do so, but we must clear the local
 * alloc before giving up the recovered node's journal. To solve this,
 * we kmalloc a copy of the local alloc before it's changed for the
 * caller to process with ocfs2_complete_local_alloc_recovery
 */
int ocfs2_begin_local_alloc_recovery(struct ocfs2_super *osb,
				     int slot_num,
				     struct ocfs2_dinode **alloc_copy)
{
	int status = 0;
	struct buffer_head *alloc_bh = NULL;
	struct inode *inode = NULL;
	struct ocfs2_dinode *alloc;

	trace_ocfs2_begin_local_alloc_recovery(slot_num);

	*alloc_copy = NULL;

	inode = ocfs2_get_system_file_inode(osb,
					    LOCAL_ALLOC_SYSTEM_INODE,
					    slot_num);
	if (!inode) {
		status = -EINVAL;
		mlog_errno(status);
		goto bail;
	}

	inode_lock(inode);

	status = ocfs2_read_inode_block_full(inode, &alloc_bh,
					     OCFS2_BH_IGNORE_CACHE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	*alloc_copy = kmalloc(alloc_bh->b_size, GFP_KERNEL);
	if (!(*alloc_copy)) {
		status = -ENOMEM;
		goto bail;
	}
	memcpy((*alloc_copy), alloc_bh->b_data, alloc_bh->b_size);

	alloc = (struct ocfs2_dinode *) alloc_bh->b_data;
	ocfs2_clear_local_alloc(alloc);

	ocfs2_compute_meta_ecc(osb->sb, alloc_bh->b_data, &alloc->i_check);
	status = ocfs2_write_block(osb, alloc_bh, INODE_CACHE(inode));
	if (status < 0)
		mlog_errno(status);

bail:
	if ((status < 0) && (*alloc_copy)) {
		kfree(*alloc_copy);
		*alloc_copy = NULL;
	}

	brelse(alloc_bh);

	if (inode) {
		inode_unlock(inode);
		iput(inode);
	}

	if (status)
		mlog_errno(status);
	return status;
}

/*
 * Step 2: By now, we've completed the journal recovery, we've stamped
 * a clean local alloc on disk and dropped the node out of the
 * recovery map. Dlm locks will no longer stall, so let's clear out the
 * main bitmap.
 */
int ocfs2_complete_local_alloc_recovery(struct ocfs2_super *osb,
					struct ocfs2_dinode *alloc)
{
	int status;
	handle_t *handle;
	struct buffer_head *main_bm_bh = NULL;
	struct inode *main_bm_inode;

	main_bm_inode = ocfs2_get_system_file_inode(osb,
						    GLOBAL_BITMAP_SYSTEM_INODE,
						    OCFS2_INVALID_SLOT);
	if (!main_bm_inode) {
		status = -EINVAL;
		mlog_errno(status);
		goto out;
	}

	inode_lock(main_bm_inode);

	status = ocfs2_inode_lock(main_bm_inode, &main_bm_bh, 1);
	if (status < 0) {
		mlog_errno(status);
		goto out_mutex;
	}

	handle = ocfs2_start_trans(osb, OCFS2_WINDOW_MOVE_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		handle = NULL;
		mlog_errno(status);
		goto out_unlock;
	}

	/* we want the bitmap change to be recorded on disk asap */
	handle->h_sync = 1;

	status = ocfs2_sync_local_to_main(osb, handle, alloc,
					  main_bm_inode, main_bm_bh);
	if (status < 0)
		mlog_errno(status);

	ocfs2_commit_trans(osb, handle);

out_unlock:
	ocfs2_inode_unlock(main_bm_inode, 1);

out_mutex:
	inode_unlock(main_bm_inode);

	brelse(main_bm_bh);

	iput(main_bm_inode);

out:
	if (!status)
		ocfs2_init_steal_slots(osb);
	if (status)
		mlog_errno(status);
	return status;
}

/*
 * make sure we've got at least bits_wanted contiguous bits in the
 * local alloc. You lose them when you drop i_rwsem.
 *
 * We will add ourselves to the transaction passed in, but may start
 * our own in order to shift windows.
 */
int ocfs2_reserve_local_alloc_bits(struct ocfs2_super *osb,
				   u32 bits_wanted,
				   struct ocfs2_alloc_context *ac)
{
	int status;
	struct ocfs2_dinode *alloc;
	struct inode *local_alloc_inode;
	unsigned int free_bits;

	local_alloc_inode =
		ocfs2_get_system_file_inode(osb,
					    LOCAL_ALLOC_SYSTEM_INODE,
					    osb->slot_num);
	if (!local_alloc_inode) {
		status = -ENOENT;
		mlog_errno(status);
		goto bail;
	}

	inode_lock(local_alloc_inode);

	/*
	 * We must double check state and allocator bits because
	 * another process may have changed them while holding i_rwsem.
	 */
	spin_lock(&osb->osb_lock);
	if (!ocfs2_la_state_enabled(osb) ||
	    (bits_wanted > osb->local_alloc_bits)) {
		spin_unlock(&osb->osb_lock);
		status = -ENOSPC;
		goto bail;
	}
	spin_unlock(&osb->osb_lock);

	alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;

#ifdef CONFIG_OCFS2_DEBUG_FS
	if (le32_to_cpu(alloc->id1.bitmap1.i_used) !=
	    ocfs2_local_alloc_count_bits(alloc)) {
		status = ocfs2_error(osb->sb, "local alloc inode %llu says it has %u used bits, but a count shows %u\n",
				(unsigned long long)le64_to_cpu(alloc->i_blkno),
				le32_to_cpu(alloc->id1.bitmap1.i_used),
				ocfs2_local_alloc_count_bits(alloc));
		goto bail;
	}
#endif

	free_bits = le32_to_cpu(alloc->id1.bitmap1.i_total) -
		le32_to_cpu(alloc->id1.bitmap1.i_used);
	if (bits_wanted > free_bits) {
		/* uhoh, window change time. */
		status =
			ocfs2_local_alloc_slide_window(osb, local_alloc_inode);
		if (status < 0) {
			if (status != -ENOSPC)
				mlog_errno(status);
			goto bail;
		}

		/*
		 * Under certain conditions, the window slide code
		 * might have reduced the number of bits available or
		 * disabled the local alloc entirely. Re-check
		 * here and return -ENOSPC if necessary.
		 */
		status = -ENOSPC;
		if (!ocfs2_la_state_enabled(osb))
			goto bail;

		free_bits = le32_to_cpu(alloc->id1.bitmap1.i_total) -
			le32_to_cpu(alloc->id1.bitmap1.i_used);
		if (bits_wanted > free_bits)
			goto bail;
	}

	ac->ac_inode = local_alloc_inode;
	/* We should never use localalloc from another slot */
	ac->ac_alloc_slot = osb->slot_num;
	ac->ac_which = OCFS2_AC_USE_LOCAL;
	get_bh(osb->local_alloc_bh);
	ac->ac_bh = osb->local_alloc_bh;
	status = 0;
bail:
	if (status < 0 && local_alloc_inode) {
		inode_unlock(local_alloc_inode);
		iput(local_alloc_inode);
	}

	trace_ocfs2_reserve_local_alloc_bits(
	     (unsigned long long)ac->ac_max_block,
	     bits_wanted, osb->slot_num, status);

	if (status)
		mlog_errno(status);
	return status;
}

int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb,
				 handle_t *handle,
				 struct ocfs2_alloc_context *ac,
				 u32 bits_wanted,
				 u32 *bit_off,
				 u32 *num_bits)
{
	int status, start;
	struct inode *local_alloc_inode;
	void *bitmap;
	struct ocfs2_dinode *alloc;
	struct ocfs2_local_alloc *la;

	BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL);

	local_alloc_inode = ac->ac_inode;
	alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;
	la = OCFS2_LOCAL_ALLOC(alloc);

	start = ocfs2_local_alloc_find_clear_bits(osb, alloc, &bits_wanted,
						  ac->ac_resv);
	if (start == -1) {
		/* TODO: Shouldn't we just BUG here? */
		status = -ENOSPC;
		mlog_errno(status);
		goto bail;
	}

	bitmap = la->la_bitmap;
	*bit_off = le32_to_cpu(la->la_bm_off) + start;
	*num_bits = bits_wanted;

	status = ocfs2_journal_access_di(handle,
					 INODE_CACHE(local_alloc_inode),
					 osb->local_alloc_bh,
					 OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	ocfs2_resmap_claimed_bits(&osb->osb_la_resmap, ac->ac_resv, start,
				  bits_wanted);

	while (bits_wanted--)
		ocfs2_set_bit(start++, bitmap);

	le32_add_cpu(&alloc->id1.bitmap1.i_used, *num_bits);
	ocfs2_journal_dirty(handle, osb->local_alloc_bh);

bail:
	if (status)
		mlog_errno(status);
	return status;
}

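/*
 * Typical lifecycle of a local alloc allocation, roughly (a sketch; the
 * real callers live in the generic allocator code):
 *
 *	ocfs2_alloc_should_use_local()   - decide local alloc vs main bitmap
 *	ocfs2_reserve_local_alloc_bits() - pin the window and set up the ac
 *	ocfs2_claim_local_alloc_bits()   - mark bits used inside the window
 *	ocfs2_free_local_alloc_bits()    - undo a claim within the same window
 */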
int ocfs2_free_local_alloc_bits(struct ocfs2_super *osb,
				handle_t *handle,
				struct ocfs2_alloc_context *ac,
				u32 bit_off,
				u32 num_bits)
{
	int status, start;
	u32 clear_bits;
	struct inode *local_alloc_inode;
	void *bitmap;
	struct ocfs2_dinode *alloc;
	struct ocfs2_local_alloc *la;

	BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL);

	local_alloc_inode = ac->ac_inode;
	alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;
	la = OCFS2_LOCAL_ALLOC(alloc);

	bitmap = la->la_bitmap;
	start = bit_off - le32_to_cpu(la->la_bm_off);
	clear_bits = num_bits;

	status = ocfs2_journal_access_di(handle,
					 INODE_CACHE(local_alloc_inode),
					 osb->local_alloc_bh,
					 OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	while (clear_bits--)
		ocfs2_clear_bit(start++, bitmap);

	le32_add_cpu(&alloc->id1.bitmap1.i_used, -num_bits);
	ocfs2_journal_dirty(handle, osb->local_alloc_bh);

bail:
	return status;
}

static u32 ocfs2_local_alloc_count_bits(struct ocfs2_dinode *alloc)
{
	u32 count;
	struct ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc);

	count = memweight(la->la_bitmap, le16_to_cpu(la->la_size));

	trace_ocfs2_local_alloc_count_bits(count);
	return count;
}

static int ocfs2_local_alloc_find_clear_bits(struct ocfs2_super *osb,
					     struct ocfs2_dinode *alloc,
					     u32 *numbits,
					     struct ocfs2_alloc_reservation *resv)
{
	int numfound = 0, bitoff, left, startoff;
	int local_resv = 0;
	struct ocfs2_alloc_reservation r;
	void *bitmap = NULL;
	struct ocfs2_reservation_map *resmap = &osb->osb_la_resmap;

	if (!alloc->id1.bitmap1.i_total) {
		bitoff = -1;
		goto bail;
	}

	if (!resv) {
		local_resv = 1;
		ocfs2_resv_init_once(&r);
		ocfs2_resv_set_type(&r, OCFS2_RESV_FLAG_TMP);
		resv = &r;
	}

	numfound = *numbits;
	if (ocfs2_resmap_resv_bits(resmap, resv, &bitoff, &numfound) == 0) {
		if (numfound < *numbits)
			*numbits = numfound;
		goto bail;
	}

	/*
	 * Code error. While reservations are enabled, local
	 * allocation should _always_ go through them.
	 */
	BUG_ON(osb->osb_resv_level != 0);

	/*
	 * Reservations are disabled. Handle this the old way.
	 */
	bitmap = OCFS2_LOCAL_ALLOC(alloc)->la_bitmap;

	numfound = bitoff = startoff = 0;
	left = le32_to_cpu(alloc->id1.bitmap1.i_total);
	while ((bitoff = ocfs2_find_next_zero_bit(bitmap, left, startoff)) != -1) {
		if (bitoff == left) {
			/* mlog(0, "bitoff (%d) == left", bitoff); */
			break;
		}
		/* mlog(0, "Found a zero: bitoff = %d, startoff = %d, "
		   "numfound = %d\n", bitoff, startoff, numfound);*/

		/* Ok, we found a zero bit... is it contig. or do we
		 * start over?*/
		if (bitoff == startoff) {
			/* we found a zero */
			numfound++;
			startoff++;
		} else {
			/* got a zero after some ones */
			numfound = 1;
			startoff = bitoff+1;
		}
		/* we got everything we needed */
		if (numfound == *numbits) {
			/* mlog(0, "Found it all!\n"); */
			break;
		}
	}

	trace_ocfs2_local_alloc_find_clear_bits_search_bitmap(bitoff, numfound);

	if (numfound == *numbits)
		bitoff = startoff - numfound;
	else
		bitoff = -1;

bail:
	if (local_resv)
		ocfs2_resv_discard(resmap, resv);

	trace_ocfs2_local_alloc_find_clear_bits(*numbits,
		le32_to_cpu(alloc->id1.bitmap1.i_total),
		bitoff, numfound);

	return bitoff;
}

static void ocfs2_clear_local_alloc(struct ocfs2_dinode *alloc)
{
	struct ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc);
	int i;

	alloc->id1.bitmap1.i_total = 0;
	alloc->id1.bitmap1.i_used = 0;
	la->la_bm_off = 0;
	for(i = 0; i < le16_to_cpu(la->la_size); i++)
		la->la_bitmap[i] = 0;
}

#if 0
/* turn this on and uncomment below to aid debugging window shifts. */
static void ocfs2_verify_zero_bits(unsigned long *bitmap,
				   unsigned int start,
				   unsigned int count)
{
	unsigned int tmp = count;
	while (tmp--) {
		if (ocfs2_test_bit(start + tmp, bitmap)) {
			printk("ocfs2_verify_zero_bits: start = %u, count = "
			       "%u\n", start, count);
			printk("ocfs2_verify_zero_bits: bit %u is set!",
			       start + tmp);
			BUG();
		}
	}
}
#endif

/*
 * sync the local alloc to main bitmap.
 *
 * assumes you've already locked the main bitmap -- the bitmap inode
 * passed is used for caching.
 */
static int ocfs2_sync_local_to_main(struct ocfs2_super *osb,
				    handle_t *handle,
				    struct ocfs2_dinode *alloc,
				    struct inode *main_bm_inode,
				    struct buffer_head *main_bm_bh)
{
	int status = 0;
	int bit_off, left, count, start;
	u64 la_start_blk;
	u64 blkno;
	void *bitmap;
	struct ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc);

	trace_ocfs2_sync_local_to_main(
	     le32_to_cpu(alloc->id1.bitmap1.i_total),
	     le32_to_cpu(alloc->id1.bitmap1.i_used));

	if (!alloc->id1.bitmap1.i_total) {
		goto bail;
	}

	if (le32_to_cpu(alloc->id1.bitmap1.i_used) ==
	    le32_to_cpu(alloc->id1.bitmap1.i_total)) {
		goto bail;
	}

	la_start_blk = ocfs2_clusters_to_blocks(osb->sb,
						le32_to_cpu(la->la_bm_off));
	bitmap = la->la_bitmap;
	start = count = 0;
	left = le32_to_cpu(alloc->id1.bitmap1.i_total);

	while ((bit_off = ocfs2_find_next_zero_bit(bitmap, left, start))
	       != -1) {
		if ((bit_off < left) && (bit_off == start)) {
			count++;
			start++;
			continue;
		}
		if (count) {
			blkno = la_start_blk +
				ocfs2_clusters_to_blocks(osb->sb,
							 start - count);

			trace_ocfs2_sync_local_to_main_free(
			     count, start - count,
			     (unsigned long long)la_start_blk,
			     (unsigned long long)blkno);

			status = ocfs2_release_clusters(handle,
							main_bm_inode,
							main_bm_bh, blkno,
							count);
			if (status < 0) {
				mlog_errno(status);
				goto bail;
			}
		}
		if (bit_off >= left)
			break;
		count = 1;
		start = bit_off + 1;
	}

bail:
	if (status)
		mlog_errno(status);
	return status;
}

enum ocfs2_la_event {
	OCFS2_LA_EVENT_SLIDE,		/* Normal window slide. */
	OCFS2_LA_EVENT_FRAGMENTED,	/* The global bitmap has
					 * enough bits theoretically
					 * free, but a contiguous
					 * allocation could not be
					 * found. */
	OCFS2_LA_EVENT_ENOSPC,		/* Global bitmap doesn't have
					 * enough bits free to satisfy
					 * our request. */
};
#define OCFS2_LA_ENABLE_INTERVAL (30 * HZ)
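/*
 * In short: a SLIDE event restores the default window size (unless we are
 * throttled), while FRAGMENTED and ENOSPC events halve the window, moving
 * the state to THROTTLED and finally to DISABLED once the window would
 * shrink below 1MB. A delayed work re-enables the local alloc later.
 */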
/*
 * Given an event, calculate the size of our next local alloc window.
 *
 * This should always be called under i_rwsem of the local alloc inode
 * so that local alloc disabling doesn't race with processes trying to
 * use the allocator.
 *
 * Returns the state which the local alloc was left in. This value can
 * be ignored by some paths.
 */
static int ocfs2_recalc_la_window(struct ocfs2_super *osb,
				  enum ocfs2_la_event event)
{
	unsigned int bits;
	int state;

	spin_lock(&osb->osb_lock);
	if (osb->local_alloc_state == OCFS2_LA_DISABLED) {
		WARN_ON_ONCE(osb->local_alloc_state == OCFS2_LA_DISABLED);
		goto out_unlock;
	}

	/*
	 * ENOSPC and fragmentation are treated similarly for now.
	 */
	if (event == OCFS2_LA_EVENT_ENOSPC ||
	    event == OCFS2_LA_EVENT_FRAGMENTED) {
		/*
		 * We ran out of contiguous space in the primary
		 * bitmap. Drastically reduce the number of bits used
		 * by local alloc until we have to disable it.
		 */
		bits = osb->local_alloc_bits >> 1;
		if (bits > ocfs2_megabytes_to_clusters(osb->sb, 1)) {
			/*
			 * By setting state to THROTTLED, we'll keep
			 * the number of local alloc bits used down
			 * until an event occurs which would give us
			 * reason to assume the bitmap situation might
			 * have changed.
			 */
			osb->local_alloc_state = OCFS2_LA_THROTTLED;
			osb->local_alloc_bits = bits;
		} else {
			osb->local_alloc_state = OCFS2_LA_DISABLED;
		}
		queue_delayed_work(osb->ocfs2_wq, &osb->la_enable_wq,
				   OCFS2_LA_ENABLE_INTERVAL);
		goto out_unlock;
	}

	/*
	 * Don't increase the size of the local alloc window until we
	 * know we might be able to fulfill the request. Otherwise, we
	 * risk bouncing around the global bitmap during periods of
	 * low space.
	 */
	if (osb->local_alloc_state != OCFS2_LA_THROTTLED)
		osb->local_alloc_bits = osb->local_alloc_default_bits;

out_unlock:
	state = osb->local_alloc_state;
	spin_unlock(&osb->osb_lock);

	return state;
}

static int ocfs2_local_alloc_reserve_for_window(struct ocfs2_super *osb,
						struct ocfs2_alloc_context **ac,
						struct inode **bitmap_inode,
						struct buffer_head **bitmap_bh)
{
	int status;

	*ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
	if (!(*ac)) {
		status = -ENOMEM;
		mlog_errno(status);
		goto bail;
	}

retry_enospc:
	(*ac)->ac_bits_wanted = osb->local_alloc_bits;
	status = ocfs2_reserve_cluster_bitmap_bits(osb, *ac);
	if (status == -ENOSPC) {
		if (ocfs2_recalc_la_window(osb, OCFS2_LA_EVENT_ENOSPC) ==
		    OCFS2_LA_DISABLED)
			goto bail;

		ocfs2_free_ac_resource(*ac);
		memset(*ac, 0, sizeof(struct ocfs2_alloc_context));
		goto retry_enospc;
	}
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	*bitmap_inode = (*ac)->ac_inode;
	igrab(*bitmap_inode);
	*bitmap_bh = (*ac)->ac_bh;

bail:
	if ((status < 0) && *ac) {
		ocfs2_free_alloc_context(*ac);
		*ac = NULL;
	}

	if (status)
		mlog_errno(status);
	return status;
}

/*
 * pass it the bitmap lock in lock_bh if you have it.
 */
static int ocfs2_local_alloc_new_window(struct ocfs2_super *osb,
					handle_t *handle,
					struct ocfs2_alloc_context *ac)
{
	int status = 0;
	u32 cluster_off, cluster_count;
	struct ocfs2_dinode *alloc = NULL;
	struct ocfs2_local_alloc *la;

	alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;
	la = OCFS2_LOCAL_ALLOC(alloc);

	trace_ocfs2_local_alloc_new_window(
	     le32_to_cpu(alloc->id1.bitmap1.i_total),
	     osb->local_alloc_bits);

	/* Instruct the allocation code to try the most recently used
	 * cluster group. We'll re-record the group used this pass
	 * below. */
	ac->ac_last_group = osb->la_last_gd;

	/* we used the generic suballoc reserve function, but we set
	 * everything up nicely, so there's no reason why we can't use
	 * the more specific cluster api to claim bits. */
	status = ocfs2_claim_clusters(handle, ac, osb->local_alloc_bits,
				      &cluster_off, &cluster_count);
	if (status == -ENOSPC) {
retry_enospc:
		/*
		 * Note: We could also try syncing the journal here to
		 * allow use of any free bits which the current
		 * transaction can't give us access to. --Mark
		 */
		if (ocfs2_recalc_la_window(osb, OCFS2_LA_EVENT_FRAGMENTED) ==
		    OCFS2_LA_DISABLED)
			goto bail;

		ac->ac_bits_wanted = osb->local_alloc_bits;
		status = ocfs2_claim_clusters(handle, ac,
					      osb->local_alloc_bits,
					      &cluster_off,
					      &cluster_count);
		if (status == -ENOSPC)
			goto retry_enospc;
		/*
		 * We only shrunk the *minimum* number of bits in our
		 * request - it's entirely possible that the allocator
		 * might give us more than we asked for.
		 */
		if (status == 0) {
			spin_lock(&osb->osb_lock);
			osb->local_alloc_bits = cluster_count;
			spin_unlock(&osb->osb_lock);
		}
	}
	if (status < 0) {
		if (status != -ENOSPC)
			mlog_errno(status);
		goto bail;
	}

	osb->la_last_gd = ac->ac_last_group;

	la->la_bm_off = cpu_to_le32(cluster_off);
	alloc->id1.bitmap1.i_total = cpu_to_le32(cluster_count);
	/* just in case... In the future when we find space ourselves,
	 * we don't have to get all contiguous -- but we'll have to
	 * set all previously used bits in bitmap and update
	 * la_bits_set before setting the bits in the main bitmap. */
	alloc->id1.bitmap1.i_used = 0;
	memset(OCFS2_LOCAL_ALLOC(alloc)->la_bitmap, 0,
	       le16_to_cpu(la->la_size));

	ocfs2_resmap_restart(&osb->osb_la_resmap, cluster_count,
			     OCFS2_LOCAL_ALLOC(alloc)->la_bitmap);

	trace_ocfs2_local_alloc_new_window_result(
	     OCFS2_LOCAL_ALLOC(alloc)->la_bm_off,
	     le32_to_cpu(alloc->id1.bitmap1.i_total));

bail:
	if (status)
		mlog_errno(status);
	return status;
}

/* Note that we do *NOT* lock the local alloc inode here as
 * it's been locked already for us. */
static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
					  struct inode *local_alloc_inode)
{
	int status = 0;
	struct buffer_head *main_bm_bh = NULL;
	struct inode *main_bm_inode = NULL;
	handle_t *handle = NULL;
	struct ocfs2_dinode *alloc;
	struct ocfs2_dinode *alloc_copy = NULL;
	struct ocfs2_alloc_context *ac = NULL;

	ocfs2_recalc_la_window(osb, OCFS2_LA_EVENT_SLIDE);

	/* This will lock the main bitmap for us. */
	status = ocfs2_local_alloc_reserve_for_window(osb,
						      &ac,
						      &main_bm_inode,
						      &main_bm_bh);
	if (status < 0) {
		if (status != -ENOSPC)
			mlog_errno(status);
		goto bail;
	}

	handle = ocfs2_start_trans(osb, OCFS2_WINDOW_MOVE_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		handle = NULL;
		mlog_errno(status);
		goto bail;
	}

	alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;

	/* We want to clear the local alloc before doing anything
	 * else, so that if we error later during this operation,
	 * local alloc shutdown won't try to double free main bitmap
	 * bits. Make a copy so the sync function knows which bits to
	 * free. */
	alloc_copy = kmemdup(alloc, osb->local_alloc_bh->b_size, GFP_NOFS);
	if (!alloc_copy) {
		status = -ENOMEM;
		mlog_errno(status);
		goto bail;
	}

	status = ocfs2_journal_access_di(handle,
					 INODE_CACHE(local_alloc_inode),
					 osb->local_alloc_bh,
					 OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	ocfs2_clear_local_alloc(alloc);
	ocfs2_journal_dirty(handle, osb->local_alloc_bh);

	status = ocfs2_sync_local_to_main(osb, handle, alloc_copy,
					  main_bm_inode, main_bm_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	status = ocfs2_local_alloc_new_window(osb, handle, ac);
	if (status < 0) {
		if (status != -ENOSPC)
			mlog_errno(status);
		goto bail;
	}

	atomic_inc(&osb->alloc_stats.moves);

bail:
	if (handle)
		ocfs2_commit_trans(osb, handle);

	brelse(main_bm_bh);

	iput(main_bm_inode);

	kfree(alloc_copy);

	if (ac)
		ocfs2_free_alloc_context(ac);

	if (status)
		mlog_errno(status);
	return status;
}