/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;
/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */
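
/*
 * Rough lifecycle, as implemented below: btrfs_add_delayed_tree_ref() and
 * btrfs_add_delayed_data_ref() queue a head node plus an individual ref
 * under the transaction's delayed_refs spinlock, btrfs_merge_delayed_refs()
 * cancels out opposing add/drop pairs, and btrfs_find_ref_cluster() hands
 * out batches of head nodes for the caller to run before the transaction
 * commits.
 */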

/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
                          struct btrfs_delayed_tree_ref *ref1, int type)
{
        if (type == BTRFS_TREE_BLOCK_REF_KEY) {
                if (ref1->root < ref2->root)
                        return -1;
                if (ref1->root > ref2->root)
                        return 1;
        } else {
                if (ref1->parent < ref2->parent)
                        return -1;
                if (ref1->parent > ref2->parent)
                        return 1;
        }
        return 0;
}

/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
                          struct btrfs_delayed_data_ref *ref1)
{
        if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
                if (ref1->root < ref2->root)
                        return -1;
                if (ref1->root > ref2->root)
                        return 1;
                if (ref1->objectid < ref2->objectid)
                        return -1;
                if (ref1->objectid > ref2->objectid)
                        return 1;
                if (ref1->offset < ref2->offset)
                        return -1;
                if (ref1->offset > ref2->offset)
                        return 1;
        } else {
                if (ref1->parent < ref2->parent)
                        return -1;
                if (ref1->parent > ref2->parent)
                        return 1;
        }
        return 0;
}

/*
 * entries in the rb tree are ordered by the byte number of the extent,
 * the type of the delayed backref and the content of the backref itself.
 */
static int comp_entry(struct btrfs_delayed_ref_node *ref2,
                      struct btrfs_delayed_ref_node *ref1,
                      bool compare_seq)
{
        if (ref1->bytenr < ref2->bytenr)
                return -1;
        if (ref1->bytenr > ref2->bytenr)
                return 1;
        if (ref1->is_head && ref2->is_head)
                return 0;
        if (ref2->is_head)
                return -1;
        if (ref1->is_head)
                return 1;
        if (ref1->type < ref2->type)
                return -1;
        if (ref1->type > ref2->type)
                return 1;
        /* merging of sequenced refs is not allowed */
        if (compare_seq) {
                if (ref1->seq < ref2->seq)
                        return -1;
                if (ref1->seq > ref2->seq)
                        return 1;
        }
        if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
            ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) {
                return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2),
                                      btrfs_delayed_node_to_tree_ref(ref1),
                                      ref1->type);
        } else if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY ||
                   ref1->type == BTRFS_SHARED_DATA_REF_KEY) {
                return comp_data_refs(btrfs_delayed_node_to_data_ref(ref2),
                                      btrfs_delayed_node_to_data_ref(ref1));
        }
        BUG();
        return 0;
}
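
/*
 * Note on argument order: comp_entry() and the helpers above compare their
 * second argument against their first, so a negative return means the
 * second argument sorts before the first.  tree_insert() below relies on
 * this by passing the existing rb tree entry first and the node being
 * inserted second.
 */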

/*
 * insert a new ref into the rbtree.  This returns the existing ref that
 * compares equal to the new one (same bytenr, type and content), or NULL
 * if the new node was properly inserted.
 */
static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
                                                  struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent_node = NULL;
        struct btrfs_delayed_ref_node *entry;
        struct btrfs_delayed_ref_node *ins;
        int cmp;

        ins = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
        while (*p) {
                parent_node = *p;
                entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
                                 rb_node);

                cmp = comp_entry(entry, ins, 1);
                if (cmp < 0)
                        p = &(*p)->rb_left;
                else if (cmp > 0)
                        p = &(*p)->rb_right;
                else
                        return entry;
        }

        rb_link_node(node, parent_node, p);
        rb_insert_color(node, root);
        return NULL;
}

/* insert a new head ref into the head-ref rbtree, keyed by bytenr only */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
                                                   struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent_node = NULL;
        struct btrfs_delayed_ref_head *entry;
        struct btrfs_delayed_ref_head *ins;
        u64 bytenr;

        ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
        bytenr = ins->node.bytenr;
        while (*p) {
                parent_node = *p;
                entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
                                 href_node);

                if (bytenr < entry->node.bytenr)
                        p = &(*p)->rb_left;
                else if (bytenr > entry->node.bytenr)
                        p = &(*p)->rb_right;
                else
                        return entry;
        }

        rb_link_node(node, parent_node, p);
        rb_insert_color(node, root);
        return NULL;
}

/*
 * find a head entry based on bytenr. This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If return_bigger is given, the next bigger entry is returned if no exact
 * match is found, wrapping around to the start of the tree if needed.
 */
static struct btrfs_delayed_ref_head *
find_ref_head(struct rb_root *root, u64 bytenr,
              struct btrfs_delayed_ref_head **last, int return_bigger)
{
        struct rb_node *n;
        struct btrfs_delayed_ref_head *entry;
        int cmp = 0;

again:
        n = root->rb_node;
        entry = NULL;
        while (n) {
                entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);
                if (last)
                        *last = entry;

                if (bytenr < entry->node.bytenr)
                        cmp = -1;
                else if (bytenr > entry->node.bytenr)
                        cmp = 1;
                else
                        cmp = 0;

                if (cmp < 0)
                        n = n->rb_left;
                else if (cmp > 0)
                        n = n->rb_right;
                else
                        return entry;
        }
        if (entry && return_bigger) {
                if (cmp > 0) {
                        n = rb_next(&entry->href_node);
                        if (!n)
                                n = rb_first(root);
                        entry = rb_entry(n, struct btrfs_delayed_ref_head,
                                         href_node);
                        bytenr = entry->node.bytenr;
                        return_bigger = 0;
                        goto again;
                }
                return entry;
        }
        return NULL;
}

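/*
 * Take the per-head mutex without letting go of the delayed_refs spinlock
 * in the fast path.  If the mutex is contended we drop the spinlock, sleep
 * on the mutex and retake the spinlock; -EAGAIN tells the caller that the
 * head was run and removed from the tree in the meantime and must be
 * looked up again.
 */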
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
                           struct btrfs_delayed_ref_head *head)
{
        struct btrfs_delayed_ref_root *delayed_refs;

        delayed_refs = &trans->transaction->delayed_refs;
        assert_spin_locked(&delayed_refs->lock);
        if (mutex_trylock(&head->mutex))
                return 0;

        atomic_inc(&head->node.refs);
        spin_unlock(&delayed_refs->lock);

        mutex_lock(&head->mutex);
        spin_lock(&delayed_refs->lock);
        if (!head->node.in_tree) {
                mutex_unlock(&head->mutex);
                btrfs_put_delayed_ref(&head->node);
                return -EAGAIN;
        }
        btrfs_put_delayed_ref(&head->node);
        return 0;
}

static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
                                    struct btrfs_delayed_ref_root *delayed_refs,
                                    struct btrfs_delayed_ref_node *ref)
{
        rb_erase(&ref->rb_node, &delayed_refs->root);
        if (btrfs_delayed_ref_is_head(ref)) {
                struct btrfs_delayed_ref_head *head;

                head = btrfs_delayed_node_to_head(ref);
                rb_erase(&head->href_node, &delayed_refs->href_root);
        }
        ref->in_tree = 0;
        btrfs_put_delayed_ref(ref);
        delayed_refs->num_entries--;
        if (trans->delayed_ref_updates)
                trans->delayed_ref_updates--;
}

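/*
 * Walk backwards from @ref over other refs queued against the same bytenr
 * and fold together any that comp_entry() considers equal (seq ignored):
 * matching actions add their ref_mod, opposing actions cancel, and a ref
 * whose ref_mod reaches zero is dropped entirely.  The walk stops at the
 * first ref whose seq is >= @seq.  Returns the number of refs merged away.
 */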
static int merge_ref(struct btrfs_trans_handle *trans,
                     struct btrfs_delayed_ref_root *delayed_refs,
                     struct btrfs_delayed_ref_node *ref, u64 seq)
{
        struct rb_node *node;
        int merged = 0;
        int mod = 0;
        int done = 0;

        node = rb_prev(&ref->rb_node);
        while (node) {
                struct btrfs_delayed_ref_node *next;

                next = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
                node = rb_prev(node);
                if (next->bytenr != ref->bytenr)
                        break;
                if (seq && next->seq >= seq)
                        break;
                if (comp_entry(ref, next, 0))
                        continue;

                if (ref->action == next->action) {
                        mod = next->ref_mod;
                } else {
                        if (ref->ref_mod < next->ref_mod) {
                                struct btrfs_delayed_ref_node *tmp;

                                tmp = ref;
                                ref = next;
                                next = tmp;
                                done = 1;
                        }
                        mod = -next->ref_mod;
                }

                merged++;
                drop_delayed_ref(trans, delayed_refs, next);
                ref->ref_mod += mod;
                if (ref->ref_mod == 0) {
                        drop_delayed_ref(trans, delayed_refs, ref);
                        break;
                } else {
                        /*
                         * You can't have multiples of the same ref on a tree
                         * block.
                         */
                        WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
                                ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
                }

                if (done)
                        break;
                node = rb_prev(&ref->rb_node);
        }

        return merged;
}

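/*
 * Merge everything mergeable that is queued behind @head.  Tree refs only:
 * data refs rarely accumulate enough duplicates to make this worthwhile.
 * The lowest seq still registered on the tree_mod_seq_list bounds what may
 * be merged, so backref walkers holding a seq keep a consistent view.
 */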
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
                              struct btrfs_fs_info *fs_info,
                              struct btrfs_delayed_ref_root *delayed_refs,
                              struct btrfs_delayed_ref_head *head)
{
        struct rb_node *node;
        u64 seq = 0;

        /*
         * We don't have too many refs to merge in the case of delayed data
         * refs.
         */
        if (head->is_data)
                return;

        spin_lock(&fs_info->tree_mod_seq_lock);
        if (!list_empty(&fs_info->tree_mod_seq_list)) {
                struct seq_list *elem;

                elem = list_first_entry(&fs_info->tree_mod_seq_list,
                                        struct seq_list, list);
                seq = elem->seq;
        }
        spin_unlock(&fs_info->tree_mod_seq_lock);

        node = rb_prev(&head->node.rb_node);
        while (node) {
                struct btrfs_delayed_ref_node *ref;

                ref = rb_entry(node, struct btrfs_delayed_ref_node,
                               rb_node);
                if (ref->bytenr != head->node.bytenr)
                        break;

                /* We can't merge refs that are outside of our seq count */
                if (seq && ref->seq >= seq)
                        break;
                if (merge_ref(trans, delayed_refs, ref, seq))
                        node = rb_prev(&head->node.rb_node);
                else
                        node = rb_prev(node);
        }
}

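/*
 * Returns 1 if a delayed ref carrying @seq must be held back because some
 * tree mod log user still holds a seq at or below it, 0 otherwise.
 */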
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
                            struct btrfs_delayed_ref_root *delayed_refs,
                            u64 seq)
{
        struct seq_list *elem;
        int ret = 0;

        spin_lock(&fs_info->tree_mod_seq_lock);
        if (!list_empty(&fs_info->tree_mod_seq_list)) {
                elem = list_first_entry(&fs_info->tree_mod_seq_list,
                                        struct seq_list, list);
                if (seq >= elem->seq) {
                        pr_debug("holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)\n",
                                 (u32)(seq >> 32), (u32)seq,
                                 (u32)(elem->seq >> 32), (u32)elem->seq,
                                 delayed_refs);
                        ret = 1;
                }
        }

        spin_unlock(&fs_info->tree_mod_seq_lock);
        return ret;
}

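/*
 * Gather up to 32 head refs, starting just past @start, onto @cluster so
 * the caller can run them as a batch.  Busy heads (already on somebody's
 * cluster) are skipped until the first free one is found and end the batch
 * afterwards, keeping the chosen extents close together on disk.  Wraps
 * around the tree once; returns 0 if any heads were found, 1 otherwise.
 */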
int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
                           struct list_head *cluster, u64 start)
{
        int count = 0;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct rb_node *node;
        struct btrfs_delayed_ref_head *head = NULL;

        delayed_refs = &trans->transaction->delayed_refs;
        node = rb_first(&delayed_refs->href_root);

        if (start) {
                find_ref_head(&delayed_refs->href_root, start + 1, &head, 1);
                if (head)
                        node = &head->href_node;
        }
again:
        while (node && count < 32) {
                head = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
                if (list_empty(&head->cluster)) {
                        list_add_tail(&head->cluster, cluster);
                        delayed_refs->run_delayed_start =
                                head->node.bytenr;
                        count++;

                        WARN_ON(delayed_refs->num_heads_ready == 0);
                        delayed_refs->num_heads_ready--;
                } else if (count) {
                        /* the goal of the clustering is to find extents
                         * that are likely to end up in the same extent
                         * leaf on disk.  So, we don't want them spread
                         * all over the tree.  Stop now if we've hit
                         * a head that was already in use
                         */
                        break;
                }
                node = rb_next(node);
        }
        if (count) {
                return 0;
        } else if (start) {
                /*
                 * we've gone to the end of the rbtree without finding any
                 * clusters.  start from the beginning and try again
                 */
                start = 0;
                node = rb_first(&delayed_refs->href_root);
                goto again;
        }
        return 1;
}

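/* undo btrfs_find_ref_cluster(): drop every head from the cluster list */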
void btrfs_release_ref_cluster(struct list_head *cluster)
{
        struct list_head *pos, *q;

        list_for_each_safe(pos, q, cluster)
                list_del_init(pos);
}

/*
 * helper function to update an extent delayed ref in the
 * rbtree.  existing and update must both have the same
 * bytenr and parent
 *
 * This may free existing if the update cancels out whatever
 * operation it was doing.
 */
static noinline void
update_existing_ref(struct btrfs_trans_handle *trans,
                    struct btrfs_delayed_ref_root *delayed_refs,
                    struct btrfs_delayed_ref_node *existing,
                    struct btrfs_delayed_ref_node *update)
{
        if (update->action != existing->action) {
                /*
                 * this is effectively undoing either an add or a
                 * drop.  We decrement the ref_mod, and if it goes
                 * down to zero we just delete the entry without
                 * ever changing the extent allocation tree.
                 */
                existing->ref_mod--;
                if (existing->ref_mod == 0)
                        drop_delayed_ref(trans, delayed_refs, existing);
                else
                        WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
                                existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
        } else {
                WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
                        existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
                /*
                 * the action on the existing ref matches
                 * the action on the ref we're trying to add.
                 * Bump the ref_mod by one so the backref that
                 * is eventually added/removed has the correct
                 * reference count
                 */
                existing->ref_mod += update->ref_mod;
        }
}

/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
                         struct btrfs_delayed_ref_node *update)
{
        struct btrfs_delayed_ref_head *existing_ref;
        struct btrfs_delayed_ref_head *ref;

        existing_ref = btrfs_delayed_node_to_head(existing);
        ref = btrfs_delayed_node_to_head(update);
        BUG_ON(existing_ref->is_data != ref->is_data);

        if (ref->must_insert_reserved) {
                /* if the extent was freed and then
                 * reallocated before the delayed ref
                 * entries were processed, we can end up
                 * with an existing head ref without
                 * the must_insert_reserved flag set.
                 * Set it again here
                 */
                existing_ref->must_insert_reserved = ref->must_insert_reserved;

                /*
                 * update the num_bytes so we make sure the accounting
                 * is done correctly
                 */
                existing->num_bytes = update->num_bytes;

        }

        if (ref->extent_op) {
                if (!existing_ref->extent_op) {
                        existing_ref->extent_op = ref->extent_op;
                } else {
                        if (ref->extent_op->update_key) {
                                memcpy(&existing_ref->extent_op->key,
                                       &ref->extent_op->key,
                                       sizeof(ref->extent_op->key));
                                existing_ref->extent_op->update_key = 1;
                        }
                        if (ref->extent_op->update_flags) {
                                existing_ref->extent_op->flags_to_set |=
                                        ref->extent_op->flags_to_set;
                                existing_ref->extent_op->update_flags = 1;
                        }
                        btrfs_free_delayed_extent_op(ref->extent_op);
                }
        }
        /*
         * update the reference mod on the head to reflect this new operation
         */
        existing->ref_mod += update->ref_mod;
}

/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline void add_delayed_ref_head(struct btrfs_fs_info *fs_info,
                                        struct btrfs_trans_handle *trans,
                                        struct btrfs_delayed_ref_node *ref,
                                        u64 bytenr, u64 num_bytes,
                                        int action, int is_data)
{
        struct btrfs_delayed_ref_node *existing;
        struct btrfs_delayed_ref_head *head_ref = NULL;
        struct btrfs_delayed_ref_root *delayed_refs;
        int count_mod = 1;
        int must_insert_reserved = 0;

        /*
         * the head node stores the sum of all the mods, so dropping a ref
         * should drop the sum in the head node by one.
         */
        if (action == BTRFS_UPDATE_DELAYED_HEAD)
                count_mod = 0;
        else if (action == BTRFS_DROP_DELAYED_REF)
                count_mod = -1;

        /*
         * BTRFS_ADD_DELAYED_EXTENT means that we need to update
         * the reserved accounting when the extent is finally added, or
         * if a later modification deletes the delayed ref without ever
         * inserting the extent into the extent allocation tree.
         * ref->must_insert_reserved is the flag used to record
         * that accounting mods are required.
         *
         * Once we record must_insert_reserved, switch the action to
         * BTRFS_ADD_DELAYED_REF because other special casing is not required.
         */
        if (action == BTRFS_ADD_DELAYED_EXTENT)
                must_insert_reserved = 1;
        else
                must_insert_reserved = 0;

        delayed_refs = &trans->transaction->delayed_refs;

        /* first set the basic ref node struct up */
        atomic_set(&ref->refs, 1);
        ref->bytenr = bytenr;
        ref->num_bytes = num_bytes;
        ref->ref_mod = count_mod;
        ref->type = 0;
        ref->action = 0;
        ref->is_head = 1;
        ref->in_tree = 1;
        ref->seq = 0;

        head_ref = btrfs_delayed_node_to_head(ref);
        head_ref->must_insert_reserved = must_insert_reserved;
        head_ref->is_data = is_data;

        INIT_LIST_HEAD(&head_ref->cluster);
        mutex_init(&head_ref->mutex);

        trace_add_delayed_ref_head(ref, head_ref, action);

        existing = tree_insert(&delayed_refs->root, &ref->rb_node);

        if (existing) {
                update_existing_head_ref(existing, ref);
                /*
                 * we've updated the existing ref, free the newly
                 * allocated ref
                 */
                kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
        } else {
                htree_insert(&delayed_refs->href_root, &head_ref->href_node);
                delayed_refs->num_heads++;
                delayed_refs->num_heads_ready++;
                delayed_refs->num_entries++;
                trans->delayed_ref_updates++;
        }
}

/*
 * helper to insert a delayed tree ref into the rbtree.
 */
static noinline void add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
                                         struct btrfs_trans_handle *trans,
                                         struct btrfs_delayed_ref_node *ref,
                                         u64 bytenr, u64 num_bytes, u64 parent,
                                         u64 ref_root, int level, int action,
                                         int for_cow)
{
        struct btrfs_delayed_ref_node *existing;
        struct btrfs_delayed_tree_ref *full_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        u64 seq = 0;

        if (action == BTRFS_ADD_DELAYED_EXTENT)
                action = BTRFS_ADD_DELAYED_REF;

        delayed_refs = &trans->transaction->delayed_refs;

        /* first set the basic ref node struct up */
        atomic_set(&ref->refs, 1);
        ref->bytenr = bytenr;
        ref->num_bytes = num_bytes;
        ref->ref_mod = 1;
        ref->action = action;
        ref->is_head = 0;
        ref->in_tree = 1;

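        /*
         * need_ref_seq() (see delayed-ref.h) decides whether this ref needs
         * a tree mod log sequence number; roughly, updates that are not on
         * behalf of COW and that touch fs trees do, so that concurrent
         * backref walks can replay a consistent view.
         */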
        if (need_ref_seq(for_cow, ref_root))
                seq = btrfs_get_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
        ref->seq = seq;

        full_ref = btrfs_delayed_node_to_tree_ref(ref);
        full_ref->parent = parent;
        full_ref->root = ref_root;
        if (parent)
                ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
        else
                ref->type = BTRFS_TREE_BLOCK_REF_KEY;
        full_ref->level = level;

        trace_add_delayed_tree_ref(ref, full_ref, action);

        existing = tree_insert(&delayed_refs->root, &ref->rb_node);

        if (existing) {
                update_existing_ref(trans, delayed_refs, existing, ref);
                /*
                 * we've updated the existing ref, free the newly
                 * allocated ref
                 */
                kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
        } else {
                delayed_refs->num_entries++;
                trans->delayed_ref_updates++;
        }
}

/*
 * helper to insert a delayed data ref into the rbtree.
 */
static noinline void add_delayed_data_ref(struct btrfs_fs_info *fs_info,
                                         struct btrfs_trans_handle *trans,
                                         struct btrfs_delayed_ref_node *ref,
                                         u64 bytenr, u64 num_bytes, u64 parent,
                                         u64 ref_root, u64 owner, u64 offset,
                                         int action, int for_cow)
{
        struct btrfs_delayed_ref_node *existing;
        struct btrfs_delayed_data_ref *full_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        u64 seq = 0;

        if (action == BTRFS_ADD_DELAYED_EXTENT)
                action = BTRFS_ADD_DELAYED_REF;

        delayed_refs = &trans->transaction->delayed_refs;

        /* first set the basic ref node struct up */
        atomic_set(&ref->refs, 1);
        ref->bytenr = bytenr;
        ref->num_bytes = num_bytes;
        ref->ref_mod = 1;
        ref->action = action;
        ref->is_head = 0;
        ref->in_tree = 1;

        if (need_ref_seq(for_cow, ref_root))
                seq = btrfs_get_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
        ref->seq = seq;

        full_ref = btrfs_delayed_node_to_data_ref(ref);
        full_ref->parent = parent;
        full_ref->root = ref_root;
        if (parent)
                ref->type = BTRFS_SHARED_DATA_REF_KEY;
        else
                ref->type = BTRFS_EXTENT_DATA_REF_KEY;

        full_ref->objectid = owner;
        full_ref->offset = offset;

        trace_add_delayed_data_ref(ref, full_ref, action);

        existing = tree_insert(&delayed_refs->root, &ref->rb_node);

        if (existing) {
                update_existing_ref(trans, delayed_refs, existing, ref);
                /*
                 * we've updated the existing ref, free the newly
                 * allocated ref
                 */
                kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
        } else {
                delayed_refs->num_entries++;
                trans->delayed_ref_updates++;
        }
}

/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
                               struct btrfs_trans_handle *trans,
                               u64 bytenr, u64 num_bytes, u64 parent,
                               u64 ref_root, int level, int action,
                               struct btrfs_delayed_extent_op *extent_op,
                               int for_cow)
{
        struct btrfs_delayed_tree_ref *ref;
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;

        BUG_ON(extent_op && extent_op->is_data);
        ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
        if (!ref)
                return -ENOMEM;

        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref) {
                kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
                return -ENOMEM;
        }

        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        /*
         * insert both the head node and the new ref without dropping
         * the spin lock
         */
        add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
                                   num_bytes, action, 0);

        add_delayed_tree_ref(fs_info, trans, &ref->node, bytenr,
                                   num_bytes, parent, ref_root, level, action,
                                   for_cow);
        spin_unlock(&delayed_refs->lock);
        if (need_ref_seq(for_cow, ref_root))
                btrfs_qgroup_record_ref(trans, &ref->node, extent_op);

        return 0;
}

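/*
 * A sketch of a typical call, not tied to any one call site: a caller that
 * has just allocated a tree block at @bytenr would queue its backref with
 *
 *      btrfs_add_delayed_tree_ref(fs_info, trans, bytenr, num_bytes,
 *                                 parent, root->root_key.objectid, level,
 *                                 BTRFS_ADD_DELAYED_EXTENT, NULL, for_cow);
 *
 * instead of updating the extent allocation tree inline.
 */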
/*
 * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
                               struct btrfs_trans_handle *trans,
                               u64 bytenr, u64 num_bytes,
                               u64 parent, u64 ref_root,
                               u64 owner, u64 offset, int action,
                               struct btrfs_delayed_extent_op *extent_op,
                               int for_cow)
{
        struct btrfs_delayed_data_ref *ref;
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;

        BUG_ON(extent_op && !extent_op->is_data);
        ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
        if (!ref)
                return -ENOMEM;

        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref) {
                kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
                return -ENOMEM;
        }

        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        /*
         * insert both the head node and the new ref without dropping
         * the spin lock
         */
        add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
                                   num_bytes, action, 1);

        add_delayed_data_ref(fs_info, trans, &ref->node, bytenr,
                                   num_bytes, parent, ref_root, owner, offset,
                                   action, for_cow);
        spin_unlock(&delayed_refs->lock);
        if (need_ref_seq(for_cow, ref_root))
                btrfs_qgroup_record_ref(trans, &ref->node, extent_op);

        return 0;
}

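/*
 * Queue only a head ref carrying an extent_op (a key and/or flags update);
 * BTRFS_UPDATE_DELAYED_HEAD makes add_delayed_ref_head() use a count_mod of
 * zero, so no reference counts change.
 */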
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
                                struct btrfs_trans_handle *trans,
                                u64 bytenr, u64 num_bytes,
                                struct btrfs_delayed_extent_op *extent_op)
{
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;

        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref)
                return -ENOMEM;

        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
                                   num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
                                   extent_op->is_data);

        spin_unlock(&delayed_refs->lock);
        return 0;
}

/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
{
        struct btrfs_delayed_ref_root *delayed_refs;

        delayed_refs = &trans->transaction->delayed_refs;
        return find_ref_head(&delayed_refs->href_root, bytenr, NULL, 0);
}

void btrfs_delayed_ref_exit(void)
{
        if (btrfs_delayed_ref_head_cachep)
                kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
        if (btrfs_delayed_tree_ref_cachep)
                kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
        if (btrfs_delayed_data_ref_cachep)
                kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
        if (btrfs_delayed_extent_op_cachep)
                kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}

int btrfs_delayed_ref_init(void)
{
        btrfs_delayed_ref_head_cachep = kmem_cache_create(
                                "btrfs_delayed_ref_head",
                                sizeof(struct btrfs_delayed_ref_head), 0,
                                SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_ref_head_cachep)
                goto fail;

        btrfs_delayed_tree_ref_cachep = kmem_cache_create(
                                "btrfs_delayed_tree_ref",
                                sizeof(struct btrfs_delayed_tree_ref), 0,
                                SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_tree_ref_cachep)
                goto fail;

        btrfs_delayed_data_ref_cachep = kmem_cache_create(
                                "btrfs_delayed_data_ref",
                                sizeof(struct btrfs_delayed_data_ref), 0,
                                SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_data_ref_cachep)
                goto fail;

        btrfs_delayed_extent_op_cachep = kmem_cache_create(
                                "btrfs_delayed_extent_op",
                                sizeof(struct btrfs_delayed_extent_op), 0,
                                SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_extent_op_cachep)
                goto fail;

        return 0;
fail:
        btrfs_delayed_ref_exit();
        return -ENOMEM;
}