atomic_t detaching;
atomic_long_t sectors_dirty;
- unsigned long sectors_dirty_gc;
unsigned long sectors_dirty_last;
long sectors_dirty_derivative;
struct cache_set *bch_cache_set_alloc(struct cache_sb *);
void bch_btree_cache_free(struct cache_set *);
int bch_btree_cache_alloc(struct cache_set *);
+void bch_sectors_dirty_init(struct cached_dev *);
void bch_cached_dev_writeback_init(struct cached_dev *);
void bch_moving_init_cache_set(struct cache_set *);
		gc->nkeys++;
		gc->data += KEY_SIZE(k);
-		if (KEY_DIRTY(k)) {
+		if (KEY_DIRTY(k))
			gc->dirty += KEY_SIZE(k);
-			if (d)
-				d->sectors_dirty_gc += KEY_SIZE(k);
-		}
	}
	for (t = b->sets; t <= &b->sets[b->nsets]; t++)
{
struct cache *ca;
struct bucket *b;
- struct bcache_device **d;
unsigned i;
if (!c->gc_mark_valid)
SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
}
-	for (d = c->devices;
-	     d < c->devices + c->nr_uuids;
-	     d++)
-		if (*d)
-			(*d)->sectors_dirty_gc = 0;
-
mutex_unlock(&c->bucket_lock);
}
size_t available = 0;
struct bucket *b;
struct cache *ca;
- struct bcache_device **d;
unsigned i;
mutex_lock(&c->bucket_lock);
}
}
-	for (d = c->devices;
-	     d < c->devices + c->nr_uuids;
-	     d++)
-		if (*d) {
-			unsigned long last =
-				atomic_long_read(&((*d)->sectors_dirty));
-			long difference = (*d)->sectors_dirty_gc - last;
-
-			pr_debug("sectors dirty off by %li", difference);
-
-			(*d)->sectors_dirty_last += difference;
-
-			atomic_long_set(&((*d)->sectors_dirty),
-					(*d)->sectors_dirty_gc);
-		}
-
mutex_unlock(&c->bucket_lock);
return available;
}
atomic_set(&dc->count, 1);
if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
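+ /* Initialize the dirty sector count from the btree before queueing writeback */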
+ bch_sectors_dirty_init(dc);
atomic_set(&dc->has_dirty, 1);
atomic_inc(&dc->count);
bch_writeback_queue(dc);
refill_dirty(cl);
}
+/* Init */
+
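+/*
+ * Walk the btree and count up the dirty sectors for one backing device, so
+ * dc->disk.sectors_dirty is correct as soon as a dirty device is attached,
+ * without waiting for garbage collection to recount it.
+ */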
+static int bch_btree_sectors_dirty_init(struct btree *b, struct btree_op *op,
+					struct cached_dev *dc)
+{
+	struct bkey *k;
+	struct btree_iter iter;
+
+	bch_btree_iter_init(b, &iter, &KEY(dc->disk.id, 0, 0));
+	while ((k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad)))
+		if (!b->level) {
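+			/* Leaf node: count dirty keys belonging to this device */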
+			if (KEY_INODE(k) > dc->disk.id)
+				break;
+
+			if (KEY_DIRTY(k))
+				atomic_long_add(KEY_SIZE(k),
+						&dc->disk.sectors_dirty);
+		} else {
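+			/* Interior node: recurse into the child node */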
+			btree(sectors_dirty_init, k, b, op, dc);
+			if (KEY_INODE(k) > dc->disk.id)
+				break;
+
+			cond_resched();
+		}
+
+	return 0;
+}
+
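+/* Count dirty sectors for @dc by walking the btree from the root */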
+void bch_sectors_dirty_init(struct cached_dev *dc)
+{
+	struct btree_op op;
+
+	bch_btree_op_init_stack(&op);
+	btree_root(sectors_dirty_init, dc->disk.c, &op, dc);
+}
+
void bch_cached_dev_writeback_init(struct cached_dev *dc)
{
closure_init_unlocked(&dc->writeback);