fifo_push(&ca->free_inc, b - ca->buckets);
}
-static void invalidate_buckets_lru(struct cache *ca)
-{
- unsigned bucket_prio(struct bucket *b)
- {
- return ((unsigned) (b->prio - ca->set->min_prio)) *
- GC_SECTORS_USED(b);
- }
-
- bool bucket_max_cmp(struct bucket *l, struct bucket *r)
- {
- return bucket_prio(l) < bucket_prio(r);
- }
+#define bucket_prio(b) \
+ (((unsigned) (b->prio - ca->set->min_prio)) * GC_SECTORS_USED(b))
- bool bucket_min_cmp(struct bucket *l, struct bucket *r)
- {
- return bucket_prio(l) > bucket_prio(r);
- }
+#define bucket_max_cmp(l, r) (bucket_prio(l) < bucket_prio(r))
+#define bucket_min_cmp(l, r) (bucket_prio(l) > bucket_prio(r))
+static void invalidate_buckets_lru(struct cache *ca)
+{
struct bucket *b;
ssize_t i;
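
The hunk above replaces GCC's nested functions (a GNU C extension that clang, for one, does not support) with plain macros hoisted to file scope. Note that bucket_prio() still reads `ca` from the surrounding scope, so the macros only work inside a function that has a suitable `ca` in scope. A standalone sketch of the same pattern, with hypothetical stand-in types (mock_cache/mock_set and the sectors_used field replace bcache's types and GC_SECTORS_USED(); they are illustrative only):

#include <stdio.h>

/* Stand-ins for the bcache types; names are illustrative only. */
struct bucket { unsigned short prio; unsigned sectors_used; };
struct mock_set { unsigned short min_prio; };
struct mock_cache { struct mock_set *set; };

/* Like the patched code: the macro silently captures the local 'ca',
 * which is why it must only be used where such a variable is in scope. */
#define bucket_prio(b) \
	(((unsigned) ((b)->prio - ca->set->min_prio)) * (b)->sectors_used)

#define bucket_max_cmp(l, r)	(bucket_prio(l) < bucket_prio(r))

int main(void)
{
	struct mock_set s = { .min_prio = 10 };
	struct mock_cache cache = { .set = &s };
	struct mock_cache *ca = &cache;	/* captured by the macros */
	struct bucket a = { .prio = 20, .sectors_used = 8 };
	struct bucket b = { .prio = 30, .sectors_used = 5 };

	printf("prio(a)=%u prio(b)=%u max_cmp=%d\n",
	       bucket_prio(&a), bucket_prio(&b), bucket_max_cmp(&a, &b));
	return 0;
}

The flip side of the conversion is the usual macro caveats: arguments are re-evaluated, and the implicit capture of `ca` is invisible at the call site.
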
* we'll continue to run normally for a while with CACHE_SET_STOPPING set (i.e.
* flushing dirty data).
*
- * CACHE_SET_STOPPING_2 gets set at the last phase, when it's time to shut down the
- * allocation thread.
+ * CACHE_SET_STOPPING_2 gets set at the last phase, when it's time to shut down
+ * the allocation thread.
*/
#define CACHE_SET_UNREGISTERING 0
#define CACHE_SET_STOPPING 1
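
These constants are bit numbers, used with the kernel's atomic set_bit()/test_bit() on a flags word to sequence the two-phase shutdown the comment describes. A minimal userspace sketch of that sequencing, with local stand-ins for the bitops:

#include <stdio.h>

#define CACHE_SET_UNREGISTERING	0
#define CACHE_SET_STOPPING	1
#define CACHE_SET_STOPPING_2	2

/* Userspace stand-ins for the kernel's set_bit()/test_bit(). */
#define set_bit_ul(nr, p)	(*(p) |= 1UL << (nr))
#define test_bit_ul(nr, p)	((*(p) >> (nr)) & 1UL)

int main(void)
{
	unsigned long flags = 0;

	/* Phase 1: stop taking new work, keep flushing dirty data. */
	set_bit_ul(CACHE_SET_STOPPING, &flags);

	/* ... dirty data flushed ... */

	/* Phase 2: now it is safe to shut down the allocation thread. */
	set_bit_ul(CACHE_SET_STOPPING_2, &flags);

	printf("stopping=%lu stopping_2=%lu\n",
	       test_bit_ul(CACHE_SET_STOPPING, &flags),
	       test_bit_ul(CACHE_SET_STOPPING_2, &flags));
	return 0;
}
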
* searches - it told you where a key started. It's not used anymore,
* and can probably be safely dropped.
*/
-#define KEY(dev, sector, len) (struct bkey) \
-{ \
+#define KEY(dev, sector, len) \
+((struct bkey) { \
.high = (1ULL << 63) | ((uint64_t) (len) << 20) | (dev), \
.low = (sector) \
-}
+})
static inline void bkey_init(struct bkey *k)
{
#ifdef CONFIG_BCACHE_EDEBUG
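
The KEY() rewrite wraps the compound literal in parentheses so the whole expansion is a single primary expression; that way uses such as &KEY(...) or passing the result straight to a function parse as intended regardless of the surrounding context. A self-contained sketch, assuming a simplified two-word struct bkey:

#include <stdint.h>
#include <stdio.h>

struct bkey { uint64_t high, low; };

/* Parenthesized compound literal: the expansion is one expression,
 * so &KEY(...) and f(KEY(...)) parse as intended. */
#define KEY(dev, sector, len)					\
((struct bkey) {						\
	.high = (1ULL << 63) | ((uint64_t) (len) << 20) | (dev),\
	.low = (sector)						\
})

static void print_key(const struct bkey *k)
{
	printf("high=%#llx low=%llu\n",
	       (unsigned long long) k->high, (unsigned long long) k->low);
}

int main(void)
{
	/* Compound literals at block scope have automatic storage, so
	 * taking the address of one is legal C99. */
	print_key(&KEY(1, 4096, 8));
	return 0;
}
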
bug:
mutex_unlock(&b->c->bucket_lock);
- btree_bug(b, "inconsistent pointer %s: bucket %zu pin %i "
- "prio %i gen %i last_gc %i mark %llu gc_gen %i", pkey(k),
- PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
+ btree_bug(b,
+"inconsistent pointer %s: bucket %zu pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
+ pkey(k), PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
return true;
#endif
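
The hunk above, like several that follow, re-joins a user-visible string that had been split to satisfy the 80-column limit. Kernel style prefers keeping printable strings on one line, even an over-long one, so the message can be found with grep. A small illustration:

#include <stdio.h>

int main(void)
{
	unsigned long long sector = 42;

	/* Adjacent literals are concatenated at compile time, so both
	 * calls emit identical output ... */
	printf("bcache: verify failed"
	       " at sector %llu\n", sector);

	/* ... but only this form shows up when someone runs
	 * grep -r "verify failed at sector" over the tree. */
	printf("bcache: verify failed at sector %llu\n", sector);
	return 0;
}
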
for (i = start; i <= b->nsets; i++)
keys += b->sets[i].data->keys;
- order = roundup_pow_of_two(__set_bytes(b->sets->data, keys)) / PAGE_SIZE;
+ order = roundup_pow_of_two(__set_bytes(b->sets->data,
+ keys)) / PAGE_SIZE;
if (order)
order = ilog2(order);
}
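
The reflowed expression computes a page allocation order: the size in bytes, rounded up to a power of two, divided by PAGE_SIZE, then log2 of the page count. A hedged standalone sketch of that arithmetic (the kernel's get_order() does essentially this; the helpers below are local stand-ins):

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Round v up to the next power of two (v > 0). */
static unsigned long roundup_pow_of_two_ul(unsigned long v)
{
	unsigned long r = 1;

	while (r < v)
		r <<= 1;
	return r;
}

static unsigned long ilog2_ul(unsigned long v)
{
	unsigned long r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned long bytes = 3 * PAGE_SIZE + 1;	/* needs 4 pages */
	unsigned long order = roundup_pow_of_two_ul(bytes) / PAGE_SIZE;

	if (order)
		order = ilog2_ul(order);
	printf("%lu bytes -> order %lu (%lu pages)\n",
	       bytes, order, 1UL << order);
	return 0;
}
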
goto err_free;
if (!b) {
- cache_bug(c, "Tried to allocate bucket"
- " that was in btree cache");
+ cache_bug(c,
+ "Tried to allocate bucket that was in btree cache");
__bkey_put(c, &k.key);
goto retry;
}
if (memcmp(p1 + bv->bv_offset,
p2 + bv->bv_offset,
bv->bv_len))
- printk(KERN_ERR "bcache (%s): verify failed"
- " at sector %llu\n",
+ printk(KERN_ERR
+ "bcache (%s): verify failed at sector %llu\n",
bdevname(dc->bdev, name),
(uint64_t) s->orig_bio->bi_sector);
k = bkey_next(k), l = bkey_next(l))
if (bkey_cmp(k, l) ||
KEY_SIZE(k) != KEY_SIZE(l))
- pr_err("key %zi differs: %s "
- "!= %s", (uint64_t *) k - i->d,
+ pr_err("key %zi differs: %s != %s",
+ (uint64_t *) k - i->d,
pkey(k), pkey(l));
for (j = 0; j < 3; j++) {
BUG_ON(i->pin && atomic_read(i->pin) != 1);
if (n != i->j.seq)
- pr_err("journal entries %llu-%llu "
- "missing! (replaying %llu-%llu)\n",
- n, i->j.seq - 1, start, end);
+ pr_err(
+ "journal entries %llu-%llu missing! (replaying %llu-%llu)\n",
+ n, i->j.seq - 1, start, end);
for (k = i->j.start;
k < end(&i->j);
bio_init(bio);
bio->bi_sector = bucket_to_sector(ca->set,
- ca->sb.d[ja->discard_idx]);
+ ca->sb.d[ja->discard_idx]);
bio->bi_bdev = ca->bdev;
bio->bi_rw = REQ_WRITE|REQ_DISCARD;
bio->bi_max_vecs = 1;
closure_return(cl);
}
+static bool bucket_cmp(struct bucket *l, struct bucket *r)
+{
+ return GC_SECTORS_USED(l) < GC_SECTORS_USED(r);
+}
+
+static unsigned bucket_heap_top(struct cache *ca)
+{
+ return GC_SECTORS_USED(heap_peek(&ca->heap));
+}
+
void bch_moving_gc(struct closure *cl)
{
struct cache_set *c = container_of(cl, struct cache_set, gc.cl);
struct bucket *b;
unsigned i;
- bool bucket_cmp(struct bucket *l, struct bucket *r)
- {
- return GC_SECTORS_USED(l) < GC_SECTORS_USED(r);
- }
-
- unsigned top(struct cache *ca)
- {
- return GC_SECTORS_USED(heap_peek(&ca->heap));
- }
-
if (!c->copy_gc_enabled)
closure_return(cl);
sectors_to_move += GC_SECTORS_USED(b);
heap_add(&ca->heap, b, bucket_cmp);
} else if (bucket_cmp(b, heap_peek(&ca->heap))) {
- sectors_to_move -= top(ca);
+ sectors_to_move -= bucket_heap_top(ca);
sectors_to_move += GC_SECTORS_USED(b);
ca->heap.data[0] = b;
sectors_to_move -= GC_SECTORS_USED(b);
}
- ca->gc_move_threshold = top(ca);
+ ca->gc_move_threshold = bucket_heap_top(ca);
pr_debug("threshold %u", ca->gc_move_threshold);
}
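
For context on bucket_cmp()/bucket_heap_top(): moving gc keeps, per cache, the buckets with the fewest used sectors. Ordered by bucket_cmp, the heap root is the most-used bucket currently kept, so once the heap is full a new bucket only displaces the root when it is less used, and gc_move_threshold ends up as the largest "used" count among the selected buckets. A standalone sketch of that keep-the-N-smallest pattern (a plain array max-heap, not bcache's heap macros):

#include <stdio.h>

#define HEAP_SIZE 4

static unsigned heap[HEAP_SIZE];
static unsigned used;

/* Sift data[i] down; parent must compare >= children (max-heap). */
static void sift(unsigned i)
{
	for (;;) {
		unsigned l = 2 * i + 1, r = l + 1, big = i;

		if (l < used && heap[l] > heap[big])
			big = l;
		if (r < used && heap[r] > heap[big])
			big = r;
		if (big == i)
			break;
		unsigned tmp = heap[i]; heap[i] = heap[big]; heap[big] = tmp;
		i = big;
	}
}

/* Keep the HEAP_SIZE smallest values seen: the root is the largest
 * kept value and is evicted when something smaller shows up, the
 * same shape as moving gc's bucket selection. */
static void consider(unsigned sectors_used)
{
	if (used < HEAP_SIZE) {
		heap[used++] = sectors_used;
		for (int i = used / 2 - 1; i >= 0; i--)
			sift(i);
	} else if (sectors_used < heap[0]) {
		heap[0] = sectors_used;
		sift(0);
	}
}

int main(void)
{
	unsigned input[] = { 90, 10, 70, 30, 50, 20, 80 };

	for (unsigned i = 0; i < sizeof(input) / sizeof(input[0]); i++)
		consider(input[i]);

	/* heap[0] plays the role of gc_move_threshold. */
	printf("threshold %u\n", heap[0]);
	return 0;
}
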
t->sequential_io = 0;
}
-static void check_should_skip(struct cached_dev *dc, struct search *s)
+static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
{
- struct hlist_head *iohash(uint64_t k)
- { return &dc->io_hash[hash_64(k, RECENT_IO_BITS)]; }
+ return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
+}
+static void check_should_skip(struct cached_dev *dc, struct search *s)
+{
struct cache_set *c = s->op.c;
struct bio *bio = &s->bio.bio;
spin_lock(&dc->io_lock);
- hlist_for_each_entry(i, iohash(bio->bi_sector), hash)
+ hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash)
if (i->last == bio->bi_sector &&
time_before(jiffies, i->jiffies))
goto found;
s->task->sequential_io = i->sequential;
hlist_del(&i->hash);
- hlist_add_head(&i->hash, iohash(i->last));
+ hlist_add_head(&i->hash, iohash(dc, i->last));
list_move_tail(&i->lru, &dc->io_lru);
spin_unlock(&dc->io_lock);
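
The hunks above only hoist iohash() to file scope, passing dc explicitly; the underlying idea is a small hash of recent IO end positions. A bio whose start sector matches a stored `last` is treated as continuing that stream, its byte count accumulates, and the entry is re-filed under the stream's new end. A compact userspace sketch of the detection (the hash and eviction below are simplified stand-ins, not bcache's):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RECENT_IO_BITS	4
#define RECENT_IO	(1 << RECENT_IO_BITS)

struct io {
	uint64_t last;		/* sector right after the previous bio */
	unsigned sequential;	/* bytes seen in this stream so far */
	int used;
};

static struct io table[RECENT_IO];

static struct io *iohash(uint64_t k)
{
	/* Toy stand-in for the kernel's hash_64(). */
	return &table[(k * 0x9E3779B97F4A7C15ULL) >> (64 - RECENT_IO_BITS)];
}

/* Returns the running sequential byte count for this IO's stream. */
static unsigned submit(uint64_t sector, unsigned sectors)
{
	struct io *i = iohash(sector);
	unsigned seq = (i->used && i->last == sector) ? i->sequential : 0;

	seq += sectors << 9;

	/* Re-file the entry under where the *next* contiguous bio
	 * would begin, like the hlist_del()/hlist_add_head() above. */
	memset(i, 0, sizeof(*i));
	i = iohash(sector + sectors);
	i->last = sector + sectors;
	i->sequential = seq;
	i->used = 1;
	return seq;
}

int main(void)
{
	printf("%u\n", submit(1000, 8));	/* new stream: 4096 */
	printf("%u\n", submit(1008, 8));	/* contiguous: 8192 */
	printf("%u\n", submit(5000, 8));	/* new stream: 4096 */
	return 0;
}
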
static void scale_accounting(unsigned long data);
-void bch_cache_accounting_init(struct cache_accounting *acc, struct closure *parent)
+void bch_cache_accounting_init(struct cache_accounting *acc,
+ struct closure *parent)
{
kobject_init(&acc->total.kobj, &bch_stats_ktype);
kobject_init(&acc->five_minute.kobj, &bch_stats_ktype);
for (i = prio_buckets(ca) - 1; i >= 0; --i) {
long bucket;
struct prio_set *p = ca->disk_buckets;
- struct bucket_disk *d = p->data, *end = d + prios_per_bucket(ca);
+ struct bucket_disk *d = p->data;
+ struct bucket_disk *end = d + prios_per_bucket(ca);
for (b = ca->buckets + i * prios_per_bucket(ca);
b < ca->buckets + ca->sb.nbuckets && d < end;
if (dc->sb.block_size < c->sb.block_size) {
/* Will die */
- pr_err("Couldn't attach %s: block size "
- "less than set's block size", buf);
+ pr_err("Couldn't attach %s: block size less than set's block size",
+ buf);
return -EINVAL;
}