author		Kent Overstreet <kent.overstreet@gmail.com>	2022-01-09 20:48:31 -0500
committer	Kent Overstreet <kent.overstreet@linux.dev>	2023-10-22 17:09:29 -0400
commit		f25d8215f499418c17dfde0b3158a66e03c758dc
tree		2239ac974b58f41e6687a840c21cf2156795fded	/fs/bcachefs/movinggc.c
parent		c6b2826cd14c5421bc50a768e923d078a71139c1
bcachefs: Kill allocator threads & freelists
Now that we have new persistent data structures for the allocator, this
patch converts the allocator to use them.

Now, foreground bucket allocation uses the freespace btree to find
buckets to allocate, instead of popping buckets off the freelist.

The background allocator threads are no longer needed and are deleted,
as well as the allocator freelists. Now we only need background tasks
for invalidating buckets containing cached data (when we are low on
empty buckets), and for issuing discards.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
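The shape of the change can be sketched with a toy model. Everything below is illustrative only: the names freespace[] and alloc_bucket() are invented for this sketch and are not bcachefs code. Previously, a background allocator thread kept per-device FIFO freelists topped up, and foreground allocation popped buckets from them; after this patch, foreground allocation searches the persistent freespace btree directly, so both the threads and the freelists can be deleted.

/* Toy model, not kernel code: allocate by scanning a persistent
 * free-space index instead of popping a thread-maintained freelist. */
#include <stdio.h>
#include <stdbool.h>

#define NBUCKETS 8

/* Stand-in for the persistent freespace btree: true = bucket is free. */
static bool freespace[NBUCKETS] = {
	false, true, false, true, true, false, false, true,
};

static int alloc_bucket(void)
{
	for (int b = 0; b < NBUCKETS; b++)
		if (freespace[b]) {
			freespace[b] = false;	/* now allocated */
			return b;
		}
	return -1;			/* no free buckets */
}

int main(void)
{
	printf("allocated bucket %d\n", alloc_bucket());	/* prints 1 */
	return 0;
}

The toy scan is linear; the real lookup is a btree iteration over the freespace btree keyed by device and bucket. The point is the shape: search a persistent index at allocation time instead of consuming a pre-filled queue.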
Diffstat (limited to 'fs/bcachefs/movinggc.c')
-rw-r--r--	fs/bcachefs/movinggc.c	23
1 file changed, 5 insertions, 18 deletions
diff --git a/fs/bcachefs/movinggc.c b/fs/bcachefs/movinggc.c
index aecec55eb421..b9e1bd7b1d05 100644
--- a/fs/bcachefs/movinggc.c
+++ b/fs/bcachefs/movinggc.c
@@ -104,18 +104,6 @@ static enum data_cmd copygc_pred(struct bch_fs *c, void *arg,
 	return DATA_SKIP;
 }
 
-static bool have_copygc_reserve(struct bch_dev *ca)
-{
-	bool ret;
-
-	spin_lock(&ca->fs->freelist_lock);
-	ret = fifo_full(&ca->free[RESERVE_movinggc]) ||
-		ca->allocator_state != ALLOCATOR_running;
-	spin_unlock(&ca->fs->freelist_lock);
-
-	return ret;
-}
-
 static inline int fragmentation_cmp(copygc_heap *heap,
 				    struct copygc_heap_entry l,
 				    struct copygc_heap_entry r)
@@ -247,11 +235,10 @@ static int bch2_copygc(struct bch_fs *c)
 	}
 
 	for_each_rw_member(ca, c, dev_idx) {
-		closure_wait_event(&c->freelist_wait, have_copygc_reserve(ca));
+		s64 avail = min(dev_buckets_available(ca, RESERVE_movinggc),
+				ca->mi.nbuckets >> 6);
 
-		spin_lock(&ca->fs->freelist_lock);
-		sectors_reserved += fifo_used(&ca->free[RESERVE_movinggc]) * ca->mi.bucket_size;
-		spin_unlock(&ca->fs->freelist_lock);
+		sectors_reserved += avail * ca->mi.bucket_size;
 	}
 
 	ret = walk_buckets_to_copygc(c);
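/* Annotation (not part of the patch): the new per-device reserve is
 * capped at ca->mi.nbuckets >> 6, i.e. 1/64 of the device's buckets.
 * For example, a device with 2097152 buckets contributes at most
 * 2097152 >> 6 = 32768 buckets' worth of sectors to sectors_reserved. */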
@@ -352,8 +339,8 @@ unsigned long bch2_copygc_wait_amount(struct bch_fs *c)
 	for_each_rw_member(ca, c, dev_idx) {
 		struct bch_dev_usage usage = bch2_dev_usage_read(ca);
 
-		fragmented_allowed = ((__dev_buckets_reclaimable(ca, usage) *
-				       ca->mi.bucket_size) >> 1);
+		fragmented_allowed = ((__dev_buckets_available(ca, usage, RESERVE_none) *
+				       ca->mi.bucket_size) >> 1);
 		fragmented = usage.d[BCH_DATA_user].fragmented;
 
 		wait = min(wait, max(0LL, fragmented_allowed - fragmented));
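
The second hunk's heuristic can be checked with plain numbers: per device, copygc is allowed to lag until the fragmented sectors exceed half of the sectors still available for allocation. With the freelists gone, "reclaimable by the allocator thread" presumably stops being a meaningful quantity, hence the switch from __dev_buckets_reclaimable() to __dev_buckets_available() with RESERVE_none. Below is a self-contained sketch of the computation; the concrete values are made up, and the variable names only mirror, not reproduce, the kernel's.

/* Toy model of the patched bch2_copygc_wait_amount() heuristic.
 * All numbers are hypothetical. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t buckets_available = 1000;   /* stand-in for __dev_buckets_available() */
	int64_t bucket_size       = 1024;   /* stand-in for ca->mi.bucket_size (sectors) */
	int64_t fragmented        = 200000; /* stand-in for usage.d[BCH_DATA_user].fragmented */

	/* Half of the still-allocatable space, in sectors: */
	int64_t fragmented_allowed = (buckets_available * bucket_size) >> 1;

	/* How many more fragmented sectors may accumulate before copygc runs: */
	int64_t wait = fragmented_allowed - fragmented;
	if (wait < 0)
		wait = 0;

	/* 1000 * 1024 / 2 - 200000 = 312000 */
	printf("copygc wait: %lld sectors\n", (long long)wait);
	return 0;
}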