author		Kent Overstreet <kent.overstreet@gmail.com>	2022-01-10 19:46:39 -0500
committer	Kent Overstreet <kent.overstreet@linux.dev>	2023-10-22 17:09:22 -0400
commit		0678cbe2cbc586c0055de2c04602bf8136bcc3fc (patch)
tree		f58be61fb7a365ca74b2cba9e55317275862732c /fs/bcachefs/movinggc.c
parent		3763cb9566a65966cd404cf3e0c5f218e5cf5d16 (diff)
bcachefs: Ignore cached data when calculating fragmentation
Previously, bucket fragmentation was calculated as bucket size minus the total amount of live data, both dirty and cached.

This meant that if a bucket was full but only a small amount of the data in it was dirty, the rest cached, we'd get stuck: copygc wouldn't move the dirty data out of the bucket, and the allocator wouldn't be able to invalidate and drop the cached data.

This changes fragmentation to exclude cached data, so that copygc will evacuate these buckets and copygc/the allocator will always be able to make forward progress.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
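To make the before/after concrete, here is a minimal userspace sketch. It is an illustration, not the kernel code: struct mark, used_old() and the sample numbers are hypothetical, while the dirty_sectors field name and the (1U << 15) fixed-point scale follow the diff below.

#include <stdio.h>

/* Hypothetical stand-in for the bucket mark copygc consults */
struct mark {
	unsigned dirty_sectors;		/* live data copygc must move */
	unsigned cached_sectors;	/* clean cache the allocator may drop */
};

/* Old notion of "used": dirty and cached data both counted */
static unsigned used_old(struct mark m)
{
	return m.dirty_sectors + m.cached_sectors;
}

int main(void)
{
	/* A full bucket that is almost entirely cached data: */
	struct mark m = { .dirty_sectors = 8, .cached_sectors = 504 };
	unsigned bucket_size = 512;	/* sectors; hypothetical size */

	/* Old: used >= bucket_size, so copygc skipped the bucket and
	 * the cached data pinning it was never invalidated */
	printf("old: %u/32768\n", used_old(m) * (1U << 15) / bucket_size);

	/* New: only dirty sectors count, the bucket looks nearly empty,
	 * so copygc evacuates it and the allocator can then reuse it */
	printf("new: %u/32768\n", m.dirty_sectors * (1U << 15) / bucket_size);
	return 0;
}

On these numbers the bucket scores 32768/32768 ("completely used") under the old definition and 512/32768 under the new one, which is exactly the stuck case the commit message describes.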
Diffstat (limited to 'fs/bcachefs/movinggc.c')
-rw-r--r--	fs/bcachefs/movinggc.c | 21
1 file changed, 12 insertions(+), 9 deletions(-)
diff --git a/fs/bcachefs/movinggc.c b/fs/bcachefs/movinggc.c
index 4791e5099d93..64cb10c3f3db 100644
--- a/fs/bcachefs/movinggc.c
+++ b/fs/bcachefs/movinggc.c
@@ -69,10 +69,14 @@ static enum data_cmd copygc_pred(struct bch_fs *c, void *arg,
 			.dev	= p.ptr.dev,
 			.offset	= p.ptr.offset,
 		};
+		ssize_t i;
 
-		ssize_t i = eytzinger0_find_le(h->data, h->used,
-					       sizeof(h->data[0]),
-					       bucket_offset_cmp, &search);
+		if (p.ptr.cached)
+			continue;
+
+		i = eytzinger0_find_le(h->data, h->used,
+				       sizeof(h->data[0]),
+				       bucket_offset_cmp, &search);
 #if 0
 		/* eytzinger search verify code: */
 		ssize_t j = -1, k;
@@ -185,8 +189,7 @@ static int bch2_copygc(struct bch_fs *c)
 
 		if (m.owned_by_allocator ||
 		    m.data_type != BCH_DATA_user ||
-		    !bucket_sectors_used(m) ||
-		    bucket_sectors_used(m) >= ca->mi.bucket_size)
+		    m.dirty_sectors >= ca->mi.bucket_size)
 			continue;
 
 		WARN_ON(m.stripe && !g->stripe_redundancy);
@@ -195,9 +198,9 @@ static int bch2_copygc(struct bch_fs *c)
 			.dev		= dev_idx,
 			.gen		= m.gen,
 			.replicas	= 1 + g->stripe_redundancy,
-			.fragmentation	= bucket_sectors_used(m) * (1U << 15)
+			.fragmentation	= m.dirty_sectors * (1U << 15)
 					  / ca->mi.bucket_size,
-			.sectors	= bucket_sectors_used(m),
+			.sectors	= m.dirty_sectors,
 			.offset		= bucket_to_sector(ca, b),
 		};
 		heap_add_or_replace(h, e, -fragmentation_cmp, NULL);
@@ -263,8 +266,8 @@ static int bch2_copygc(struct bch_fs *c)
 		m = READ_ONCE(buckets->b[b].mark);
 
 		if (i->gen == m.gen &&
-		    bucket_sectors_used(m)) {
-			sectors_not_moved += bucket_sectors_used(m);
+		    m.dirty_sectors) {
+			sectors_not_moved += m.dirty_sectors;
 			buckets_not_moved++;
 		}
 	}
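An aside on the lookup in the first hunk: eytzinger0_find_le() finds the greatest element <= the search key in an array kept in eytzinger (BFS) order, which the call and the "#if 0 eytzinger search verify code" stub suggest is how the copygc heap is laid out for these lookups. The sketch below is a simplified, standalone illustration of that kind of search under those assumptions, not bcachefs's generic implementation, which is parameterized over element size and comparator.

#include <stdio.h>
#include <sys/types.h>

/*
 * An eytzinger0 array stores a sorted set in BFS order of a complete
 * binary search tree: the children of index i live at 2*i+1 and 2*i+2.
 * find_le returns the index of the greatest element <= key, or -1.
 */
static ssize_t eyt0_find_le(const int *a, size_t nr, int key)
{
	size_t i = 0;
	ssize_t best = -1;

	while (i < nr) {
		if (a[i] <= key) {
			best = (ssize_t) i;	/* candidate; look for a larger one */
			i = 2 * i + 2;		/* descend right */
		} else {
			i = 2 * i + 1;		/* too big; descend left */
		}
	}
	return best;
}

int main(void)
{
	/* The sorted set {1,3,5,7,9,11,13} in eytzinger0 (BFS) order: */
	int a[] = { 7, 3, 11, 1, 5, 9, 13 };
	ssize_t i = eyt0_find_le(a, 7, 6);

	printf("%zd (value %d)\n", i, i >= 0 ? a[i] : -1);	/* 4 (value 5) */
	return 0;
}

The BFS layout touches memory top-down in widening strides, which is friendlier to caches and prefetchers than the jumpy access pattern of a classic binary search over a flat sorted array.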