author    Kent Overstreet <kent.overstreet@gmail.com>  2021-12-31 20:03:29 -0500
committer Kent Overstreet <kent.overstreet@linux.dev>  2023-10-22 17:09:29 -0400
commit    3d48a7f85f83a51a0eb0d0a6537be26a20691260 (patch)
tree      20187f3ae7c67dde71f213cfa29c203fb6aa3451 /fs/bcachefs/movinggc.c
parent    d326ab2f5de201b9b7e790c653a2b925e7032d3b (diff)
bcachefs: KEY_TYPE_alloc_v4
This introduces a new alloc key which doesn't use varints. Soon we'll be
adding backpointers and storing them in alloc keys, which means our
pack/unpack workflow for alloc keys won't really work - we'll need to be
mutating alloc keys in place.

Instead of bch2_alloc_unpack(), we now have bch2_alloc_to_v4() that
converts older types of alloc keys to v4 if needed.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
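For context, a minimal sketch of the old and new read paths as a caller sees
them, assuming the declarations in fs/bcachefs/alloc_background.h as of this
commit; example_read_alloc_key() is a hypothetical caller and the pr_debug()
consumer is illustrative only:

#include "alloc_background.h"	/* assumed: struct bch_alloc_v4, bch2_alloc_to_v4() */

static void example_read_alloc_key(struct bkey_s_c k)
{
	struct bch_alloc_v4 a;

	/*
	 * Old read path (removed by this commit): decode the
	 * varint-packed key into a stack copy, with no way to
	 * mutate the key in place:
	 *
	 *	struct bkey_alloc_unpacked u = bch2_alloc_unpack(k);
	 */

	/* New read path: normalize any older alloc key version into
	 * the v4 layout before reading fields: */
	bch2_alloc_to_v4(k, &a);

	if (a.data_type == BCH_DATA_user)
		pr_debug("gen %u, dirty_sectors %u\n",
			 a.gen, a.dirty_sectors);
}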
Diffstat (limited to 'fs/bcachefs/movinggc.c')
-rw-r--r--	fs/bcachefs/movinggc.c	26
1 file changed, 13 insertions, 13 deletions
diff --git a/fs/bcachefs/movinggc.c b/fs/bcachefs/movinggc.c
index a54a83d3247b..aecec55eb421 100644
--- a/fs/bcachefs/movinggc.c
+++ b/fs/bcachefs/movinggc.c
@@ -129,7 +129,7 @@ static int walk_buckets_to_copygc(struct bch_fs *c)
 	struct btree_trans trans;
 	struct btree_iter iter;
 	struct bkey_s_c k;
-	struct bkey_alloc_unpacked u;
+	struct bch_alloc_v4 a;
 	int ret;
 
 	bch2_trans_init(&trans, c, 0, 0);
@@ -139,20 +139,20 @@ static int walk_buckets_to_copygc(struct bch_fs *c)
 		struct bch_dev *ca = bch_dev_bkey_exists(c, iter.pos.inode);
 		struct copygc_heap_entry e;
 
-		u = bch2_alloc_unpack(k);
+		bch2_alloc_to_v4(k, &a);
 
-		if (u.data_type != BCH_DATA_user ||
-		    u.dirty_sectors >= ca->mi.bucket_size ||
+		if (a.data_type != BCH_DATA_user ||
+		    a.dirty_sectors >= ca->mi.bucket_size ||
 		    bch2_bucket_is_open(c, iter.pos.inode, iter.pos.offset))
 			continue;
 
 		e = (struct copygc_heap_entry) {
 			.dev		= iter.pos.inode,
-			.gen		= u.gen,
-			.replicas	= 1 + u.stripe_redundancy,
-			.fragmentation	= u.dirty_sectors * (1U << 15)
-				/ ca->mi.bucket_size,
-			.sectors	= u.dirty_sectors,
+			.gen		= a.gen,
+			.replicas	= 1 + a.stripe_redundancy,
+			.fragmentation	= div_u64((u64) a.dirty_sectors * (1ULL << 31),
+						  ca->mi.bucket_size),
+			.sectors	= a.dirty_sectors,
 			.offset		= bucket_to_sector(ca, iter.pos.offset),
 		};
 		heap_add_or_replace(h, e, -fragmentation_cmp, NULL);
@@ -180,7 +180,7 @@ static int check_copygc_was_done(struct bch_fs *c,
 	struct btree_trans trans;
 	struct btree_iter iter;
 	struct bkey_s_c k;
-	struct bkey_alloc_unpacked u;
+	struct bch_alloc_v4 a;
 	struct copygc_heap_entry *i;
 	int ret = 0;
 
@@ -199,10 +199,10 @@ static int check_copygc_was_done(struct bch_fs *c,
 		if (ret)
 			break;
 
-		u = bch2_alloc_unpack(k);
+		bch2_alloc_to_v4(k, &a);
 
-		if (u.gen == i->gen && u.dirty_sectors) {
-			*sectors_not_moved += u.dirty_sectors;
+		if (a.gen == i->gen && a.dirty_sectors) {
+			*sectors_not_moved += a.dirty_sectors;
 			*buckets_not_moved += 1;
 		}
 	}
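One behavioral change rides along with the rename: the copygc fragmentation
score is now computed as a 2^31 fixed-point fraction of the bucket instead of
2^15 (presumably for finer-grained ordering in the copygc heap), and since
dirty_sectors * 2^31 can overflow 32-bit arithmetic, the new code widens the
product to u64 and divides with the kernel's div_u64() helper (u64 dividend,
u32 divisor), which is needed because a plain 64-by-32 '/' isn't portable to
all 32-bit targets. A standalone sketch of the arithmetic, with a userspace
stand-in for div_u64() and made-up example values:

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's div_u64() (u64 dividend, u32 divisor). */
static uint64_t div_u64(uint64_t dividend, uint32_t divisor)
{
	return dividend / divisor;
}

int main(void)
{
	/* Example values only: a half-full bucket of 2048 sectors. */
	uint32_t dirty_sectors = 1024;
	uint32_t bucket_size   = 2048;

	/* Old score: fraction of the bucket in use, as 15-bit fixed point.
	 * The intermediate product stays comfortably within 32 bits. */
	uint32_t old_score = dirty_sectors * (1U << 15) / bucket_size;

	/* New score: the same fraction at 31-bit fixed point; the
	 * intermediate no longer fits in 32 bits, hence the cast to
	 * u64 and the div_u64() call. */
	uint64_t new_score = div_u64((uint64_t) dirty_sectors * (1ULL << 31),
				     bucket_size);

	printf("old=%u new=%llu\n", old_score, (unsigned long long) new_score);
	return 0;
}

For the half-full bucket above this prints old=16384 (2^15 / 2) and
new=1073741824 (2^31 / 2), i.e. the same fraction at the two scales.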