From b224ca5bf26eb3523c936f0e8f2139b1db6c127c Mon Sep 17 00:00:00 2001 From: Drew Nutter Date: Tue, 9 Sep 2025 09:40:24 -0400 Subject: [PATCH 01/17] refactor(core): reimplement Ancestors as bitset. compiles but tests fail --- conformance/src/txn_execute.zig | 4 +- src/accountsdb/account_store.zig | 6 +- src/accountsdb/db.zig | 24 ++--- src/accountsdb/fuzz.zig | 5 +- src/consensus/optimistic_vote_verifier.zig | 16 +-- src/consensus/replay_tower.zig | 46 +++++---- src/core/ancestors.zig | 111 ++++++++++++++++----- src/core/bank.zig | 8 +- src/core/status_cache.zig | 27 ++--- src/replay/confirm_slot.zig | 2 +- src/replay/consensus.zig | 12 +-- src/replay/freeze.zig | 6 +- src/replay/resolve_lookup.zig | 4 +- src/replay/service.zig | 7 +- src/replay/update_sysvar.zig | 6 +- src/runtime/check_transactions.zig | 2 +- 16 files changed, 180 insertions(+), 106 deletions(-) diff --git a/conformance/src/txn_execute.zig b/conformance/src/txn_execute.zig index b4b6f54e27..54fcffce1e 100644 --- a/conformance/src/txn_execute.zig +++ b/conformance/src/txn_execute.zig @@ -214,7 +214,7 @@ fn executeTxnContext( // Bank::new_with_paths(...) 
// https://github.com/firedancer-io/agave/blob/10fe1eb29aac9c236fd72d08ae60a3ef61ee8353/runtime/src/bank.rs#L1162 { - try ancestors.addSlot(allocator, 0); + try ancestors.addSlot(0); // bank.compute_budget = runtime_config.compute_budget; // bank.transaction_account_lock_limit = null; // bank.transaction_debug_keys = null; @@ -506,7 +506,7 @@ fn executeTxnContext( // var new = Bank{...} // Create ancestors with new slot and all parent slots - try ancestors.addSlot(allocator, slot); + try ancestors.addSlot(slot); // Update epoch if (parent_slots_epoch < epoch) { diff --git a/src/accountsdb/account_store.zig b/src/accountsdb/account_store.zig index 1e35a1e24b..b87cf296bd 100644 --- a/src/accountsdb/account_store.zig +++ b/src/accountsdb/account_store.zig @@ -261,7 +261,7 @@ pub const ThreadSafeAccountMap = struct { const list = map.get(address) orelse return null; for (list.items) |slot_account| { const slot, const account = slot_account; - if (ancestors.ancestors.contains(slot)) { + if (ancestors.containsSlot(slot)) { return if (account.lamports == 0) null else try toAccount(self.allocator, account); } } @@ -436,7 +436,7 @@ test "AccountStore does not return 0-lamport accounts from accountsdb" { var ancestors = Ancestors{}; defer ancestors.deinit(std.testing.allocator); - try ancestors.ancestors.put(std.testing.allocator, 0, {}); + try ancestors.addSlot(0); const slot_reader = db.accountReader().forSlot(&ancestors); try std.testing.expectEqual(null, try slot_reader.get(zero_lamport_address)); @@ -459,7 +459,7 @@ test ThreadSafeAccountMap { defer ancestors1.deinit(allocator); const slot1: Slot = 1; const addr1: Pubkey = .initRandom(random); - try ancestors1.ancestors.put(allocator, slot1, {}); + try ancestors1.addSlot(slot1); var expected_data: [128]u8 = undefined; random.bytes(&expected_data); diff --git a/src/accountsdb/db.zig b/src/accountsdb/db.zig index 7326344c4c..54a88fae80 100644 --- a/src/accountsdb/db.zig +++ b/src/accountsdb/db.zig @@ -3633,7 +3633,7 @@ 
test "write and read an account (write single + read with ancestors)" { { var ancestors = sig.core.Ancestors{}; defer ancestors.deinit(allocator); - try ancestors.ancestors.put(allocator, 5083, {}); + try ancestors.addSlot(5083); var account = (try accounts_db.getAccountWithAncestors(&pubkey, &ancestors)).?; defer account.deinit(allocator); @@ -3669,7 +3669,7 @@ test "write and read an account (write single + read with ancestors)" { { var ancestors = sig.core.Ancestors{}; defer ancestors.deinit(allocator); - try ancestors.ancestors.put(allocator, 5083, {}); + try ancestors.addSlot(5083); var account = (try accounts_db.getAccountWithAncestors(&pubkey, &ancestors)).?; defer account.deinit(allocator); @@ -3680,7 +3680,7 @@ test "write and read an account (write single + read with ancestors)" { { var ancestors = sig.core.Ancestors{}; defer ancestors.deinit(allocator); - try ancestors.ancestors.put(allocator, 5084, {}); + try ancestors.addSlot(5084); var account = (try accounts_db.getAccountWithAncestors(&pubkey, &ancestors)).?; defer account.deinit(allocator); @@ -4585,7 +4585,7 @@ test "insert multiple accounts on same slot" { // Create ancestors with initial slot var ancestors = Ancestors{}; defer ancestors.deinit(allocator); - try ancestors.ancestors.put(allocator, slot, {}); + try ancestors.addSlot(slot); // Insert 50 random accounts on current slot and reload them immediately for (0..50) |i| { @@ -4666,12 +4666,12 @@ test "insert multiple accounts on multiple slots" { var ancestors = Ancestors{}; defer ancestors.deinit(allocator); - try ancestors.ancestors.put(allocator, slot, {}); + try ancestors.addSlot(slot); const pubkey = Pubkey.initRandom(random); errdefer std.log.err( - "Failed to insert and load account: i={}, slot={}, ancestors={any} pubkey={}\n", - .{ i, slot, ancestors.ancestors.keys(), pubkey }, + "Failed to insert and load account: i={}, slot={}, ancestors={} pubkey={}\n", + .{ i, slot, ancestors, pubkey }, ); const expected = try 
createRandomAccount(allocator, random); @@ -4709,17 +4709,17 @@ test "insert account on multiple slots" { var ancestors = Ancestors{}; defer ancestors.deinit(allocator); - try ancestors.ancestors.put(allocator, slot, {}); + try ancestors.addSlot(slot); errdefer std.log.err( \\Failed to insert and load account: i={} \\ j: {}/{} \\ slot: {} - \\ ancestors: {any} + \\ ancestors: {} \\ pubkey: {} \\ , - .{ i, j, num_slots_to_insert, slot, ancestors.ancestors.keys(), pubkey }, + .{ i, j, num_slots_to_insert, slot, ancestors, pubkey }, ); const expected = try createRandomAccount(allocator, random); @@ -4774,7 +4774,7 @@ test "overwrite account in same slot" { var ancestors = Ancestors{}; defer ancestors.deinit(allocator); - try ancestors.ancestors.put(allocator, slot, {}); + try ancestors.addSlot(slot); const first = try createRandomAccount(allocator, random); defer allocator.free(first.data); @@ -4844,7 +4844,7 @@ test "insert many duplicate individual accounts, get latest with ancestors" { var ancestors = Ancestors{}; defer ancestors.deinit(allocator); - try ancestors.ancestors.put(allocator, expected.slot, {}); + try ancestors.addSlot(expected.slot); const maybe_actual = try accounts_db.getAccountWithAncestors(&pubkey, &ancestors); defer if (maybe_actual) |actual| actual.deinit(allocator); diff --git a/src/accountsdb/fuzz.zig b/src/accountsdb/fuzz.zig index 441843d21f..aea5b5ae71 100644 --- a/src/accountsdb/fuzz.zig +++ b/src/accountsdb/fuzz.zig @@ -251,7 +251,7 @@ pub fn run(seed: u64, args: *std.process.ArgIterator) !void { defer if (will_inc_slot) { top_slot += random.intRangeAtMost(Slot, 1, 2); }; - try ancestors.addSlot(allocator, top_slot); + try ancestors.addSlot(top_slot); const current_slot = if (!non_sequential_slots) top_slot else slot: { const ancestor_slots: []const Slot = ancestors.ancestors.keys(); @@ -320,7 +320,8 @@ pub fn run(seed: u64, args: *std.process.ArgIterator) !void { var ancestors_sub = try ancestors.clone(allocator); defer 
ancestors_sub.deinit(allocator); - for (ancestors_sub.ancestors.keys()) |other_slot| { + var iter = ancestors_sub.ancestors.iterator(); + while (iter.next()) |other_slot| { if (other_slot <= tracked_account.slot) continue; _ = ancestors_sub.ancestors.swapRemove(other_slot); } diff --git a/src/consensus/optimistic_vote_verifier.zig b/src/consensus/optimistic_vote_verifier.zig index 9d8133b721..5f3d1d5b54 100644 --- a/src/consensus/optimistic_vote_verifier.zig +++ b/src/consensus/optimistic_vote_verifier.zig @@ -327,10 +327,10 @@ test "OptimisticConfirmationVerifier.verifyForUnrootedOptimisticSlots: unrooted try std.testing.expectEqual(3, latest.items.len); // Root on same fork at slot 5: ancestors include 1 and 3 - var anc5: sig.core.Ancestors = .{ .ancestors = .{} }; + var anc5: sig.core.Ancestors = .EMPTY; defer anc5.deinit(allocator); - try anc5.addSlot(allocator, 1); - try anc5.addSlot(allocator, 3); + try anc5.addSlot(1); + try anc5.addSlot(3); { const unrooted = try verifier.verifyForUnrootedOptimisticSlots( allocator, @@ -344,9 +344,9 @@ test "OptimisticConfirmationVerifier.verifyForUnrootedOptimisticSlots: unrooted // Re-add optimistic slots and check root at 3 (same fork) try verifier.addNewOptimisticConfirmedSlots(allocator, optimistic, &ledger_writer); - var anc3: sig.core.Ancestors = .{ .ancestors = .{} }; + var anc3: sig.core.Ancestors = .EMPTY; defer anc3.deinit(allocator); - try anc3.addSlot(allocator, 1); + try anc3.addSlot(1); { const unrooted = try verifier.verifyForUnrootedOptimisticSlots( allocator, @@ -361,10 +361,10 @@ test "OptimisticConfirmationVerifier.verifyForUnrootedOptimisticSlots: unrooted // Re-add optimistic slots and set a different fork root at slot 4 try verifier.addNewOptimisticConfirmedSlots(allocator, optimistic, &ledger_writer); - var anc4: sig.core.Ancestors = .{ .ancestors = .{} }; + var anc4: sig.core.Ancestors = .EMPTY; defer anc4.deinit(allocator); // ancestors for 4 include 1 (but not 3) - try anc4.addSlot(allocator, 1); 
+ try anc4.addSlot(1); { const unrooted = try verifier.verifyForUnrootedOptimisticSlots( allocator, @@ -385,7 +385,7 @@ test "OptimisticConfirmationVerifier.verifyForUnrootedOptimisticSlots: unrooted var anc7: sig.core.Ancestors = .{ .ancestors = .empty }; defer anc7.deinit(allocator); // First run should return 1 and 3 (not in ancestors and not rooted). Mark 5 as ancestor. - try anc7.addSlot(allocator, 5); + try anc7.addSlot(5); try verifier.addNewOptimisticConfirmedSlots( allocator, optimistic, diff --git a/src/consensus/replay_tower.zig b/src/consensus/replay_tower.zig index ae0b48d5b4..f10df2d866 100644 --- a/src/consensus/replay_tower.zig +++ b/src/consensus/replay_tower.zig @@ -1554,7 +1554,8 @@ fn greatestCommonAncestor( if (superset.ancestors.count() == 0 or subset.ancestors.count() == 0) return null; - for (superset.ancestors.keys()) |slot| { + var iter = superset.ancestors.iterator(); + while (iter.next()) |slot| { if (!subset.containsSlot(slot)) continue; max_slot = if (max_slot) |current_max| @max(current_max, slot) else slot; } @@ -1776,7 +1777,8 @@ pub fn collectVoteLockouts( const fork_stake: u64 = blk: { var bank_ancestors = ancestors.get(bank_slot) orelse break :blk 0; var max_parent: ?Slot = null; - for (bank_ancestors.ancestors.keys()) |slot| { + var iter = bank_ancestors.ancestors.iterator(); + while (iter.next()) |slot| { if (max_parent == null or slot > max_parent.?) 
{ max_parent = slot; } @@ -1827,7 +1829,8 @@ pub fn populateAncestorVotedStakes( if (ancestors.getPtr(vote_slot)) |slot_ancestors| { _ = try voted_stakes.getOrPutValue(allocator, vote_slot, 0); - for (slot_ancestors.ancestors.keys()) |slot| { + var iter = slot_ancestors.ancestors.iterator(); + while (iter.next()) |slot| { _ = try voted_stakes.getOrPutValue(allocator, slot, 0); } } @@ -1846,7 +1849,8 @@ fn updateAncestorVotedStakes( if (ancestors.getPtr(voted_slot)) |vote_slot_ancestors| { const entry_vote_stake = try voted_stakes.getOrPutValue(allocator, voted_slot, 0); entry_vote_stake.value_ptr.* += voted_stake; - for (vote_slot_ancestors.ancestors.keys()) |ancestor_slot| { + var iter = vote_slot_ancestors.ancestors.iterator(); + while (iter.next()) |ancestor_slot| { const entry_voted_stake = try voted_stakes.getOrPutValue(allocator, ancestor_slot, 0); entry_voted_stake.value_ptr.* += voted_stake; } @@ -1929,7 +1933,7 @@ test "check_vote_threshold_forks" { var slot_parents: Ancestors = .EMPTY; errdefer slot_parents.deinit(allocator); for (0..i) |j| { - try slot_parents.addSlot(allocator, j); + try slot_parents.addSlot(j); } ancestors.putAssumeCapacity(i, slot_parents); } @@ -2109,7 +2113,7 @@ test "collect vote lockouts root" { var slots: Ancestors = .EMPTY; errdefer slots.deinit(allocator); for (0..i) |j| { - try slots.addSlot(allocator, j); + try slots.addSlot(j); } try ancestors.put(allocator, i, slots); } @@ -2221,7 +2225,7 @@ test "collect vote lockouts sums" { } const set0: Ancestors = .EMPTY; var set1: Ancestors = .EMPTY; - try set1.addSlot(allocator, 0); + try set1.addSlot(0); try ancestors.put(allocator, 0, set0); try ancestors.put(allocator, 1, set1); @@ -2319,7 +2323,7 @@ test "is locked out empty" { var ancestors: Ancestors = .EMPTY; defer ancestors.deinit(std.testing.allocator); - try ancestors.addSlot(std.testing.allocator, 0); + try ancestors.addSlot(0); const result = try replay_tower.tower.isLockedOut( 1, @@ -2334,7 +2338,7 @@ test "is locked out 
root slot child pass" { var ancestors: Ancestors = .EMPTY; defer ancestors.deinit(std.testing.allocator); - try ancestors.addSlot(std.testing.allocator, 0); + try ancestors.addSlot(0); replay_tower.tower.vote_state.root_slot = 0; @@ -2351,7 +2355,7 @@ test "is locked out root slot sibling fail" { var ancestors: Ancestors = .EMPTY; defer ancestors.deinit(std.testing.allocator); - try ancestors.addSlot(std.testing.allocator, 0); + try ancestors.addSlot(0); replay_tower.tower.vote_state.root_slot = 0; @@ -2412,7 +2416,7 @@ test "is locked out double vote" { var ancestors: Ancestors = .EMPTY; defer ancestors.deinit(std.testing.allocator); - try ancestors.addSlot(std.testing.allocator, 0); + try ancestors.addSlot(0); for (0..2) |i| { _ = try replay_tower.recordBankVote( @@ -2436,7 +2440,7 @@ test "is locked out child" { var ancestors: Ancestors = .EMPTY; defer ancestors.deinit(std.testing.allocator); - try ancestors.addSlot(std.testing.allocator, 0); + try ancestors.addSlot(0); _ = try replay_tower.recordBankVote( std.testing.allocator, @@ -2458,7 +2462,7 @@ test "is locked out sibling" { var ancestors: Ancestors = .EMPTY; defer ancestors.deinit(std.testing.allocator); - try ancestors.addSlot(std.testing.allocator, 0); + try ancestors.addSlot(0); for (0..2) |i| { _ = try replay_tower.recordBankVote( @@ -2482,7 +2486,7 @@ test "is locked out last vote expired" { var ancestors: Ancestors = .EMPTY; defer ancestors.deinit(std.testing.allocator); - try ancestors.addSlot(std.testing.allocator, 0); + try ancestors.addSlot(0); for (0..2) |i| { _ = try replay_tower.recordBankVote( @@ -4138,7 +4142,7 @@ fn createAncestor(allocator: std.mem.Allocator, slots: []const Slot) !Ancestors } var set: Ancestors = .EMPTY; errdefer set.deinit(allocator); - for (slots) |slot| try set.addSlot(allocator, slot); + for (slots) |slot| try set.addSlot(slot); return set; } @@ -4496,11 +4500,12 @@ fn getAncestors(allocator: std.mem.Allocator, tree: Tree) !std.AutoArrayHashMapU var child_ancestors: 
pub const Ancestors = struct {
    /// Slot membership set. Replaces the old AutoArrayHashMap-based set with a
    /// bounded sliding-window bit set (agave's "RollingBitField" optimization).
    ancestors: RingBitSet(MAX_SLOT_RANGE) = .empty,

    pub const EMPTY: Ancestors = .{ .ancestors = .empty };

    /// The maximum allowed distance from the highest to lowest contained slot.
    pub const MAX_SLOT_RANGE = 8192;

    /// Agave serializes Ancestors as HashMap(Slot, usize), where the usize is
    /// always written as 0 and ignored on read. We emit/consume that wire
    /// format directly (u64 entry count, then (slot, 0) pairs), so no
    /// temporary map — and therefore no allocator — is needed.
    pub const @"!bincode-config:ancestors" = bincode.FieldConfig(RingBitSet(MAX_SLOT_RANGE)){
        .serializer = serialize,
        .deserializer = deserialize,
    };

    /// Adds `slot` to the set.
    /// Returns error.Underflow if `slot` is more than MAX_SLOT_RANGE below the
    /// highest slot ever added (it fell out of the window).
    pub fn addSlot(self: *Ancestors, slot: Slot) error{Underflow}!void {
        try self.ancestors.set(slot);
    }

    /// Removes `slot` from the set; no-op if it is absent or out of range.
    pub fn removeSlot(self: *Ancestors, slot: Slot) void {
        self.ancestors.unset(slot);
    }

    pub fn containsSlot(self: *const Ancestors, slot: Slot) bool {
        return self.ancestors.isSet(slot);
    }

    pub const Iterator = RingBitSet(MAX_SLOT_RANGE).Iterator;

    /// Iterates the contained slots. Iteration order is unspecified.
    pub fn iterator(self: *const Ancestors) Iterator {
        return self.ancestors.iterator();
    }

    fn deserialize(
        l: *bincode.LimitAllocator,
        reader: anytype,
        params: bincode.Params,
    ) anyerror!RingBitSet(MAX_SLOT_RANGE) {
        // Wire format: u64 entry count, then (Slot, usize) pairs.
        const num_entries = try bincode.readWithLimit(l, u64, reader, params);
        var set: RingBitSet(MAX_SLOT_RANGE) = .empty;
        for (0..num_entries) |_| {
            const slot = try bincode.readWithLimit(l, Slot, reader, params);
            // The usize value is a placeholder agave ignores; read and discard.
            _ = try bincode.readWithLimit(l, usize, reader, params);
            try set.set(slot);
        }
        // BUG FIX: the previous version built the set but never returned it.
        return set;
    }

    fn serialize(writer: anytype, data: anytype, params: bincode.Params) anyerror!void {
        // Write the map header (entry count) followed by (slot, 0) pairs,
        // avoiding the temporary HashMap backed by std.heap.c_allocator that
        // the previous version used.
        try bincode.write(writer, @as(u64, data.count()), params);
        var iter = data.iterator();
        while (iter.next()) |slot| {
            try bincode.write(writer, @as(Slot, slot), params);
            try bincode.write(writer, @as(usize, 0), params);
        }
    }

    /// The bit set is a plain value type, so cloning is a copy. The allocator
    /// parameter is retained only for call-site compatibility.
    pub fn clone(self: *const Ancestors, _: std.mem.Allocator) !Ancestors {
        return self.*;
    }

    /// Nothing to free; retained for call-site compatibility.
    pub fn deinit(_: *Ancestors, _: std.mem.Allocator) void {}
};
/// A bit set over a sliding window of indices. The window may only move
/// forward: setting an index beyond the current window advances the window
/// (discarding the oldest values); indices below the window cannot be set.
pub fn RingBitSet(len: usize) type {
    return struct {
        /// Backing storage; index `i` is stored at bit `i % len` while the
        /// window [bottom, bottom + len) contains it.
        inner: InnerSet,
        /// The lowest index currently representable.
        bottom: usize,

        const Self = @This();
        const InnerSet = std.bit_set.ArrayBitSet(usize, len);

        pub const empty = Self{
            .inner = InnerSet.initEmpty(),
            .bottom = 0,
        };

        pub fn isSet(self: *const Self, index: usize) bool {
            if (index < self.bottom or index >= self.bottom + len) return false;
            // BUG FIX: must index modulo len, consistent with set().
            return self.inner.isSet(index % len);
        }

        /// Sets `index`. If `index` lies beyond the current window, the window
        /// slides forward so that `index` becomes its highest member, clearing
        /// every value that falls below the new bottom.
        /// Returns error.Underflow if `index` is below the current window.
        pub fn set(self: *Self, index: usize) error{Underflow}!void {
            if (index < self.bottom) return error.Underflow;
            // BUG FIX: boundary was `> len` (off by one) and bottom was
            // advanced with `+= index - len`, which is wrong once bottom > 0.
            if (index >= self.bottom + len) {
                const new_bottom = index + 1 - len;
                if (new_bottom - self.bottom >= len) {
                    // The window moved past everything stored; start fresh.
                    self.inner = InnerSet.initEmpty();
                } else {
                    // Clear the bits of the indices that fell out of the
                    // window, handling wrap-around of the modular positions.
                    const start = self.bottom % len;
                    const end = new_bottom % len;
                    if (start < end) {
                        self.inner.setRangeValue(.{ .start = start, .end = end }, false);
                    } else {
                        self.inner.setRangeValue(.{ .start = start, .end = len }, false);
                        self.inner.setRangeValue(.{ .start = 0, .end = end }, false);
                    }
                }
                self.bottom = new_bottom;
            }
            self.inner.set(index % len);
        }

        /// Clears `index`; no-op if it is outside the current window.
        pub fn unset(self: *Self, index: usize) void {
            if (index < self.bottom or index >= self.bottom + len) return;
            // BUG FIX: previous version called inner.set() here, which SET the
            // bit instead of clearing it, and did not index modulo len.
            self.inner.unset(index % len);
        }

        pub fn count(self: *const Self) usize {
            return self.inner.count();
        }

        /// Iterates the contained indices, yielding their actual values.
        /// Items are not sorted (order follows bit position, not value).
        pub const Iterator = struct {
            inner: InnerSet.Iterator(.{}),
            bottom: usize,

            pub fn next(self: *Iterator) ?usize {
                // BUG FIX: the previous iterator yielded raw bit positions,
                // which stop matching index values once bottom > 0. Map the
                // position back to the unique value in [bottom, bottom + len).
                const pos = self.inner.next() orelse return null;
                const base = self.bottom - (self.bottom % len);
                const candidate = base + pos;
                return if (candidate >= self.bottom) candidate else candidate + len;
            }
        };

        pub fn iterator(self: *const Self) Iterator {
            return .{ .inner = self.inner.iterator(.{}), .bottom = self.bottom };
        }
    };
}
b/src/core/bank.zig @@ -132,7 +132,7 @@ pub const SlotConstants = struct { fee_rate_governor: sig.core.genesis_config.FeeRateGovernor, ) Allocator.Error!SlotConstants { var ancestors = Ancestors{}; - try ancestors.ancestors.put(allocator, 0, {}); + ancestors.addSlot(0) catch unreachable; return .{ .parent_slot = 0, .parent_hash = sig.core.Hash.ZEROES, @@ -511,7 +511,7 @@ pub const BankFields = struct { /// for commentary on the runtime of this function. random: std.Random, max_list_entries: usize, - ) std.mem.Allocator.Error!BankFields { + ) !BankFields { var blockhash_queue = try BlockhashQueue.initRandom(allocator, random, max_list_entries); errdefer blockhash_queue.deinit(allocator); @@ -577,12 +577,12 @@ pub fn ancestorsRandom( random: std.Random, allocator: std.mem.Allocator, max_list_entries: usize, -) std.mem.Allocator.Error!Ancestors { +) !Ancestors { var ancestors = Ancestors{}; errdefer ancestors.deinit(allocator); for (0..random.uintAtMost(usize, max_list_entries)) |_| { - try ancestors.addSlot(allocator, random.int(Slot)); + try ancestors.addSlot(random.int(Slot)); } return ancestors; diff --git a/src/core/status_cache.zig b/src/core/status_cache.zig index 1d6f055680..e11a73c3e8 100644 --- a/src/core/status_cache.zig +++ b/src/core/status_cache.zig @@ -96,7 +96,7 @@ pub const StatusCache = struct { var roots = self.roots.read(); defer roots.unlock(); return for (stored_forks.items) |fork| { - if (ancestors.ancestors.contains(fork.slot) or roots.get().contains(fork.slot)) { + if (ancestors.containsSlot(fork.slot) or roots.get().contains(fork.slot)) { break fork; } } else null; @@ -219,9 +219,11 @@ pub const StatusCache = struct { test "status cache (de)serialize Ancestors" { const allocator = std.testing.allocator; - var ancestors = Ancestors{ - .ancestors = try .init(allocator, &.{ 1, 2, 3, 4 }, &.{}), - }; + var ancestors = Ancestors.EMPTY; + try ancestors.addSlot(1); + try ancestors.addSlot(2); + try ancestors.addSlot(3); + try ancestors.addSlot(4); 
defer ancestors.deinit(allocator); const serialized = try bincode.writeAlloc(allocator, ancestors, .{}); @@ -237,7 +239,10 @@ test "status cache (de)serialize Ancestors" { defer bincode.free(allocator, deserialized); try std.testing.expectEqual(ancestors.ancestors.count(), deserialized.count()); - try std.testing.expectEqualSlices(Slot, ancestors.ancestors.keys(), deserialized.keys()); + var iter = ancestors.iterator(); + while (iter.next()) |slot| { + try std.testing.expect(deserialized.contains(slot)); + } try std.testing.expectEqualSlices(usize, &.{ 0, 0, 0, 0 }, deserialized.values()); } @@ -265,10 +270,8 @@ test "status cache find with ancestor fork" { const signature = sig.core.Signature.ZEROES; const blockhash = Hash.ZEROES; - var ancestors: Ancestors = .{ - .ancestors = try HashMap(Slot, void).init(allocator, &.{0}, &.{}), - }; - defer ancestors.ancestors.deinit(allocator); + var ancestors: Ancestors = .EMPTY; + try ancestors.addSlot(0); var status_cache: StatusCache = .DEFAULT; defer status_cache.deinit(allocator); @@ -332,10 +335,8 @@ test "status cache insert picks latest blockhash fork" { const signature = sig.core.Signature.ZEROES; const blockhash = Hash.ZEROES; - var ancestors: Ancestors = .{ - .ancestors = try HashMap(Slot, void).init(allocator, &.{0}, &.{}), - }; - defer ancestors.ancestors.deinit(allocator); + var ancestors: Ancestors = .EMPTY; + try ancestors.addSlot(0); var status_cache: StatusCache = .DEFAULT; defer status_cache.deinit(allocator); diff --git a/src/replay/confirm_slot.zig b/src/replay/confirm_slot.zig index ef25301658..43f71cc696 100644 --- a/src/replay/confirm_slot.zig +++ b/src/replay/confirm_slot.zig @@ -788,7 +788,7 @@ pub const TestState = struct { try blockhash_queue.insertGenesisHash(allocator, .ZEROES, 1); var ancestors = Ancestors{}; - try ancestors.addSlot(allocator, 0); + try ancestors.addSlot(0); const replay_votes_channel: *sig.sync.Channel(ParsedVote) = try .create(allocator); diff --git a/src/replay/consensus.zig 
b/src/replay/consensus.zig index 161e03b126..a3b134440f 100644 --- a/src/replay/consensus.zig +++ b/src/replay/consensus.zig @@ -1804,9 +1804,9 @@ test "processConsensus - no duplicate confirmed without votes" { ) |slot, info| { const slot_ancestors = &info.constants.ancestors.ancestors; const agop = try ancestors.getOrPutValue(testing.allocator, slot, .EMPTY); - try agop.value_ptr.ancestors.ensureUnusedCapacity(testing.allocator, slot_ancestors.count()); - for (slot_ancestors.keys()) |a_slot| { - try agop.value_ptr.addSlot(testing.allocator, a_slot); + var iter = slot_ancestors.iterator(); + while (iter.next()) |a_slot| { + try agop.value_ptr.addSlot(a_slot); const dgop = try descendants.getOrPutValue(testing.allocator, a_slot, .empty); try dgop.value_ptr.put(testing.allocator, slot); } @@ -1963,9 +1963,9 @@ test "processConsensus - duplicate-confirmed is idempotent" { ) |slot, info| { const slot_ancestors = &info.constants.ancestors.ancestors; const agop = try ancestors.getOrPutValue(testing.allocator, slot, .EMPTY); - try agop.value_ptr.ancestors.ensureUnusedCapacity(testing.allocator, slot_ancestors.count()); - for (slot_ancestors.keys()) |a_slot| { - try agop.value_ptr.addSlot(testing.allocator, a_slot); + var iter = slot_ancestors.iterator(); + while (iter.next()) |a_slot| { + try agop.value_ptr.addSlot(a_slot); const dgop = try descendants.getOrPutValue(testing.allocator, a_slot, .empty); try dgop.value_ptr.put(testing.allocator, slot); } diff --git a/src/replay/freeze.zig b/src/replay/freeze.zig index d54de28db7..4db9228174 100644 --- a/src/replay/freeze.zig +++ b/src/replay/freeze.zig @@ -280,7 +280,7 @@ pub fn hashSlot(allocator: Allocator, params: HashSlotParams) !struct { ?LtHash, if (params.feature_set.active(.accounts_lt_hash, params.slot)) { var parent_ancestors = try params.ancestors.clone(allocator); defer parent_ancestors.deinit(allocator); - assert(parent_ancestors.ancestors.swapRemove(params.slot)); + parent_ancestors.removeSlot(params.slot); 
var lt_hash = params.parent_lt_hash.* orelse return error.UnknownParentLtHash; lt_hash.mixIn(try deltaLtHash(params.account_reader, params.slot, &parent_ancestors)); @@ -571,8 +571,8 @@ test "delta hashes with many accounts" { var parent_ancestors = Ancestors{}; defer parent_ancestors.deinit(allocator); - try parent_ancestors.ancestors.put(allocator, 0, {}); - try parent_ancestors.ancestors.put(allocator, 1, {}); + try parent_ancestors.addSlot(0); + try parent_ancestors.addSlot(1); const actual_lt_hash = try deltaLtHash(accounts.accountReader(), hash_slot, &parent_ancestors); const actual_merkle_hash = try deltaMerkleHash(accounts.accountReader(), allocator, hash_slot); diff --git a/src/replay/resolve_lookup.zig b/src/replay/resolve_lookup.zig index 79c7c42cfd..e0b5b3a315 100644 --- a/src/replay/resolve_lookup.zig +++ b/src/replay/resolve_lookup.zig @@ -445,7 +445,7 @@ test resolveBatch { var ancestors = Ancestors{ .ancestors = .empty }; defer ancestors.deinit(std.testing.allocator); - try ancestors.ancestors.put(std.testing.allocator, 0, {}); + try ancestors.addSlot(0); const slot_hashes = try SlotHashes.init(std.testing.allocator); defer slot_hashes.deinit(std.testing.allocator); @@ -552,7 +552,7 @@ test getLookupTable { var ancestors = sig.core.Ancestors{}; defer ancestors.deinit(allocator); - try ancestors.addSlot(allocator, 0); + try ancestors.addSlot(0); const account_reader = map.accountReader().forSlot(&ancestors); diff --git a/src/replay/service.zig b/src/replay/service.zig index c46b65fcfc..fb742a043a 100644 --- a/src/replay/service.zig +++ b/src/replay/service.zig @@ -571,8 +571,9 @@ fn advanceReplay(state: *ReplayState) !void { const slot_ancestors = &info.constants.ancestors.ancestors; const ancestor_gop = try ancestors.getOrPutValue(arena, slot, .EMPTY); try ancestor_gop.value_ptr.ancestors.ensureUnusedCapacity(arena, slot_ancestors.count()); - for (slot_ancestors.keys()) |ancestor_slot| { - try ancestor_gop.value_ptr.addSlot(arena, ancestor_slot); + 
var iter = slot_ancestors.iterator(); +        while (iter.next()) |ancestor_slot| { +            try ancestor_gop.value_ptr.addSlot(ancestor_slot); + const descendants_gop = try descendants.getOrPutValue(arena, ancestor_slot, .empty); try descendants_gop.value_ptr.put(arena, slot); } @@ -729,7 +730,7 @@ fn newSlotFromParent( var ancestors = try parent_constants.ancestors.clone(allocator); errdefer ancestors.deinit(allocator); - try ancestors.ancestors.put(allocator, slot, {}); + try ancestors.addSlot(slot); var feature_set = try getActiveFeatures(allocator, account_reader.forSlot(&ancestors), slot); diff --git a/src/replay/update_sysvar.zig b/src/replay/update_sysvar.zig index 85b75772ed..fad1da67cf 100644 --- a/src/replay/update_sysvar.zig +++ b/src/replay/update_sysvar.zig @@ -616,7 +616,7 @@ test fillMissingSysvarCacheEntries { const slot = 10; var ancestors = Ancestors{}; defer ancestors.deinit(allocator); - try ancestors.ancestors.put(allocator, slot, {}); + try ancestors.addSlot(slot); // Create a sysvar cache with all sysvars randomly initialized. 
const expected = try initSysvarCacheWithRandomValues(allocator, prng.random()); @@ -841,7 +841,7 @@ test "update all sysvars" { const rent = Rent.DEFAULT; var ancestors = Ancestors{}; defer ancestors.deinit(allocator); - try ancestors.ancestors.put(allocator, slot, {}); + try ancestors.addSlot(slot); // Create and insert sysvar defaults const initial_sysvars = try initSysvarCacheWithDefaultValues(allocator); @@ -876,7 +876,7 @@ test "update all sysvars" { .rent = &rent, .slot = slot, }; - try ancestors.ancestors.put(allocator, slot, {}); + try ancestors.addSlot(slot); const account_reader = accounts_db.accountReader().forSlot(&ancestors); { // updateClock diff --git a/src/runtime/check_transactions.zig b/src/runtime/check_transactions.zig index 5532a10201..b757d9e5c0 100644 --- a/src/runtime/check_transactions.zig +++ b/src/runtime/check_transactions.zig @@ -463,7 +463,7 @@ test checkStatusCache { ), ); - try ancestors.ancestors.put(allocator, 0, {}); + try ancestors.addSlot(0); try status_cache.insert(allocator, prng.random(), &recent_blockhash, &msg_hash.data, 0); try std.testing.expectEqual( From 461120591f0d9bb739b394d10e65b4ba93cedb5d Mon Sep 17 00:00:00 2001 From: Drew Nutter Date: Tue, 9 Sep 2025 09:54:50 -0400 Subject: [PATCH 02/17] fix(core): ancestors serialization with hashmap hack --- src/core/ancestors.zig | 39 ++++++++++++++++++++++++++------------- 1 file changed, 26 insertions(+), 13 deletions(-) diff --git a/src/core/ancestors.zig b/src/core/ancestors.zig index 4253373953..ac36efb290 100644 --- a/src/core/ancestors.zig +++ b/src/core/ancestors.zig @@ -16,13 +16,13 @@ pub const Ancestors = struct { /// The maximum allowed distance from the highest to lowest contained slot. pub const MAX_SLOT_RANGE = 8192; - // For some reason, agave serializes Ancestors as HashMap(slot, usize). But deserializing - // ignores the usize, and serializing just uses the value 0. So we need to serialize void - // as if it's 0, and deserialize 0 as if it's void. 
- // pub const @"!bincode-config:ancestors" = bincode.FieldConfig(RingBitSet(MAX_SLOT_RANGE)){ - // .serializer = voidSerialize, - // .deserializer = voidDeserialize, - // }; + /// For some reason, agave serializes Ancestors as HashMap(slot, usize). But deserializing + /// ignores the usize, and serializing just uses the value 0. So we need to serialize void + /// as if it's 0, and deserialize 0 as if it's void. + pub const @"!bincode-config:ancestors" = bincode.FieldConfig(RingBitSet(MAX_SLOT_RANGE)){ + .serializer = serialize, + .deserializer = deserialize, + }; pub fn addSlot(self: *Ancestors, slot: Slot) error{Underflow}!void { try self.ancestors.set(slot); @@ -42,15 +42,28 @@ pub const Ancestors = struct { return self.ancestors.iterator(); } - fn voidDeserialize(l: *bincode.LimitAllocator, reader: anytype, params: bincode.Params) !void { - _ = params; // autofix - const deserialized = try bincode.readWithLimit(l, HashMap(Slot, usize), reader, .{}); + fn deserialize( + l: *bincode.LimitAllocator, + reader: anytype, + params: bincode.Params, + ) anyerror!RingBitSet(MAX_SLOT_RANGE) { + const deserialized = try bincode.readWithLimit(l, HashMap(Slot, usize), reader, params); defer bincode.free(l.allocator(), deserialized); + var set = RingBitSet(MAX_SLOT_RANGE){}; + for (deserialized.keys()) |slot| { + try set.set(slot); + } } - // fn voidSerialize(writer: anytype, _: anytype, params: bincode.Params) !void { - // try bincode.write(writer, @as(usize, 0), params); - // } + fn serialize(writer: anytype, data: anytype, params: bincode.Params) anyerror!void { + var map = HashMap(Slot, usize){}; + defer map.deinit(std.heap.c_allocator); // TODO: change this + var iter = data.iterator(); + while (iter.next()) |slot| { + try map.put(std.heap.c_allocator, slot, 0); + } + try bincode.write(writer, map, params); + } pub fn clone(self: *const Ancestors, _: std.mem.Allocator) !Ancestors { return self.*; From fef010d1f2e7f58ab59daabb49b055f3633d4b72 Mon Sep 17 00:00:00 2001 
From: Drew Nutter Date: Tue, 9 Sep 2025 09:56:42 -0400 Subject: [PATCH 03/17] fix(core): ancestors deserialization --- src/core/ancestors.zig | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/core/ancestors.zig b/src/core/ancestors.zig index ac36efb290..185b9206ea 100644 --- a/src/core/ancestors.zig +++ b/src/core/ancestors.zig @@ -49,10 +49,11 @@ pub const Ancestors = struct { ) anyerror!RingBitSet(MAX_SLOT_RANGE) { const deserialized = try bincode.readWithLimit(l, HashMap(Slot, usize), reader, params); defer bincode.free(l.allocator(), deserialized); - var set = RingBitSet(MAX_SLOT_RANGE){}; + var set = RingBitSet(MAX_SLOT_RANGE).empty; for (deserialized.keys()) |slot| { try set.set(slot); } + return set; } fn serialize(writer: anytype, data: anytype, params: bincode.Params) anyerror!void { From 96cec2bb97bfd002ca1524b84246a6ce37b0fe9d Mon Sep 17 00:00:00 2001 From: Drew Nutter Date: Tue, 9 Sep 2025 10:13:12 -0400 Subject: [PATCH 04/17] fix(core): ancestors integer overflow --- src/core/ancestors.zig | 2 +- src/core/bank.zig | 7 +++++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/src/core/ancestors.zig b/src/core/ancestors.zig index 185b9206ea..de5f7c0639 100644 --- a/src/core/ancestors.zig +++ b/src/core/ancestors.zig @@ -98,7 +98,7 @@ pub fn RingBitSet(len: usize) type { if (index < self.bottom) return error.Underflow; if (index - self.bottom > len) { const wipe_start = self.bottom; - self.bottom += index - len; + self.bottom = index + len; const wipe_end = self.bottom; if (wipe_start % len > wipe_end % len) { self.inner.setRangeValue(.{ .start = wipe_start % len, .end = len }, false); diff --git a/src/core/bank.zig b/src/core/bank.zig index 8968f5f219..1a6358f881 100644 --- a/src/core/bank.zig +++ b/src/core/bank.zig @@ -581,8 +581,11 @@ pub fn ancestorsRandom( var ancestors = Ancestors{}; errdefer ancestors.deinit(allocator); - for (0..random.uintAtMost(usize, max_list_entries)) |_| { - try 
ancestors.addSlot(random.int(Slot)); + const lower_bound = random.int(Slot); + const upper_bound = lower_bound + Ancestors.MAX_SLOT_RANGE; + + for (0..@min(Ancestors.MAX_SLOT_RANGE, random.uintAtMost(usize, max_list_entries))) |_| { + try ancestors.addSlot(random.intRangeAtMost(Slot, lower_bound, upper_bound)); } return ancestors; From cf544a3c61088a2339ff6150df0c28c754817c85 Mon Sep 17 00:00:00 2001 From: Drew Nutter Date: Tue, 9 Sep 2025 10:15:07 -0400 Subject: [PATCH 05/17] fix(core): RingBitSet.unset totally wrong --- src/core/ancestors.zig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/core/ancestors.zig b/src/core/ancestors.zig index de5f7c0639..ca1e848db0 100644 --- a/src/core/ancestors.zig +++ b/src/core/ancestors.zig @@ -111,8 +111,8 @@ pub fn RingBitSet(len: usize) type { } pub fn unset(self: *RingBitSet(len), index: usize) void { - if (index < self.bottom or index > self.bottom + len) return; - return self.inner.set(index); + if (index < self.bottom or index >= self.bottom + len) return; + return self.inner.unset(index); } pub fn count(self: *const RingBitSet(len)) usize { From 9e529284b356baf5ba9567dae12fd0ef7328e618 Mon Sep 17 00:00:00 2001 From: Drew Nutter Date: Tue, 9 Sep 2025 12:16:31 -0400 Subject: [PATCH 06/17] fix(core): ancestors set bottom slot to minimum necessary --- src/core/ancestors.zig | 2 +- src/core/bank.zig | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/core/ancestors.zig b/src/core/ancestors.zig index ca1e848db0..e56e7425ec 100644 --- a/src/core/ancestors.zig +++ b/src/core/ancestors.zig @@ -98,7 +98,7 @@ pub fn RingBitSet(len: usize) type { if (index < self.bottom) return error.Underflow; if (index - self.bottom > len) { const wipe_start = self.bottom; - self.bottom = index + len; + self.bottom = 1 + index - len; const wipe_end = self.bottom; if (wipe_start % len > wipe_end % len) { self.inner.setRangeValue(.{ .start = wipe_start % len, .end = len }, false); diff --git 
a/src/core/bank.zig b/src/core/bank.zig index 1a6358f881..5b3aacc52a 100644 --- a/src/core/bank.zig +++ b/src/core/bank.zig @@ -585,7 +585,7 @@ pub fn ancestorsRandom( const upper_bound = lower_bound + Ancestors.MAX_SLOT_RANGE; for (0..@min(Ancestors.MAX_SLOT_RANGE, random.uintAtMost(usize, max_list_entries))) |_| { - try ancestors.addSlot(random.intRangeAtMost(Slot, lower_bound, upper_bound)); + try ancestors.addSlot(random.intRangeLessThan(Slot, lower_bound, upper_bound)); } return ancestors; From f2717a2e21a165fca81f438d073a060bda76f248 Mon Sep 17 00:00:00 2001 From: Drew Nutter Date: Tue, 9 Sep 2025 12:30:06 -0400 Subject: [PATCH 07/17] perf(core): reduce ancestors from 8192 entries to 256 which should still be plenty also fix an iterator bug this revealed --- src/core/ancestors.zig | 30 ++++++++++++++++++++++-------- 1 file changed, 22 insertions(+), 8 deletions(-) diff --git a/src/core/ancestors.zig b/src/core/ancestors.zig index e56e7425ec..5a4735b99a 100644 --- a/src/core/ancestors.zig +++ b/src/core/ancestors.zig @@ -6,15 +6,13 @@ const HashMap = std.AutoArrayHashMapUnmanaged; const bincode = sig.bincode; const Slot = sig.core.Slot; -// - pub const Ancestors = struct { ancestors: RingBitSet(MAX_SLOT_RANGE) = .empty, pub const EMPTY: Ancestors = .{ .ancestors = .empty }; /// The maximum allowed distance from the highest to lowest contained slot. - pub const MAX_SLOT_RANGE = 8192; + pub const MAX_SLOT_RANGE = 256; /// For some reason, agave serializes Ancestors as HashMap(slot, usize). But deserializing /// ignores the usize, and serializing just uses the value 0. 
So we need to serialize void @@ -91,7 +89,7 @@ pub fn RingBitSet(len: usize) type { pub fn isSet(self: *const RingBitSet(len), index: usize) bool { if (index < self.bottom or index >= self.bottom + len) return false; - return self.inner.isSet(index); + return self.inner.isSet(index % len); } pub fn set(self: *RingBitSet(len), index: usize) error{Underflow}!void { @@ -112,18 +110,34 @@ pub fn RingBitSet(len: usize) type { pub fn unset(self: *RingBitSet(len), index: usize) void { if (index < self.bottom or index >= self.bottom + len) return; - return self.inner.unset(index); + return self.inner.unset(index % len); } pub fn count(self: *const RingBitSet(len)) usize { return self.inner.count(); } - pub const Iterator = InnerSet.Iterator(.{}); + pub const Iterator = struct { + inner: InnerSet.Iterator(.{}), + bottom: usize, + + pub fn next(self: *Iterator) ?usize { + if (self.inner.next()) |item| { + return if (item < self.bottom % len) + item + self.bottom - len + else + item + self.bottom; + } + return null; + } + }; /// items are not sorted - pub fn iterator(self: *const RingBitSet(len)) InnerSet.Iterator(.{}) { - return self.inner.iterator(.{}); + pub fn iterator(self: *const RingBitSet(len)) Iterator { + return .{ + .inner = self.inner.iterator(.{}), + .bottom = self.bottom, + }; } }; } From f546994c684d33715433bbeed5c796ba3966324f Mon Sep 17 00:00:00 2001 From: Drew Nutter Date: Tue, 9 Sep 2025 16:12:23 -0400 Subject: [PATCH 08/17] fix(replay,accountsdb): new ancestors misuses --- src/accountsdb/fuzz.zig | 17 ++++++++--------- src/replay/service.zig | 8 +++----- 2 files changed, 11 insertions(+), 14 deletions(-) diff --git a/src/accountsdb/fuzz.zig b/src/accountsdb/fuzz.zig index aea5b5ae71..cbd2f2e167 100644 --- a/src/accountsdb/fuzz.zig +++ b/src/accountsdb/fuzz.zig @@ -254,7 +254,12 @@ pub fn run(seed: u64, args: *std.process.ArgIterator) !void { try ancestors.addSlot(top_slot); const current_slot = if (!non_sequential_slots) top_slot else slot: { - const 
ancestor_slots: []const Slot = ancestors.ancestors.keys(); + var ancestor_slots = try allocator.alloc(Slot, ancestors.ancestors.count()); + var iter = ancestors.ancestors.iterator(); + var i: usize = 0; + while (iter.next()) |slot| : (i += 1) ancestor_slots[i] = slot; + std.mem.sort(Slot, ancestor_slots, {}, std.sort.asc(Slot)); + std.debug.assert(ancestor_slots[ancestor_slots.len - 1] == top_slot); const ancestor_index = random.intRangeLessThan( usize, @@ -320,17 +325,11 @@ pub fn run(seed: u64, args: *std.process.ArgIterator) !void { var ancestors_sub = try ancestors.clone(allocator); defer ancestors_sub.deinit(allocator); - var iter = ancestors_sub.ancestors.iterator(); + var iter = ancestors_sub.iterator(); while (iter.next()) |other_slot| { if (other_slot <= tracked_account.slot) continue; - _ = ancestors_sub.ancestors.swapRemove(other_slot); + _ = ancestors_sub.removeSlot(other_slot); } - ancestors_sub.ancestors.sort(struct { - ancestors_sub: []Slot, - pub fn lessThan(ctx: @This(), a: usize, b: usize) bool { - return ctx.ancestors_sub[a] < ctx.ancestors_sub[b]; - } - }{ .ancestors_sub = ancestors_sub.ancestors.keys() }); const account = try accounts_db.getAccountWithAncestors(&pubkey, &ancestors_sub) orelse { diff --git a/src/replay/service.zig b/src/replay/service.zig index fb742a043a..389de57871 100644 --- a/src/replay/service.zig +++ b/src/replay/service.zig @@ -570,18 +570,16 @@ fn advanceReplay(state: *ReplayState) !void { ) |slot, info| { const slot_ancestors = &info.constants.ancestors.ancestors; const ancestor_gop = try ancestors.getOrPutValue(arena, slot, .EMPTY); - try ancestor_gop.value_ptr.ancestors.ensureUnusedCapacity(arena, slot_ancestors.count()); - var iter = slot.ancestors.iterator(); + var iter = slot_ancestors.iterator(); while (iter.next()) |ancestor_slot| { - try try ancestor_gop.value_ptr.addSlot(arena, ancestor_slot); + try ancestor_gop.value_ptr.addSlot(ancestor_slot); const descendants_gop = try descendants.getOrPutValue(arena, 
ancestor_slot, .empty); try descendants_gop.value_ptr.put(arena, slot); } } } - const slot_history_accessor = SlotHistoryAccessor - .init(state.account_store.reader()); + const slot_history_accessor = SlotHistoryAccessor.init(state.account_store.reader()); // Explicitly Unlock the read lock on slot_tracker and acquire a write lock for consensus processing. slot_tracker_lg.unlock(); From f8f5b0200b2585134960b6562bde0ff168c2e9ce Mon Sep 17 00:00:00 2001 From: Drew Nutter Date: Tue, 9 Sep 2025 16:16:12 -0400 Subject: [PATCH 09/17] fix: style --- src/core/ancestors.zig | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/core/ancestors.zig b/src/core/ancestors.zig index 5a4735b99a..129ad0c8f2 100644 --- a/src/core/ancestors.zig +++ b/src/core/ancestors.zig @@ -102,7 +102,10 @@ pub fn RingBitSet(len: usize) type { self.inner.setRangeValue(.{ .start = wipe_start % len, .end = len }, false); self.inner.setRangeValue(.{ .start = 0, .end = wipe_end % len }, false); } else { - self.inner.setRangeValue(.{ .start = wipe_start % len, .end = wipe_end % len }, false); + self.inner.setRangeValue( + .{ .start = wipe_start % len, .end = wipe_end % len }, + false, + ); } } self.inner.set(index % len); From d2923d7ee7caaf21a137b90fc085a1ff7eb75ef2 Mon Sep 17 00:00:00 2001 From: Drew Nutter Date: Tue, 9 Sep 2025 16:36:09 -0400 Subject: [PATCH 10/17] refactor(ancestors): remove clone and deinit --- src/accountsdb/account_store.zig | 2 - src/accountsdb/db.zig | 10 --- src/accountsdb/fuzz.zig | 4 +- src/consensus/optimistic_vote_verifier.zig | 7 +- src/consensus/replay_tower.zig | 92 ++++++---------------- src/core/ancestors.zig | 6 -- src/core/bank.zig | 15 +--- src/core/status_cache.zig | 1 - src/replay/confirm_slot.zig | 1 - src/replay/consensus.zig | 12 +-- src/replay/freeze.zig | 4 +- src/replay/resolve_lookup.zig | 2 - src/replay/service.zig | 3 +- src/replay/update_sysvar.zig | 2 - src/runtime/check_transactions.zig | 3 +- src/runtime/transaction_execution.zig 
| 3 +- 16 files changed, 35 insertions(+), 132 deletions(-) diff --git a/src/accountsdb/account_store.zig b/src/accountsdb/account_store.zig index b87cf296bd..9817c1856b 100644 --- a/src/accountsdb/account_store.zig +++ b/src/accountsdb/account_store.zig @@ -435,7 +435,6 @@ test "AccountStore does not return 0-lamport accounts from accountsdb" { try std.testing.expectEqual(1, (try reader.getLatest(one_lamport_address)).?.lamports); var ancestors = Ancestors{}; - defer ancestors.deinit(std.testing.allocator); try ancestors.addSlot(0); const slot_reader = db.accountReader().forSlot(&ancestors); @@ -456,7 +455,6 @@ test ThreadSafeAccountMap { const account_reader = tsm.accountReader(); var ancestors1: Ancestors = .{}; - defer ancestors1.deinit(allocator); const slot1: Slot = 1; const addr1: Pubkey = .initRandom(random); try ancestors1.addSlot(slot1); diff --git a/src/accountsdb/db.zig b/src/accountsdb/db.zig index 54a88fae80..941e0ffc52 100644 --- a/src/accountsdb/db.zig +++ b/src/accountsdb/db.zig @@ -3632,7 +3632,6 @@ test "write and read an account (write single + read with ancestors)" { // slot is in ancestors { var ancestors = sig.core.Ancestors{}; - defer ancestors.deinit(allocator); try ancestors.addSlot(5083); var account = (try accounts_db.getAccountWithAncestors(&pubkey, &ancestors)).?; @@ -3668,7 +3667,6 @@ test "write and read an account (write single + read with ancestors)" { // prev slot, get prev account { var ancestors = sig.core.Ancestors{}; - defer ancestors.deinit(allocator); try ancestors.addSlot(5083); var account = (try accounts_db.getAccountWithAncestors(&pubkey, &ancestors)).?; @@ -3679,7 +3677,6 @@ test "write and read an account (write single + read with ancestors)" { // new slot, get new account { var ancestors = sig.core.Ancestors{}; - defer ancestors.deinit(allocator); try ancestors.addSlot(5084); var account = (try accounts_db.getAccountWithAncestors(&pubkey, &ancestors)).?; @@ -4584,7 +4581,6 @@ test "insert multiple accounts on same 
slot" { // Create ancestors with initial slot var ancestors = Ancestors{}; - defer ancestors.deinit(allocator); try ancestors.addSlot(slot); // Insert 50 random accounts on current slot and reload them immediately @@ -4665,7 +4661,6 @@ test "insert multiple accounts on multiple slots" { const slot = slots[random.uintLessThan(u64, slots.len)]; var ancestors = Ancestors{}; - defer ancestors.deinit(allocator); try ancestors.addSlot(slot); const pubkey = Pubkey.initRandom(random); @@ -4708,7 +4703,6 @@ test "insert account on multiple slots" { const slot = slots[random.uintLessThan(u64, slots.len)]; var ancestors = Ancestors{}; - defer ancestors.deinit(allocator); try ancestors.addSlot(slot); errdefer std.log.err( @@ -4755,8 +4749,6 @@ test "missing ancestor returns null" { try accounts_db.putAccount(slot, pubkey, account); var ancestors = Ancestors{}; - defer ancestors.deinit(allocator); - try std.testing.expectEqual(null, try accounts_db.getAccountWithAncestors(&pubkey, &ancestors)); } @@ -4773,7 +4765,6 @@ test "overwrite account in same slot" { const pubkey = Pubkey.initRandom(random); var ancestors = Ancestors{}; - defer ancestors.deinit(allocator); try ancestors.addSlot(slot); const first = try createRandomAccount(allocator, random); @@ -4843,7 +4834,6 @@ test "insert many duplicate individual accounts, get latest with ancestors" { const expected = maybe_expected orelse return error.ExpectedMissing; var ancestors = Ancestors{}; - defer ancestors.deinit(allocator); try ancestors.addSlot(expected.slot); const maybe_actual = try accounts_db.getAccountWithAncestors(&pubkey, &ancestors); diff --git a/src/accountsdb/fuzz.zig b/src/accountsdb/fuzz.zig index cbd2f2e167..e20d1249ff 100644 --- a/src/accountsdb/fuzz.zig +++ b/src/accountsdb/fuzz.zig @@ -236,7 +236,6 @@ pub fn run(seed: u64, args: *std.process.ArgIterator) !void { var top_slot: Slot = 0; var ancestors: sig.core.Ancestors = .EMPTY; - defer ancestors.deinit(allocator); // get/put a bunch of accounts while 
(true) { @@ -323,8 +322,7 @@ pub fn run(seed: u64, args: *std.process.ArgIterator) !void { break :blk .{ key, tracked_accounts.get(key).? }; }; - var ancestors_sub = try ancestors.clone(allocator); - defer ancestors_sub.deinit(allocator); + var ancestors_sub = ancestors; var iter = ancestors_sub.iterator(); while (iter.next()) |other_slot| { if (other_slot <= tracked_account.slot) continue; diff --git a/src/consensus/optimistic_vote_verifier.zig b/src/consensus/optimistic_vote_verifier.zig index 5f3d1d5b54..db3749ab5e 100644 --- a/src/consensus/optimistic_vote_verifier.zig +++ b/src/consensus/optimistic_vote_verifier.zig @@ -252,8 +252,7 @@ test "OptimisticConfirmationVerifier.verifyForUnrootedOptimisticSlots: same slot latest.deinit(); try std.testing.expectEqual(2, latest.items.len); - var root_ancestors: sig.core.Ancestors = .{ .ancestors = .empty }; - defer root_ancestors.deinit(allocator); + var root_ancestors: sig.core.Ancestors = .EMPTY; const unrooted = try verifier.verifyForUnrootedOptimisticSlots( allocator, @@ -328,7 +327,6 @@ test "OptimisticConfirmationVerifier.verifyForUnrootedOptimisticSlots: unrooted // Root on same fork at slot 5: ancestors include 1 and 3 var anc5: sig.core.Ancestors = .EMPTY; - defer anc5.deinit(allocator); try anc5.addSlot(1); try anc5.addSlot(3); { @@ -345,7 +343,6 @@ test "OptimisticConfirmationVerifier.verifyForUnrootedOptimisticSlots: unrooted // Re-add optimistic slots and check root at 3 (same fork) try verifier.addNewOptimisticConfirmedSlots(allocator, optimistic, &ledger_writer); var anc3: sig.core.Ancestors = .EMPTY; - defer anc3.deinit(allocator); try anc3.addSlot(1); { const unrooted = try verifier.verifyForUnrootedOptimisticSlots( @@ -362,7 +359,6 @@ test "OptimisticConfirmationVerifier.verifyForUnrootedOptimisticSlots: unrooted // Re-add optimistic slots and set a different fork root at slot 4 try verifier.addNewOptimisticConfirmedSlots(allocator, optimistic, &ledger_writer); var anc4: sig.core.Ancestors = .EMPTY; - 
defer anc4.deinit(allocator); // ancestors for 4 include 1 (but not 3) try anc4.addSlot(1); { @@ -383,7 +379,6 @@ test "OptimisticConfirmationVerifier.verifyForUnrootedOptimisticSlots: unrooted // Simulate missing ancestors by using root at 7 with no ancestors info var anc7: sig.core.Ancestors = .{ .ancestors = .empty }; - defer anc7.deinit(allocator); // First run should return 1 and 3 (not in ancestors and not rooted). Mark 5 as ancestor. try anc7.addSlot(5); try verifier.addNewOptimisticConfirmedSlots( diff --git a/src/consensus/replay_tower.zig b/src/consensus/replay_tower.zig index f10df2d866..0b9ebb39df 100644 --- a/src/consensus/replay_tower.zig +++ b/src/consensus/replay_tower.zig @@ -1919,19 +1919,12 @@ test "check_vote_threshold_forks" { const random = prng.random(); // Create the ancestor relationships var ancestors = std.AutoArrayHashMapUnmanaged(u64, Ancestors).empty; - defer { - var it = ancestors.iterator(); - while (it.next()) |entry| { - entry.value_ptr.deinit(allocator); - } - ancestors.deinit(allocator); - } + defer ancestors.deinit(allocator); const vote_threshold_depth_plus_2 = VOTE_THRESHOLD_DEPTH + 2; try ancestors.ensureUnusedCapacity(allocator, vote_threshold_depth_plus_2); for (0..vote_threshold_depth_plus_2) |i| { var slot_parents: Ancestors = .EMPTY; - errdefer slot_parents.deinit(allocator); for (0..i) |j| { try slot_parents.addSlot(j); } @@ -2097,13 +2090,7 @@ test "collect vote lockouts root" { defer replay_tower.deinit(allocator); var ancestors = std.AutoArrayHashMapUnmanaged(u64, Ancestors).empty; - defer { - var it = ancestors.iterator(); - while (it.next()) |entry| { - entry.value_ptr.deinit(allocator); - } - ancestors.deinit(allocator); - } + defer ancestors.deinit(allocator); const max_lockout_history_plus_1 = MAX_LOCKOUT_HISTORY + 1; try ancestors.ensureUnusedCapacity(allocator, max_lockout_history_plus_1); @@ -2111,7 +2098,6 @@ test "collect vote lockouts root" { for (0..max_lockout_history_plus_1) |i| { _ = try 
replay_tower.recordBankVote(allocator, i, .initRandom(random)); var slots: Ancestors = .EMPTY; - errdefer slots.deinit(allocator); for (0..i) |j| { try slots.addSlot(j); } @@ -2216,13 +2202,8 @@ test "collect vote lockouts sums" { // ancestors: slot 1 has ancestor 0, slot 0 has no ancestors var ancestors = std.AutoArrayHashMapUnmanaged(u64, Ancestors).empty; - defer { - var it = ancestors.iterator(); - while (it.next()) |entry| { - entry.value_ptr.*.deinit(allocator); - } - ancestors.deinit(allocator); - } + defer ancestors.deinit(allocator); + const set0: Ancestors = .EMPTY; var set1: Ancestors = .EMPTY; try set1.addSlot(0); @@ -2322,7 +2303,6 @@ test "is locked out empty" { defer replay_tower.deinit(std.testing.allocator); var ancestors: Ancestors = .EMPTY; - defer ancestors.deinit(std.testing.allocator); try ancestors.addSlot(0); const result = try replay_tower.tower.isLockedOut( @@ -2337,7 +2317,6 @@ test "is locked out root slot child pass" { defer replay_tower.deinit(std.testing.allocator); var ancestors: Ancestors = .EMPTY; - defer ancestors.deinit(std.testing.allocator); try ancestors.addSlot(0); replay_tower.tower.vote_state.root_slot = 0; @@ -2354,7 +2333,6 @@ test "is locked out root slot sibling fail" { defer replay_tower.deinit(std.testing.allocator); var ancestors: Ancestors = .EMPTY; - defer ancestors.deinit(std.testing.allocator); try ancestors.addSlot(0); replay_tower.tower.vote_state.root_slot = 0; @@ -2415,7 +2393,6 @@ test "is locked out double vote" { defer replay_tower.deinit(std.testing.allocator); var ancestors: Ancestors = .EMPTY; - defer ancestors.deinit(std.testing.allocator); try ancestors.addSlot(0); for (0..2) |i| { @@ -2439,7 +2416,6 @@ test "is locked out child" { defer replay_tower.deinit(std.testing.allocator); var ancestors: Ancestors = .EMPTY; - defer ancestors.deinit(std.testing.allocator); try ancestors.addSlot(0); _ = try replay_tower.recordBankVote( @@ -2461,7 +2437,6 @@ test "is locked out sibling" { defer 
replay_tower.deinit(std.testing.allocator); var ancestors: Ancestors = .EMPTY; - defer ancestors.deinit(std.testing.allocator); try ancestors.addSlot(0); for (0..2) |i| { @@ -2485,7 +2460,6 @@ test "is locked out last vote expired" { defer replay_tower.deinit(std.testing.allocator); var ancestors: Ancestors = .EMPTY; - defer ancestors.deinit(std.testing.allocator); try ancestors.addSlot(0); for (0..2) |i| { @@ -3584,14 +3558,11 @@ test "greatestCommonAncestor" { // Test case: Basic common ancestor { var ancestors: AutoArrayHashMapUnmanaged(Slot, Ancestors) = .empty; - defer { - for (ancestors.values()) |*set| set.deinit(allocator); - ancestors.deinit(allocator); - } + defer ancestors.deinit(allocator); try ancestors.ensureUnusedCapacity(allocator, 2); - ancestors.putAssumeCapacity(10, try createAncestor(allocator, &.{ 5, 3, 1 })); - ancestors.putAssumeCapacity(20, try createAncestor(allocator, &.{ 8, 5, 2 })); + ancestors.putAssumeCapacity(10, try createAncestor(&.{ 5, 3, 1 })); + ancestors.putAssumeCapacity(20, try createAncestor(&.{ 8, 5, 2 })); // Both slots have common ancestor 5 try std.testing.expectEqual( @@ -3603,14 +3574,11 @@ test "greatestCommonAncestor" { // Test case: No common ancestor { var ancestors: AutoArrayHashMapUnmanaged(Slot, Ancestors) = .empty; - defer { - for (ancestors.values()) |*set| set.deinit(allocator); - ancestors.deinit(allocator); - } + defer ancestors.deinit(allocator); try ancestors.ensureUnusedCapacity(allocator, 2); - ancestors.putAssumeCapacity(10, try createAncestor(allocator, &.{ 3, 1 })); - ancestors.putAssumeCapacity(20, try createAncestor(allocator, &.{ 8, 2 })); + ancestors.putAssumeCapacity(10, try createAncestor(&.{ 3, 1 })); + ancestors.putAssumeCapacity(20, try createAncestor(&.{ 8, 2 })); try std.testing.expectEqual( @as(?Slot, null), @@ -3621,14 +3589,11 @@ test "greatestCommonAncestor" { // Test case: One empty ancestor set { var ancestors: AutoArrayHashMapUnmanaged(Slot, Ancestors) = .empty; - defer { - for 
(ancestors.values()) |*set| set.deinit(allocator); - ancestors.deinit(allocator); - } + defer ancestors.deinit(allocator); try ancestors.ensureUnusedCapacity(allocator, 2); - ancestors.putAssumeCapacity(10, try createAncestor(allocator, &.{ 5, 3 })); - ancestors.putAssumeCapacity(20, try createAncestor(allocator, &.{})); + ancestors.putAssumeCapacity(10, try createAncestor(&.{ 5, 3 })); + ancestors.putAssumeCapacity(20, try createAncestor(&.{})); try std.testing.expectEqual( @as(?Slot, null), @@ -3639,13 +3604,10 @@ test "greatestCommonAncestor" { // Test case: Missing slots { var ancestors: AutoArrayHashMapUnmanaged(Slot, Ancestors) = .empty; - defer { - for (ancestors.values()) |*set| set.deinit(allocator); - ancestors.deinit(allocator); - } + defer ancestors.deinit(allocator); try ancestors.ensureUnusedCapacity(allocator, 1); - ancestors.putAssumeCapacity(10, try createAncestor(allocator, &.{ 5, 3 })); + ancestors.putAssumeCapacity(10, try createAncestor(&.{ 5, 3 })); try std.testing.expectEqual( @as(?Slot, null), @@ -3656,13 +3618,10 @@ test "greatestCommonAncestor" { // Test case: Multiple common ancestors (should pick greatest) { var ancestors: AutoArrayHashMapUnmanaged(Slot, Ancestors) = .empty; - defer { - for (ancestors.values()) |*set| set.deinit(allocator); - ancestors.deinit(allocator); - } + defer ancestors.deinit(allocator); - try ancestors.put(allocator, 10, try createAncestor(allocator, &.{ 7, 5, 3 })); - try ancestors.put(allocator, 20, try createAncestor(allocator, &.{ 7, 5, 4 })); + try ancestors.put(allocator, 10, try createAncestor(&.{ 7, 5, 3 })); + try ancestors.put(allocator, 20, try createAncestor(&.{ 7, 5, 4 })); // Should pick 7 (greater than 5) try std.testing.expectEqual( @@ -4136,12 +4095,11 @@ pub fn createTestSlotHistory( return SlotHistory{ .bits = bits, .next_slot = 1 }; } -fn createAncestor(allocator: std.mem.Allocator, slots: []const Slot) !Ancestors { +fn createAncestor(slots: []const Slot) !Ancestors { if (!builtin.is_test) { 
@compileError("createAncestor should only be used in test"); } var set: Ancestors = .EMPTY; - errdefer set.deinit(allocator); for (slots) |slot| try set.addSlot(slot); return set; } @@ -4221,7 +4179,6 @@ pub const TestFixture = struct { for (self.descendants.values()) |set| set.deinit(allocator); self.descendants.deinit(allocator); - for (self.ancestors.values()) |*set| set.deinit(allocator); self.ancestors.deinit(allocator); } @@ -4336,10 +4293,8 @@ pub const TestFixture = struct { try self.ancestors.ensureTotalCapacity(allocator, input_tree.data.len); // Populate ancenstors var extended_ancestors = try getAncestors(allocator, input_tree); - defer { - for (extended_ancestors.values()) |*set| set.deinit(allocator); - extended_ancestors.deinit(allocator); - } + defer extended_ancestors.deinit(allocator); + try extendForkTreeAncestors(allocator, &self.ancestors, extended_ancestors); // Populate decendants @@ -4499,7 +4454,6 @@ fn getAncestors(allocator: std.mem.Allocator, tree: Tree) !std.AutoArrayHashMapU try visited.put(child.slot, {}); var child_ancestors: Ancestors = .EMPTY; - errdefer child_ancestors.deinit(allocator); try child_ancestors.addSlot(current); if (ancestors.getPtr(current)) |parent_ancestors| { @@ -4555,9 +4509,9 @@ pub fn extendForkTreeAncestors( return; } - for (extension.keys(), extension.values()) |slot, *extension_children| { + for (extension.keys(), extension.values()) |slot, extension_children| { const original_children = original.getPtr(slot) orelse { - try original.put(allocator, slot, try extension_children.clone(allocator)); + try original.put(allocator, slot, extension_children); continue; }; diff --git a/src/core/ancestors.zig b/src/core/ancestors.zig index 129ad0c8f2..2b93ebc071 100644 --- a/src/core/ancestors.zig +++ b/src/core/ancestors.zig @@ -63,12 +63,6 @@ pub const Ancestors = struct { } try bincode.write(writer, map, params); } - - pub fn clone(self: *const Ancestors, _: std.mem.Allocator) !Ancestors { - return self.*; - } - - 
pub fn deinit(_: *Ancestors, _: std.mem.Allocator) void {} }; /// A bit set that is allowed to progress forwards by setting bits out of bounds diff --git a/src/core/bank.zig b/src/core/bank.zig index 5b3aacc52a..8d4814a231 100644 --- a/src/core/bank.zig +++ b/src/core/bank.zig @@ -117,7 +117,7 @@ pub const SlotConstants = struct { .max_tick_height = bank_fields.max_tick_height, .fee_rate_governor = bank_fields.fee_rate_governor, .epoch_reward_status = .inactive, - .ancestors = try bank_fields.ancestors.clone(allocator), + .ancestors = bank_fields.ancestors, .feature_set = feature_set, .reserved_accounts = try reserved_accounts.initForSlot( allocator, @@ -151,7 +151,6 @@ pub const SlotConstants = struct { pub fn deinit(self_const: SlotConstants, allocator: Allocator) void { var self = self_const; self.epoch_reward_status.deinit(allocator); - self.ancestors.deinit(allocator); self.reserved_accounts.deinit(allocator); } }; @@ -419,9 +418,6 @@ pub const BankFields = struct { ) void { bank_fields.blockhash_queue.deinit(allocator); - var ancestors = bank_fields.ancestors; - ancestors.deinit(allocator); - bank_fields.hard_forks.deinit(allocator); bank_fields.stakes.deinit(allocator); @@ -438,9 +434,6 @@ pub const BankFields = struct { const blockhash_queue = try bank_fields.blockhash_queue.clone(allocator); errdefer blockhash_queue.deinit(allocator); - var ancestors = try bank_fields.ancestors.clone(allocator); - errdefer ancestors.deinit(allocator); - const hard_forks = try bank_fields.hard_forks.clone(allocator); errdefer hard_forks.deinit(allocator); @@ -455,7 +448,6 @@ pub const BankFields = struct { var cloned = bank_fields.*; cloned.blockhash_queue = blockhash_queue; - cloned.ancestors = ancestors; cloned.hard_forks = hard_forks; cloned.stakes = stakes; cloned.unused_accounts = unused_accounts; @@ -515,8 +507,7 @@ pub const BankFields = struct { var blockhash_queue = try BlockhashQueue.initRandom(allocator, random, max_list_entries); errdefer 
blockhash_queue.deinit(allocator); - var ancestors = try ancestorsRandom(random, allocator, max_list_entries); - errdefer ancestors.deinit(allocator); + const ancestors = try ancestorsRandom(random, max_list_entries); const hard_forks = try HardForks.initRandom(random, allocator, max_list_entries); errdefer hard_forks.deinit(allocator); @@ -575,11 +566,9 @@ pub const BankFields = struct { pub fn ancestorsRandom( random: std.Random, - allocator: std.mem.Allocator, max_list_entries: usize, ) !Ancestors { var ancestors = Ancestors{}; - errdefer ancestors.deinit(allocator); const lower_bound = random.int(Slot); const upper_bound = lower_bound + Ancestors.MAX_SLOT_RANGE; diff --git a/src/core/status_cache.zig b/src/core/status_cache.zig index e11a73c3e8..e0a5a15f0e 100644 --- a/src/core/status_cache.zig +++ b/src/core/status_cache.zig @@ -224,7 +224,6 @@ test "status cache (de)serialize Ancestors" { try ancestors.addSlot(2); try ancestors.addSlot(3); try ancestors.addSlot(4); - defer ancestors.deinit(allocator); const serialized = try bincode.writeAlloc(allocator, ancestors, .{}); diff --git a/src/replay/confirm_slot.zig b/src/replay/confirm_slot.zig index 43f71cc696..e75e387514 100644 --- a/src/replay/confirm_slot.zig +++ b/src/replay/confirm_slot.zig @@ -813,7 +813,6 @@ pub const TestState = struct { pub fn deinit(self: *TestState, allocator: Allocator) void { self.account_map.deinit(); self.status_cache.deinit(allocator); - self.ancestors.deinit(allocator); var bhq = self.blockhash_queue.tryWrite() orelse unreachable; bhq.get().deinit(allocator); bhq.unlock(); diff --git a/src/replay/consensus.zig b/src/replay/consensus.zig index a3b134440f..098096cd33 100644 --- a/src/replay/consensus.zig +++ b/src/replay/consensus.zig @@ -1789,10 +1789,8 @@ test "processConsensus - no duplicate confirmed without votes" { const SlotSet = sig.utils.collections.SortedSetUnmanaged(Slot); var ancestors: std.AutoArrayHashMapUnmanaged(Slot, Ancestors) = .empty; - defer { - for 
(ancestors.values()) |*val| val.deinit(testing.allocator); - ancestors.deinit(testing.allocator); - } + defer ancestors.deinit(testing.allocator); + var descendants: std.AutoArrayHashMapUnmanaged(Slot, SlotSet) = .empty; defer descendants.deinit(testing.allocator); defer { @@ -1947,10 +1945,8 @@ test "processConsensus - duplicate-confirmed is idempotent" { const SlotSet = sig.utils.collections.SortedSetUnmanaged(Slot); var ancestors: std.AutoArrayHashMapUnmanaged(Slot, Ancestors) = .empty; - defer { - for (ancestors.values()) |*val| val.deinit(testing.allocator); - ancestors.deinit(testing.allocator); - } + defer ancestors.deinit(testing.allocator); + var descendants: std.AutoArrayHashMapUnmanaged(Slot, SlotSet) = .empty; defer descendants.deinit(testing.allocator); defer { diff --git a/src/replay/freeze.zig b/src/replay/freeze.zig index 4db9228174..07304741d4 100644 --- a/src/replay/freeze.zig +++ b/src/replay/freeze.zig @@ -278,8 +278,7 @@ pub fn hashSlot(allocator: Allocator, params: HashSlotParams) !struct { ?LtHash, }); if (params.feature_set.active(.accounts_lt_hash, params.slot)) { - var parent_ancestors = try params.ancestors.clone(allocator); - defer parent_ancestors.deinit(allocator); + var parent_ancestors = params.ancestors.*; parent_ancestors.removeSlot(params.slot); var lt_hash = params.parent_lt_hash.* orelse return error.UnknownParentLtHash; @@ -570,7 +569,6 @@ test "delta hashes with many accounts" { Hash.parseRuntime("5tpzYxp8ghAETjXaXnZvxZov11iNEvSbDZXNAMoJX6ov") catch unreachable; var parent_ancestors = Ancestors{}; - defer parent_ancestors.deinit(allocator); try parent_ancestors.addSlot(0); try parent_ancestors.addSlot(1); diff --git a/src/replay/resolve_lookup.zig b/src/replay/resolve_lookup.zig index e0b5b3a315..ac1633aa2f 100644 --- a/src/replay/resolve_lookup.zig +++ b/src/replay/resolve_lookup.zig @@ -444,7 +444,6 @@ test resolveBatch { }; var ancestors = Ancestors{ .ancestors = .empty }; - defer ancestors.deinit(std.testing.allocator); 
try ancestors.addSlot(0); const slot_hashes = try SlotHashes.init(std.testing.allocator); @@ -551,7 +550,6 @@ test getLookupTable { defer map.deinit(); var ancestors = sig.core.Ancestors{}; - defer ancestors.deinit(allocator); try ancestors.addSlot(0); const account_reader = map.accountReader().forSlot(&ancestors); diff --git a/src/replay/service.zig b/src/replay/service.zig index 389de57871..0461270114 100644 --- a/src/replay/service.zig +++ b/src/replay/service.zig @@ -726,8 +726,7 @@ fn newSlotFromParent( .clone(allocator); errdefer epoch_reward_status.deinit(allocator); - var ancestors = try parent_constants.ancestors.clone(allocator); - errdefer ancestors.deinit(allocator); + var ancestors = parent_constants.ancestors; try ancestors.addSlot(slot); var feature_set = try getActiveFeatures(allocator, account_reader.forSlot(&ancestors), slot); diff --git a/src/replay/update_sysvar.zig b/src/replay/update_sysvar.zig index fad1da67cf..7e8478ff68 100644 --- a/src/replay/update_sysvar.zig +++ b/src/replay/update_sysvar.zig @@ -615,7 +615,6 @@ test fillMissingSysvarCacheEntries { // Set slot and ancestors const slot = 10; var ancestors = Ancestors{}; - defer ancestors.deinit(allocator); try ancestors.addSlot(slot); // Create a sysvar cache with all sysvars randomly initialized. 
@@ -840,7 +839,6 @@ test "update all sysvars" { var slot: Slot = 10; const rent = Rent.DEFAULT; var ancestors = Ancestors{}; - defer ancestors.deinit(allocator); try ancestors.addSlot(slot); // Create and insert sysvar defaults diff --git a/src/runtime/check_transactions.zig b/src/runtime/check_transactions.zig index b757d9e5c0..cba7b173ac 100644 --- a/src/runtime/check_transactions.zig +++ b/src/runtime/check_transactions.zig @@ -444,8 +444,7 @@ test checkStatusCache { var prng = std.Random.DefaultPrng.init(0); - var ancestors = Ancestors{}; - defer ancestors.deinit(allocator); + var ancestors = Ancestors.EMPTY; var status_cache: sig.core.StatusCache = .DEFAULT; defer status_cache.deinit(allocator); diff --git a/src/runtime/transaction_execution.zig b/src/runtime/transaction_execution.zig index c6150ad253..f677d0cb2c 100644 --- a/src/runtime/transaction_execution.zig +++ b/src/runtime/transaction_execution.zig @@ -950,8 +950,7 @@ test "loadAndExecuteTransaction: simple transfer transaction" { }, ); - var ancestors: Ancestors = .{}; - defer ancestors.deinit(allocator); + var ancestors: Ancestors = .EMPTY; const feature_set: FeatureSet = .ALL_ENABLED_AT_GENESIS; From 0994e3a15a438327dc3aad77b70eaf19073d9360 Mon Sep 17 00:00:00 2001 From: Drew Nutter Date: Tue, 9 Sep 2025 16:43:28 -0400 Subject: [PATCH 11/17] fix(conformance): ancestors deinit --- conformance/src/txn_execute.zig | 1 - 1 file changed, 1 deletion(-) diff --git a/conformance/src/txn_execute.zig b/conformance/src/txn_execute.zig index 54fcffce1e..aa3b9f09ec 100644 --- a/conformance/src/txn_execute.zig +++ b/conformance/src/txn_execute.zig @@ -187,7 +187,6 @@ fn executeTxnContext( var epoch_schedule: EpochSchedule = undefined; var ancestors: Ancestors = .{}; - defer ancestors.deinit(allocator); var compute_budget = ComputeBudget.DEFAULT; compute_budget.compute_unit_limit = compute_budget.compute_unit_limit; From 0be7fa75041fd0ee7649f71944f1dcf20221d84d Mon Sep 17 00:00:00 2001 From: Drew Nutter Date: Tue, 
9 Sep 2025 16:47:18 -0400 Subject: [PATCH 12/17] refactor(ancestors): replace .{} with .EMPTY --- conformance/src/txn_execute.zig | 2 +- src/accountsdb/account_store.zig | 4 ++-- src/accountsdb/db.zig | 22 +++++++++++----------- src/core/ancestors.zig | 2 +- src/core/bank.zig | 4 ++-- src/core/status_cache.zig | 8 ++++---- src/replay/confirm_slot.zig | 2 +- src/replay/freeze.zig | 4 ++-- src/replay/resolve_lookup.zig | 2 +- src/replay/update_sysvar.zig | 4 ++-- src/runtime/transaction_execution.zig | 2 +- 11 files changed, 28 insertions(+), 28 deletions(-) diff --git a/conformance/src/txn_execute.zig b/conformance/src/txn_execute.zig index aa3b9f09ec..ddc7fefbb0 100644 --- a/conformance/src/txn_execute.zig +++ b/conformance/src/txn_execute.zig @@ -186,7 +186,7 @@ fn executeTxnContext( var parent_hash: Hash = Hash.ZEROES; var epoch_schedule: EpochSchedule = undefined; - var ancestors: Ancestors = .{}; + var ancestors: Ancestors = .EMPTY; var compute_budget = ComputeBudget.DEFAULT; compute_budget.compute_unit_limit = compute_budget.compute_unit_limit; diff --git a/src/accountsdb/account_store.zig b/src/accountsdb/account_store.zig index 9817c1856b..6f0a1f5049 100644 --- a/src/accountsdb/account_store.zig +++ b/src/accountsdb/account_store.zig @@ -434,7 +434,7 @@ test "AccountStore does not return 0-lamport accounts from accountsdb" { try std.testing.expectEqual(null, try reader.getLatest(zero_lamport_address)); try std.testing.expectEqual(1, (try reader.getLatest(one_lamport_address)).?.lamports); - var ancestors = Ancestors{}; + var ancestors = Ancestors.EMPTY; try ancestors.addSlot(0); const slot_reader = db.accountReader().forSlot(&ancestors); @@ -454,7 +454,7 @@ test ThreadSafeAccountMap { const account_store = tsm.accountStore(); const account_reader = tsm.accountReader(); - var ancestors1: Ancestors = .{}; + var ancestors1: Ancestors = .EMPTY; const slot1: Slot = 1; const addr1: Pubkey = .initRandom(random); try ancestors1.addSlot(slot1); diff --git 
a/src/accountsdb/db.zig b/src/accountsdb/db.zig index 941e0ffc52..479035194d 100644 --- a/src/accountsdb/db.zig +++ b/src/accountsdb/db.zig @@ -3623,7 +3623,7 @@ test "write and read an account (write single + read with ancestors)" { // assume we've progessed past the need for ancestors { accounts_db.largest_flushed_slot.store(10_000, .monotonic); - var account = (try accounts_db.getAccountWithAncestors(&pubkey, &.{})).?; + var account = (try accounts_db.getAccountWithAncestors(&pubkey, &.EMPTY)).?; accounts_db.largest_flushed_slot.store(0, .monotonic); defer account.deinit(allocator); try std.testing.expect(test_account.equals(&account)); @@ -3631,7 +3631,7 @@ test "write and read an account (write single + read with ancestors)" { // slot is in ancestors { - var ancestors = sig.core.Ancestors{}; + var ancestors = sig.core.Ancestors.EMPTY; try ancestors.addSlot(5083); var account = (try accounts_db.getAccountWithAncestors(&pubkey, &ancestors)).?; @@ -3640,7 +3640,7 @@ test "write and read an account (write single + read with ancestors)" { } // slot is not in ancestors - try std.testing.expectEqual(null, accounts_db.getAccountWithAncestors(&pubkey, &.{})); + try std.testing.expectEqual(null, accounts_db.getAccountWithAncestors(&pubkey, &.EMPTY)); // write account to the same pubkey in the next slot (!) 
{ @@ -3666,7 +3666,7 @@ test "write and read an account (write single + read with ancestors)" { // prev slot, get prev account { - var ancestors = sig.core.Ancestors{}; + var ancestors = sig.core.Ancestors.EMPTY; try ancestors.addSlot(5083); var account = (try accounts_db.getAccountWithAncestors(&pubkey, &ancestors)).?; @@ -3676,7 +3676,7 @@ test "write and read an account (write single + read with ancestors)" { // new slot, get new account { - var ancestors = sig.core.Ancestors{}; + var ancestors = sig.core.Ancestors.EMPTY; try ancestors.addSlot(5084); var account = (try accounts_db.getAccountWithAncestors(&pubkey, &ancestors)).?; @@ -4580,7 +4580,7 @@ test "insert multiple accounts on same slot" { const slot: Slot = 10; // Create ancestors with initial slot - var ancestors = Ancestors{}; + var ancestors = Ancestors.EMPTY; try ancestors.addSlot(slot); // Insert 50 random accounts on current slot and reload them immediately @@ -4660,7 +4660,7 @@ test "insert multiple accounts on multiple slots" { for (0..50) |i| { const slot = slots[random.uintLessThan(u64, slots.len)]; - var ancestors = Ancestors{}; + var ancestors = Ancestors.EMPTY; try ancestors.addSlot(slot); const pubkey = Pubkey.initRandom(random); @@ -4702,7 +4702,7 @@ test "insert account on multiple slots" { for (0..num_slots_to_insert) |j| { const slot = slots[random.uintLessThan(u64, slots.len)]; - var ancestors = Ancestors{}; + var ancestors = Ancestors.EMPTY; try ancestors.addSlot(slot); errdefer std.log.err( @@ -4748,7 +4748,7 @@ test "missing ancestor returns null" { defer allocator.free(account.data); try accounts_db.putAccount(slot, pubkey, account); - var ancestors = Ancestors{}; + var ancestors = Ancestors.EMPTY; try std.testing.expectEqual(null, try accounts_db.getAccountWithAncestors(&pubkey, &ancestors)); } @@ -4764,7 +4764,7 @@ test "overwrite account in same slot" { const slot: Slot = 15; const pubkey = Pubkey.initRandom(random); - var ancestors = Ancestors{}; + var ancestors = 
Ancestors.EMPTY; try ancestors.addSlot(slot); const first = try createRandomAccount(allocator, random); @@ -4833,7 +4833,7 @@ test "insert many duplicate individual accounts, get latest with ancestors" { for (pubkeys, expected_latest) |pubkey, maybe_expected| { const expected = maybe_expected orelse return error.ExpectedMissing; - var ancestors = Ancestors{}; + var ancestors = Ancestors.EMPTY; try ancestors.addSlot(expected.slot); const maybe_actual = try accounts_db.getAccountWithAncestors(&pubkey, &ancestors); diff --git a/src/core/ancestors.zig b/src/core/ancestors.zig index 2b93ebc071..2140f2219d 100644 --- a/src/core/ancestors.zig +++ b/src/core/ancestors.zig @@ -7,7 +7,7 @@ const bincode = sig.bincode; const Slot = sig.core.Slot; pub const Ancestors = struct { - ancestors: RingBitSet(MAX_SLOT_RANGE) = .empty, + ancestors: RingBitSet(MAX_SLOT_RANGE), pub const EMPTY: Ancestors = .{ .ancestors = .empty }; diff --git a/src/core/bank.zig b/src/core/bank.zig index 8d4814a231..23117fbc4e 100644 --- a/src/core/bank.zig +++ b/src/core/bank.zig @@ -131,7 +131,7 @@ pub const SlotConstants = struct { allocator: Allocator, fee_rate_governor: sig.core.genesis_config.FeeRateGovernor, ) Allocator.Error!SlotConstants { - var ancestors = Ancestors{}; + var ancestors = Ancestors.EMPTY; ancestors.addSlot(0) catch unreachable; return .{ .parent_slot = 0, @@ -568,7 +568,7 @@ pub fn ancestorsRandom( random: std.Random, max_list_entries: usize, ) !Ancestors { - var ancestors = Ancestors{}; + var ancestors = Ancestors.EMPTY; const lower_bound = random.int(Slot); const upper_bound = lower_bound + Ancestors.MAX_SLOT_RANGE; diff --git a/src/core/status_cache.zig b/src/core/status_cache.zig index e0a5a15f0e..e02f1fe220 100644 --- a/src/core/status_cache.zig +++ b/src/core/status_cache.zig @@ -256,7 +256,7 @@ test "status cache empty" { status_cache.getStatus( &signature.data, &block_hash, - &Ancestors{}, + &Ancestors.EMPTY, ), ); } @@ -291,7 +291,7 @@ test "status cache find without 
ancestor fork" { const signature = sig.core.Signature.ZEROES; const blockhash = Hash.ZEROES; - var ancestors: Ancestors = .{}; + var ancestors: Ancestors = .EMPTY; var status_cache: StatusCache = .DEFAULT; defer status_cache.deinit(allocator); @@ -312,7 +312,7 @@ test "status cache find with root ancestor fork" { const signature = sig.core.Signature.ZEROES; const blockhash = Hash.ZEROES; - var ancestors: Ancestors = .{}; + var ancestors: Ancestors = .EMPTY; var status_cache: StatusCache = .DEFAULT; defer status_cache.deinit(allocator); @@ -358,7 +358,7 @@ test "status cache root expires" { const signature = sig.core.Signature.ZEROES; const blockhash = Hash.ZEROES; - var ancestors: Ancestors = .{}; + var ancestors: Ancestors = .EMPTY; var status_cache: StatusCache = .DEFAULT; defer status_cache.deinit(allocator); diff --git a/src/replay/confirm_slot.zig b/src/replay/confirm_slot.zig index e75e387514..5f55ce37cb 100644 --- a/src/replay/confirm_slot.zig +++ b/src/replay/confirm_slot.zig @@ -787,7 +787,7 @@ pub const TestState = struct { errdefer blockhash_queue.deinit(allocator); try blockhash_queue.insertGenesisHash(allocator, .ZEROES, 1); - var ancestors = Ancestors{}; + var ancestors = Ancestors.EMPTY; try ancestors.addSlot(0); const replay_votes_channel: *sig.sync.Channel(ParsedVote) = try .create(allocator); diff --git a/src/replay/freeze.zig b/src/replay/freeze.zig index 07304741d4..7e034da5fe 100644 --- a/src/replay/freeze.zig +++ b/src/replay/freeze.zig @@ -365,7 +365,7 @@ pub fn deltaLtHash( } test "deltaLtHash is identity for 0 accounts" { - try std.testing.expectEqual(LtHash.IDENTITY, try deltaLtHash(.noop, 0, &Ancestors{})); + try std.testing.expectEqual(LtHash.IDENTITY, try deltaLtHash(.noop, 0, &Ancestors.EMPTY)); } test "deltaMerkleHash for 0 accounts" { @@ -568,7 +568,7 @@ test "delta hashes with many accounts" { const expected_merkle_hash = Hash.parseRuntime("5tpzYxp8ghAETjXaXnZvxZov11iNEvSbDZXNAMoJX6ov") catch unreachable; - var parent_ancestors = 
Ancestors{}; + var parent_ancestors = Ancestors.EMPTY; try parent_ancestors.addSlot(0); try parent_ancestors.addSlot(1); diff --git a/src/replay/resolve_lookup.zig b/src/replay/resolve_lookup.zig index ac1633aa2f..ba21916f89 100644 --- a/src/replay/resolve_lookup.zig +++ b/src/replay/resolve_lookup.zig @@ -549,7 +549,7 @@ test getLookupTable { var map = sig.accounts_db.ThreadSafeAccountMap.init(allocator); defer map.deinit(); - var ancestors = sig.core.Ancestors{}; + var ancestors = sig.core.Ancestors.EMPTY; try ancestors.addSlot(0); const account_reader = map.accountReader().forSlot(&ancestors); diff --git a/src/replay/update_sysvar.zig b/src/replay/update_sysvar.zig index 7e8478ff68..9fd9965c39 100644 --- a/src/replay/update_sysvar.zig +++ b/src/replay/update_sysvar.zig @@ -614,7 +614,7 @@ test fillMissingSysvarCacheEntries { // Set slot and ancestors const slot = 10; - var ancestors = Ancestors{}; + var ancestors = Ancestors.EMPTY; try ancestors.addSlot(slot); // Create a sysvar cache with all sysvars randomly initialized. 
@@ -838,7 +838,7 @@ test "update all sysvars" { var capitalization = Atomic(u64).init(0); var slot: Slot = 10; const rent = Rent.DEFAULT; - var ancestors = Ancestors{}; + var ancestors = Ancestors.EMPTY; try ancestors.addSlot(slot); // Create and insert sysvar defaults diff --git a/src/runtime/transaction_execution.zig b/src/runtime/transaction_execution.zig index f677d0cb2c..a5c8e06727 100644 --- a/src/runtime/transaction_execution.zig +++ b/src/runtime/transaction_execution.zig @@ -756,7 +756,7 @@ test "loadAndExecuteTransactions: no transactions" { const transactions: []RuntimeTransaction = &.{}; var batch_account_cache: account_loader.BatchAccountCache = .{}; - const ancestors: Ancestors = .{}; + const ancestors: Ancestors = .EMPTY; const feature_set: FeatureSet = .ALL_DISABLED; var status_cache: StatusCache = .DEFAULT; const sysvar_cache: SysvarCache = .{}; From 48bb2eb79bbcf601e3cd6014de654bef51b74276 Mon Sep 17 00:00:00 2001 From: Drew Nutter Date: Tue, 9 Sep 2025 16:51:17 -0400 Subject: [PATCH 13/17] refactor: move RingBitSet to collections --- src/core/ancestors.zig | 76 ++------------------------------------- src/utils/collections.zig | 74 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 76 insertions(+), 74 deletions(-) diff --git a/src/core/ancestors.zig b/src/core/ancestors.zig index 2140f2219d..1d192f7253 100644 --- a/src/core/ancestors.zig +++ b/src/core/ancestors.zig @@ -4,6 +4,8 @@ const sig = @import("../sig.zig"); const HashMap = std.AutoArrayHashMapUnmanaged; const bincode = sig.bincode; + +const RingBitSet = sig.utils.collections.RingBitSet; const Slot = sig.core.Slot; pub const Ancestors = struct { @@ -64,77 +66,3 @@ pub const Ancestors = struct { try bincode.write(writer, map, params); } }; - -/// A bit set that is allowed to progress forwards by setting bits out of bounds -/// and deleting old values, but not allowed to regress backwards. 
-pub fn RingBitSet(len: usize) type { - return struct { - /// underlying bit set - inner: InnerSet, - /// The lowest value represented - bottom: usize, - - const InnerSet = std.bit_set.ArrayBitSet(usize, len); - - pub const empty = RingBitSet(len){ - .inner = .initEmpty(), - .bottom = 0, - }; - - pub fn isSet(self: *const RingBitSet(len), index: usize) bool { - if (index < self.bottom or index >= self.bottom + len) return false; - return self.inner.isSet(index % len); - } - - pub fn set(self: *RingBitSet(len), index: usize) error{Underflow}!void { - if (index < self.bottom) return error.Underflow; - if (index - self.bottom > len) { - const wipe_start = self.bottom; - self.bottom = 1 + index - len; - const wipe_end = self.bottom; - if (wipe_start % len > wipe_end % len) { - self.inner.setRangeValue(.{ .start = wipe_start % len, .end = len }, false); - self.inner.setRangeValue(.{ .start = 0, .end = wipe_end % len }, false); - } else { - self.inner.setRangeValue( - .{ .start = wipe_start % len, .end = wipe_end % len }, - false, - ); - } - } - self.inner.set(index % len); - } - - pub fn unset(self: *RingBitSet(len), index: usize) void { - if (index < self.bottom or index >= self.bottom + len) return; - return self.inner.unset(index % len); - } - - pub fn count(self: *const RingBitSet(len)) usize { - return self.inner.count(); - } - - pub const Iterator = struct { - inner: InnerSet.Iterator(.{}), - bottom: usize, - - pub fn next(self: *Iterator) ?usize { - if (self.inner.next()) |item| { - return if (item < self.bottom % len) - item + self.bottom - len - else - item + self.bottom; - } - return null; - } - }; - - /// items are not sorted - pub fn iterator(self: *const RingBitSet(len)) Iterator { - return .{ - .inner = self.inner.iterator(.{}), - .bottom = self.bottom, - }; - } - }; -} diff --git a/src/utils/collections.zig b/src/utils/collections.zig index 5ff863d918..d6c86da100 100644 --- a/src/utils/collections.zig +++ b/src/utils/collections.zig @@ -1092,6 +1092,80 @@ 
pub fn Window(T: type) type { }; } +/// A bit set that is allowed to progress forwards by setting bits out of bounds +/// and deleting old values, but not allowed to regress backwards. +pub fn RingBitSet(len: usize) type { + return struct { + /// underlying bit set + inner: InnerSet, + /// The lowest value represented + bottom: usize, + + const InnerSet = std.bit_set.ArrayBitSet(usize, len); + + pub const empty = RingBitSet(len){ + .inner = .initEmpty(), + .bottom = 0, + }; + + pub fn isSet(self: *const RingBitSet(len), index: usize) bool { + if (index < self.bottom or index >= self.bottom + len) return false; + return self.inner.isSet(index % len); + } + + pub fn set(self: *RingBitSet(len), index: usize) error{Underflow}!void { + if (index < self.bottom) return error.Underflow; + if (index - self.bottom > len) { + const wipe_start = self.bottom; + self.bottom = 1 + index - len; + const wipe_end = self.bottom; + if (wipe_start % len > wipe_end % len) { + self.inner.setRangeValue(.{ .start = wipe_start % len, .end = len }, false); + self.inner.setRangeValue(.{ .start = 0, .end = wipe_end % len }, false); + } else { + self.inner.setRangeValue( + .{ .start = wipe_start % len, .end = wipe_end % len }, + false, + ); + } + } + self.inner.set(index % len); + } + + pub fn unset(self: *RingBitSet(len), index: usize) void { + if (index < self.bottom or index >= self.bottom + len) return; + return self.inner.unset(index % len); + } + + pub fn count(self: *const RingBitSet(len)) usize { + return self.inner.count(); + } + + pub const Iterator = struct { + inner: InnerSet.Iterator(.{}), + bottom: usize, + + pub fn next(self: *Iterator) ?usize { + if (self.inner.next()) |item| { + return if (item < self.bottom % len) + item + self.bottom - len + else + item + self.bottom; + } + return null; + } + }; + + /// items are not sorted + pub fn iterator(self: *const RingBitSet(len)) Iterator { + return .{ + .inner = self.inner.iterator(.{}), + .bottom = self.bottom, + }; + } + }; +} + 
pub fn cloneMapAndValues(allocator: Allocator, map: anytype) Allocator.Error!@TypeOf(map) { var cloned: @TypeOf(map) = .{}; errdefer deinitMapAndValues(allocator, cloned); From 459551e1e042e2054ac3f1ec1fc78b0cf8ad36c2 Mon Sep 17 00:00:00 2001 From: Drew Nutter Date: Tue, 9 Sep 2025 17:25:35 -0400 Subject: [PATCH 14/17] refactor: decouple ancestors from bankfields serialization --- src/accountsdb/fuzz.zig | 4 ++-- src/cmd.zig | 3 ++- src/consensus/replay_tower.zig | 10 ++++----- src/core/ancestors.zig | 40 ++++++++-------------------------- src/core/bank.zig | 40 +++++++++++++++++++--------------- src/core/status_cache.zig | 29 ------------------------ 6 files changed, 40 insertions(+), 86 deletions(-) diff --git a/src/accountsdb/fuzz.zig b/src/accountsdb/fuzz.zig index e20d1249ff..c96f887ca1 100644 --- a/src/accountsdb/fuzz.zig +++ b/src/accountsdb/fuzz.zig @@ -253,8 +253,8 @@ pub fn run(seed: u64, args: *std.process.ArgIterator) !void { try ancestors.addSlot(top_slot); const current_slot = if (!non_sequential_slots) top_slot else slot: { - var ancestor_slots = try allocator.alloc(Slot, ancestors.ancestors.count()); - var iter = ancestors.ancestors.iterator(); + var ancestor_slots = try allocator.alloc(Slot, ancestors.count()); + var iter = ancestors.iterator(); var i: usize = 0; while (iter.next()) |slot| : (i += 1) ancestor_slots[i] = slot; std.mem.sort(Slot, ancestor_slots, {}, std.sort.asc(Slot)); diff --git a/src/cmd.zig b/src/cmd.zig index 4cb6abba84..50609e48ad 100644 --- a/src/cmd.zig +++ b/src/cmd.zig @@ -1223,9 +1223,10 @@ fn validator( epoch_stakes, ); errdefer current_epoch_constants.deinit(allocator); + const ancestors = try sig.core.Ancestors.fromMap(&bank_fields.ancestors); const feature_set = try sig.replay.service.getActiveFeatures( allocator, - loaded_snapshot.accounts_db.accountReader().forSlot(&bank_fields.ancestors), + loaded_snapshot.accounts_db.accountReader().forSlot(&ancestors), bank_fields.slot, ); const root_slot_constants = try 
sig.core.SlotConstants.fromBankFields( diff --git a/src/consensus/replay_tower.zig b/src/consensus/replay_tower.zig index 0b9ebb39df..61219297a7 100644 --- a/src/consensus/replay_tower.zig +++ b/src/consensus/replay_tower.zig @@ -416,7 +416,7 @@ pub const ReplayTower = struct { return false; } - if (last_vote_ancestors.ancestors.count() == 0) { + if (last_vote_ancestors.count() == 0) { // If `last_vote_ancestors` is empty, this means we must have a last vote that is stray. If the `last_voted_slot` // is stray, it must be descended from some earlier root than the latest root (the anchor at startup). // The above check also guarentees that the candidate slot is not a descendant of this stray last vote. @@ -1777,7 +1777,7 @@ pub fn collectVoteLockouts( const fork_stake: u64 = blk: { var bank_ancestors = ancestors.get(bank_slot) orelse break :blk 0; var max_parent: ?Slot = null; - var iter = bank_ancestors.ancestors.iterator(); + var iter = bank_ancestors.iterator(); while (iter.next()) |slot| { if (max_parent == null or slot > max_parent.?) 
{ max_parent = slot; @@ -1829,7 +1829,7 @@ pub fn populateAncestorVotedStakes( if (ancestors.getPtr(vote_slot)) |slot_ancestors| { _ = try voted_stakes.getOrPutValue(allocator, vote_slot, 0); - var iter = slot_ancestors.ancestors.iterator(); + var iter = slot_ancestors.iterator(); while (iter.next()) |slot| { _ = try voted_stakes.getOrPutValue(allocator, slot, 0); } @@ -1849,7 +1849,7 @@ fn updateAncestorVotedStakes( if (ancestors.getPtr(voted_slot)) |vote_slot_ancestors| { const entry_vote_stake = try voted_stakes.getOrPutValue(allocator, voted_slot, 0); entry_vote_stake.value_ptr.* += voted_stake; - var iter = vote_slot_ancestors.ancestors.iterator(); + var iter = vote_slot_ancestors.iterator(); while (iter.next()) |ancestor_slot| { const entry_voted_stake = try voted_stakes.getOrPutValue(allocator, ancestor_slot, 0); entry_voted_stake.value_ptr.* += voted_stake; @@ -4457,7 +4457,7 @@ fn getAncestors(allocator: std.mem.Allocator, tree: Tree) !std.AutoArrayHashMapU try child_ancestors.addSlot(current); if (ancestors.getPtr(current)) |parent_ancestors| { - var iter = parent_ancestors.ancestors.iterator(); + var iter = parent_ancestors.iterator(); while (iter.next()) |item| { try child_ancestors.addSlot(item); } diff --git a/src/core/ancestors.zig b/src/core/ancestors.zig index 1d192f7253..f21658c38f 100644 --- a/src/core/ancestors.zig +++ b/src/core/ancestors.zig @@ -16,13 +16,11 @@ pub const Ancestors = struct { /// The maximum allowed distance from the highest to lowest contained slot. pub const MAX_SLOT_RANGE = 256; - /// For some reason, agave serializes Ancestors as HashMap(slot, usize). But deserializing - /// ignores the usize, and serializing just uses the value 0. So we need to serialize void - /// as if it's 0, and deserialize 0 as if it's void. 
- pub const @"!bincode-config:ancestors" = bincode.FieldConfig(RingBitSet(MAX_SLOT_RANGE)){ - .serializer = serialize, - .deserializer = deserialize, - }; + pub fn fromMap(map: *const HashMap(Slot, usize)) error{Underflow}!Ancestors { + var set = RingBitSet(MAX_SLOT_RANGE).empty; + for (map.keys()) |slot| try set.set(slot); + return .{ .ancestors = set }; + } pub fn addSlot(self: *Ancestors, slot: Slot) error{Underflow}!void { try self.ancestors.set(slot); @@ -36,33 +34,13 @@ pub const Ancestors = struct { return self.ancestors.isSet(slot); } + pub fn count(self: *const Ancestors) usize { + return self.ancestors.count(); + } + pub const Iterator = RingBitSet(MAX_SLOT_RANGE).Iterator; pub fn iterator(self: *const Ancestors) Iterator { return self.ancestors.iterator(); } - - fn deserialize( - l: *bincode.LimitAllocator, - reader: anytype, - params: bincode.Params, - ) anyerror!RingBitSet(MAX_SLOT_RANGE) { - const deserialized = try bincode.readWithLimit(l, HashMap(Slot, usize), reader, params); - defer bincode.free(l.allocator(), deserialized); - var set = RingBitSet(MAX_SLOT_RANGE).empty; - for (deserialized.keys()) |slot| { - try set.set(slot); - } - return set; - } - - fn serialize(writer: anytype, data: anytype, params: bincode.Params) anyerror!void { - var map = HashMap(Slot, usize){}; - defer map.deinit(std.heap.c_allocator); // TODO: change this - var iter = data.iterator(); - while (iter.next()) |slot| { - try map.put(std.heap.c_allocator, slot, 0); - } - try bincode.write(writer, map, params); - } }; diff --git a/src/core/bank.zig b/src/core/bank.zig index 23117fbc4e..f7d55a8eed 100644 --- a/src/core/bank.zig +++ b/src/core/bank.zig @@ -107,7 +107,8 @@ pub const SlotConstants = struct { allocator: Allocator, bank_fields: *const BankFields, feature_set: FeatureSet, - ) Allocator.Error!SlotConstants { + ) !SlotConstants { + const ancestors = try sig.core.Ancestors.fromMap(&bank_fields.ancestors); return .{ .parent_slot = bank_fields.parent_slot, .parent_hash = 
bank_fields.parent_hash, @@ -117,7 +118,7 @@ pub const SlotConstants = struct { .max_tick_height = bank_fields.max_tick_height, .fee_rate_governor = bank_fields.fee_rate_governor, .epoch_reward_status = .inactive, - .ancestors = bank_fields.ancestors, + .ancestors = ancestors, .feature_set = feature_set, .reserved_accounts = try reserved_accounts.initForSlot( allocator, @@ -377,7 +378,7 @@ pub const EpochConstants = struct { /// Analogous to [DeserializableVersionedBank](https://github.com/anza-xyz/agave/blob/9c899a72414993dc005f11afb5df10752b10810b/runtime/src/serde_snapshot.rs#L134). pub const BankFields = struct { blockhash_queue: BlockhashQueue, - ancestors: Ancestors, + ancestors: std.AutoArrayHashMapUnmanaged(Slot, usize), hash: Hash, parent_hash: Hash, parent_slot: Slot, @@ -412,19 +413,16 @@ pub const BankFields = struct { epoch_stakes: EpochStakesMap, is_delta: bool, - pub fn deinit( - bank_fields: *const BankFields, - allocator: std.mem.Allocator, - ) void { - bank_fields.blockhash_queue.deinit(allocator); + pub fn deinit(self: *const BankFields, allocator: std.mem.Allocator) void { + var ancestors = self.ancestors; + ancestors.deinit(allocator); - bank_fields.hard_forks.deinit(allocator); - - bank_fields.stakes.deinit(allocator); - - bank_fields.unused_accounts.deinit(allocator); + self.blockhash_queue.deinit(allocator); + self.hard_forks.deinit(allocator); + self.stakes.deinit(allocator); + self.unused_accounts.deinit(allocator); - deinitMapAndValues(allocator, bank_fields.epoch_stakes); + deinitMapAndValues(allocator, self.epoch_stakes); } pub fn clone( @@ -434,6 +432,9 @@ pub const BankFields = struct { const blockhash_queue = try bank_fields.blockhash_queue.clone(allocator); errdefer blockhash_queue.deinit(allocator); + var ancestors = try bank_fields.ancestors.clone(allocator); + errdefer ancestors.deinit(allocator); + const hard_forks = try bank_fields.hard_forks.clone(allocator); errdefer hard_forks.deinit(allocator); @@ -448,6 +449,7 @@ pub const 
BankFields = struct { var cloned = bank_fields.*; cloned.blockhash_queue = blockhash_queue; + cloned.ancestors = ancestors; cloned.hard_forks = hard_forks; cloned.stakes = stakes; cloned.unused_accounts = unused_accounts; @@ -507,7 +509,8 @@ pub const BankFields = struct { var blockhash_queue = try BlockhashQueue.initRandom(allocator, random, max_list_entries); errdefer blockhash_queue.deinit(allocator); - const ancestors = try ancestorsRandom(random, max_list_entries); + var ancestors = try ancestorsRandom(allocator, random, max_list_entries); + errdefer ancestors.deinit(allocator); const hard_forks = try HardForks.initRandom(random, allocator, max_list_entries); errdefer hard_forks.deinit(allocator); @@ -565,16 +568,17 @@ pub const BankFields = struct { }; pub fn ancestorsRandom( + allocator: Allocator, random: std.Random, max_list_entries: usize, -) !Ancestors { - var ancestors = Ancestors.EMPTY; +) !std.AutoArrayHashMapUnmanaged(Slot, usize) { + var ancestors = std.AutoArrayHashMapUnmanaged(Slot, usize){}; const lower_bound = random.int(Slot); const upper_bound = lower_bound + Ancestors.MAX_SLOT_RANGE; for (0..@min(Ancestors.MAX_SLOT_RANGE, random.uintAtMost(usize, max_list_entries))) |_| { - try ancestors.addSlot(random.intRangeLessThan(Slot, lower_bound, upper_bound)); + try ancestors.put(allocator, random.intRangeLessThan(Slot, lower_bound, upper_bound), 0); } return ancestors; diff --git a/src/core/status_cache.zig b/src/core/status_cache.zig index e02f1fe220..a083ae8001 100644 --- a/src/core/status_cache.zig +++ b/src/core/status_cache.zig @@ -216,35 +216,6 @@ pub const StatusCache = struct { } }; -test "status cache (de)serialize Ancestors" { - const allocator = std.testing.allocator; - - var ancestors = Ancestors.EMPTY; - try ancestors.addSlot(1); - try ancestors.addSlot(2); - try ancestors.addSlot(3); - try ancestors.addSlot(4); - - const serialized = try bincode.writeAlloc(allocator, ancestors, .{}); - - defer allocator.free(serialized); - - const 
deserialized = try bincode.readFromSlice( - allocator, - HashMap(Slot, usize), - serialized, - .{}, - ); - defer bincode.free(allocator, deserialized); - - try std.testing.expectEqual(ancestors.ancestors.count(), deserialized.count()); - var iter = ancestors.iterator(); - while (iter.next()) |slot| { - try std.testing.expect(deserialized.contains(slot)); - } - try std.testing.expectEqualSlices(usize, &.{ 0, 0, 0, 0 }, deserialized.values()); -} - test "status cache empty" { const signature = sig.core.Signature.ZEROES; const block_hash = Hash.ZEROES; From 82e5c992dcb23da9dc6e84b43323db4daf2e3d03 Mon Sep 17 00:00:00 2001 From: Drew Nutter Date: Tue, 9 Sep 2025 17:50:24 -0400 Subject: [PATCH 15/17] fix(collections): RingBitSet off by one issue when setting out of bounds --- src/utils/collections.zig | 33 +++++++++++++++++++++++---------- 1 file changed, 23 insertions(+), 10 deletions(-) diff --git a/src/utils/collections.zig b/src/utils/collections.zig index d6c86da100..3a32b13b4a 100644 --- a/src/utils/collections.zig +++ b/src/utils/collections.zig @@ -1115,18 +1115,15 @@ pub fn RingBitSet(len: usize) type { pub fn set(self: *RingBitSet(len), index: usize) error{Underflow}!void { if (index < self.bottom) return error.Underflow; - if (index - self.bottom > len) { - const wipe_start = self.bottom; + if (1 + index - self.bottom > len) { + const wipe_start = self.bottom % len; self.bottom = 1 + index - len; - const wipe_end = self.bottom; - if (wipe_start % len > wipe_end % len) { - self.inner.setRangeValue(.{ .start = wipe_start % len, .end = len }, false); - self.inner.setRangeValue(.{ .start = 0, .end = wipe_end % len }, false); + const wipe_end = self.bottom % len; + if (wipe_start > wipe_end) { + self.inner.setRangeValue(.{ .start = wipe_start, .end = len }, false); + self.inner.setRangeValue(.{ .start = 0, .end = wipe_end }, false); } else { - self.inner.setRangeValue( - .{ .start = wipe_start % len, .end = wipe_end % len }, - false, - ); + 
self.inner.setRangeValue(.{ .start = wipe_start, .end = wipe_end }, false); } } self.inner.set(index % len); @@ -1535,3 +1532,19 @@ test "checkAllAllocationFailures in cloneMapAndValues" { try std.testing.checkAllAllocationFailures(std.testing.allocator, Clonable.runTest, .{}); } + +test RingBitSet { + var set = RingBitSet(10).empty; + + for (0..100) |i| { + try set.set(i); + try expect(set.isSet(i)); + try expectEqual(if (i > 9) 10 else i + 1, set.count()); + const first_set = i -| 9; + for (0..i) |j| { + const is_set = set.isSet(j); + const expected = if (j >= first_set) is_set else !is_set; + try expect(expected); + } + } +} From 11a8ab9ed67c24e2f8ea8cda14288dc805fe4022 Mon Sep 17 00:00:00 2001 From: Drew Nutter Date: Tue, 9 Sep 2025 17:51:32 -0400 Subject: [PATCH 16/17] fix: style --- src/core/ancestors.zig | 2 -- src/core/status_cache.zig | 1 - 2 files changed, 3 deletions(-) diff --git a/src/core/ancestors.zig b/src/core/ancestors.zig index f21658c38f..e5fbb11005 100644 --- a/src/core/ancestors.zig +++ b/src/core/ancestors.zig @@ -3,8 +3,6 @@ const sig = @import("../sig.zig"); const HashMap = std.AutoArrayHashMapUnmanaged; -const bincode = sig.bincode; - const RingBitSet = sig.utils.collections.RingBitSet; const Slot = sig.core.Slot; diff --git a/src/core/status_cache.zig b/src/core/status_cache.zig index a083ae8001..301a6027be 100644 --- a/src/core/status_cache.zig +++ b/src/core/status_cache.zig @@ -4,7 +4,6 @@ const sig = @import("../sig.zig"); const HashMap = std.AutoArrayHashMapUnmanaged; const ArrayList = std.ArrayListUnmanaged; const RwMux = sig.sync.RwMux; -const bincode = sig.bincode; const Hash = sig.core.Hash; const Slot = sig.core.Slot; From 97dfd89e3b633c89eab6e18c085c605994d5115a Mon Sep 17 00:00:00 2001 From: Drew Nutter Date: Tue, 23 Sep 2025 11:22:36 -0400 Subject: [PATCH 17/17] temp: try ancestors size of 8192 for conformance --- src/core/ancestors.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/core/ancestors.zig 
b/src/core/ancestors.zig index e5fbb11005..3473d4fd86 100644 --- a/src/core/ancestors.zig +++ b/src/core/ancestors.zig @@ -12,7 +12,7 @@ pub const Ancestors = struct { pub const EMPTY: Ancestors = .{ .ancestors = .empty }; /// The maximum allowed distance from the highest to lowest contained slot. - pub const MAX_SLOT_RANGE = 256; + pub const MAX_SLOT_RANGE = 8192; pub fn fromMap(map: *const HashMap(Slot, usize)) error{Underflow}!Ancestors { var set = RingBitSet(MAX_SLOT_RANGE).empty;