diff --git a/conformance/src/txn_execute.zig b/conformance/src/txn_execute.zig index b4b6f54e27..ddc7fefbb0 100644 --- a/conformance/src/txn_execute.zig +++ b/conformance/src/txn_execute.zig @@ -186,8 +186,7 @@ fn executeTxnContext( var parent_hash: Hash = Hash.ZEROES; var epoch_schedule: EpochSchedule = undefined; - var ancestors: Ancestors = .{}; - defer ancestors.deinit(allocator); + var ancestors: Ancestors = .EMPTY; var compute_budget = ComputeBudget.DEFAULT; compute_budget.compute_unit_limit = compute_budget.compute_unit_limit; @@ -214,7 +213,7 @@ fn executeTxnContext( // Bank::new_with_paths(...) // https://github.com/firedancer-io/agave/blob/10fe1eb29aac9c236fd72d08ae60a3ef61ee8353/runtime/src/bank.rs#L1162 { - try ancestors.addSlot(allocator, 0); + try ancestors.addSlot(0); // bank.compute_budget = runtime_config.compute_budget; // bank.transaction_account_lock_limit = null; // bank.transaction_debug_keys = null; @@ -506,7 +505,7 @@ fn executeTxnContext( // var new = Bank{...} // Create ancestors with new slot and all parent slots - try ancestors.addSlot(allocator, slot); + try ancestors.addSlot(slot); // Update epoch if (parent_slots_epoch < epoch) { diff --git a/src/accountsdb/account_store.zig b/src/accountsdb/account_store.zig index 1e35a1e24b..6f0a1f5049 100644 --- a/src/accountsdb/account_store.zig +++ b/src/accountsdb/account_store.zig @@ -261,7 +261,7 @@ pub const ThreadSafeAccountMap = struct { const list = map.get(address) orelse return null; for (list.items) |slot_account| { const slot, const account = slot_account; - if (ancestors.ancestors.contains(slot)) { + if (ancestors.containsSlot(slot)) { return if (account.lamports == 0) null else try toAccount(self.allocator, account); } } @@ -434,9 +434,8 @@ test "AccountStore does not return 0-lamport accounts from accountsdb" { try std.testing.expectEqual(null, try reader.getLatest(zero_lamport_address)); try std.testing.expectEqual(1, (try reader.getLatest(one_lamport_address)).?.lamports); - var 
ancestors = Ancestors{}; - defer ancestors.deinit(std.testing.allocator); - try ancestors.ancestors.put(std.testing.allocator, 0, {}); + var ancestors = Ancestors.EMPTY; + try ancestors.addSlot(0); const slot_reader = db.accountReader().forSlot(&ancestors); try std.testing.expectEqual(null, try slot_reader.get(zero_lamport_address)); @@ -455,11 +454,10 @@ test ThreadSafeAccountMap { const account_store = tsm.accountStore(); const account_reader = tsm.accountReader(); - var ancestors1: Ancestors = .{}; - defer ancestors1.deinit(allocator); + var ancestors1: Ancestors = .EMPTY; const slot1: Slot = 1; const addr1: Pubkey = .initRandom(random); - try ancestors1.ancestors.put(allocator, slot1, {}); + try ancestors1.addSlot(slot1); var expected_data: [128]u8 = undefined; random.bytes(&expected_data); diff --git a/src/accountsdb/db.zig b/src/accountsdb/db.zig index 7326344c4c..479035194d 100644 --- a/src/accountsdb/db.zig +++ b/src/accountsdb/db.zig @@ -3623,7 +3623,7 @@ test "write and read an account (write single + read with ancestors)" { // assume we've progessed past the need for ancestors { accounts_db.largest_flushed_slot.store(10_000, .monotonic); - var account = (try accounts_db.getAccountWithAncestors(&pubkey, &.{})).?; + var account = (try accounts_db.getAccountWithAncestors(&pubkey, &.EMPTY)).?; accounts_db.largest_flushed_slot.store(0, .monotonic); defer account.deinit(allocator); try std.testing.expect(test_account.equals(&account)); @@ -3631,9 +3631,8 @@ test "write and read an account (write single + read with ancestors)" { // slot is in ancestors { - var ancestors = sig.core.Ancestors{}; - defer ancestors.deinit(allocator); - try ancestors.ancestors.put(allocator, 5083, {}); + var ancestors = sig.core.Ancestors.EMPTY; + try ancestors.addSlot(5083); var account = (try accounts_db.getAccountWithAncestors(&pubkey, &ancestors)).?; defer account.deinit(allocator); @@ -3641,7 +3640,7 @@ test "write and read an account (write single + read with ancestors)" { } 
// slot is not in ancestors - try std.testing.expectEqual(null, accounts_db.getAccountWithAncestors(&pubkey, &.{})); + try std.testing.expectEqual(null, accounts_db.getAccountWithAncestors(&pubkey, &.EMPTY)); // write account to the same pubkey in the next slot (!) { @@ -3667,9 +3666,8 @@ test "write and read an account (write single + read with ancestors)" { // prev slot, get prev account { - var ancestors = sig.core.Ancestors{}; - defer ancestors.deinit(allocator); - try ancestors.ancestors.put(allocator, 5083, {}); + var ancestors = sig.core.Ancestors.EMPTY; + try ancestors.addSlot(5083); var account = (try accounts_db.getAccountWithAncestors(&pubkey, &ancestors)).?; defer account.deinit(allocator); @@ -3678,9 +3676,8 @@ test "write and read an account (write single + read with ancestors)" { // new slot, get new account { - var ancestors = sig.core.Ancestors{}; - defer ancestors.deinit(allocator); - try ancestors.ancestors.put(allocator, 5084, {}); + var ancestors = sig.core.Ancestors.EMPTY; + try ancestors.addSlot(5084); var account = (try accounts_db.getAccountWithAncestors(&pubkey, &ancestors)).?; defer account.deinit(allocator); @@ -4583,9 +4580,8 @@ test "insert multiple accounts on same slot" { const slot: Slot = 10; // Create ancestors with initial slot - var ancestors = Ancestors{}; - defer ancestors.deinit(allocator); - try ancestors.ancestors.put(allocator, slot, {}); + var ancestors = Ancestors.EMPTY; + try ancestors.addSlot(slot); // Insert 50 random accounts on current slot and reload them immediately for (0..50) |i| { @@ -4664,14 +4660,13 @@ test "insert multiple accounts on multiple slots" { for (0..50) |i| { const slot = slots[random.uintLessThan(u64, slots.len)]; - var ancestors = Ancestors{}; - defer ancestors.deinit(allocator); - try ancestors.ancestors.put(allocator, slot, {}); + var ancestors = Ancestors.EMPTY; + try ancestors.addSlot(slot); const pubkey = Pubkey.initRandom(random); errdefer std.log.err( - "Failed to insert and load account: 
i={}, slot={}, ancestors={any} pubkey={}\n", - .{ i, slot, ancestors.ancestors.keys(), pubkey }, + "Failed to insert and load account: i={}, slot={}, ancestors={} pubkey={}\n", + .{ i, slot, ancestors, pubkey }, ); const expected = try createRandomAccount(allocator, random); @@ -4707,19 +4702,18 @@ test "insert account on multiple slots" { for (0..num_slots_to_insert) |j| { const slot = slots[random.uintLessThan(u64, slots.len)]; - var ancestors = Ancestors{}; - defer ancestors.deinit(allocator); - try ancestors.ancestors.put(allocator, slot, {}); + var ancestors = Ancestors.EMPTY; + try ancestors.addSlot(slot); errdefer std.log.err( \\Failed to insert and load account: i={} \\ j: {}/{} \\ slot: {} - \\ ancestors: {any} + \\ ancestors: {} \\ pubkey: {} \\ , - .{ i, j, num_slots_to_insert, slot, ancestors.ancestors.keys(), pubkey }, + .{ i, j, num_slots_to_insert, slot, ancestors, pubkey }, ); const expected = try createRandomAccount(allocator, random); @@ -4754,9 +4748,7 @@ test "missing ancestor returns null" { defer allocator.free(account.data); try accounts_db.putAccount(slot, pubkey, account); - var ancestors = Ancestors{}; - defer ancestors.deinit(allocator); - + var ancestors = Ancestors.EMPTY; try std.testing.expectEqual(null, try accounts_db.getAccountWithAncestors(&pubkey, &ancestors)); } @@ -4772,9 +4764,8 @@ test "overwrite account in same slot" { const slot: Slot = 15; const pubkey = Pubkey.initRandom(random); - var ancestors = Ancestors{}; - defer ancestors.deinit(allocator); - try ancestors.ancestors.put(allocator, slot, {}); + var ancestors = Ancestors.EMPTY; + try ancestors.addSlot(slot); const first = try createRandomAccount(allocator, random); defer allocator.free(first.data); @@ -4842,9 +4833,8 @@ test "insert many duplicate individual accounts, get latest with ancestors" { for (pubkeys, expected_latest) |pubkey, maybe_expected| { const expected = maybe_expected orelse return error.ExpectedMissing; - var ancestors = Ancestors{}; - defer 
ancestors.deinit(allocator); - try ancestors.ancestors.put(allocator, expected.slot, {}); + var ancestors = Ancestors.EMPTY; + try ancestors.addSlot(expected.slot); const maybe_actual = try accounts_db.getAccountWithAncestors(&pubkey, &ancestors); defer if (maybe_actual) |actual| actual.deinit(allocator); diff --git a/src/accountsdb/fuzz.zig b/src/accountsdb/fuzz.zig index 441843d21f..c96f887ca1 100644 --- a/src/accountsdb/fuzz.zig +++ b/src/accountsdb/fuzz.zig @@ -236,7 +236,6 @@ pub fn run(seed: u64, args: *std.process.ArgIterator) !void { var top_slot: Slot = 0; var ancestors: sig.core.Ancestors = .EMPTY; - defer ancestors.deinit(allocator); // get/put a bunch of accounts while (true) { @@ -251,10 +250,16 @@ pub fn run(seed: u64, args: *std.process.ArgIterator) !void { defer if (will_inc_slot) { top_slot += random.intRangeAtMost(Slot, 1, 2); }; - try ancestors.addSlot(allocator, top_slot); + try ancestors.addSlot(top_slot); const current_slot = if (!non_sequential_slots) top_slot else slot: { - const ancestor_slots: []const Slot = ancestors.ancestors.keys(); + var ancestor_slots = try allocator.alloc(Slot, ancestors.count()); + defer allocator.free(ancestor_slots); + var iter = ancestors.iterator(); + var i: usize = 0; + while (iter.next()) |slot| : (i += 1) ancestor_slots[i] = slot; + std.mem.sort(Slot, ancestor_slots, {}, std.sort.asc(Slot)); + std.debug.assert(ancestor_slots[ancestor_slots.len - 1] == top_slot); const ancestor_index = random.intRangeLessThan( usize, @@ -318,18 +322,12 @@ pub fn run(seed: u64, args: *std.process.ArgIterator) !void { break :blk .{ key, tracked_accounts.get(key).?
}; }; - var ancestors_sub = try ancestors.clone(allocator); - defer ancestors_sub.deinit(allocator); - for (ancestors_sub.ancestors.keys()) |other_slot| { + var ancestors_sub = ancestors; + var iter = ancestors.iterator(); + while (iter.next()) |other_slot| { if (other_slot <= tracked_account.slot) continue; - _ = ancestors_sub.ancestors.swapRemove(other_slot); + _ = ancestors_sub.removeSlot(other_slot); } - ancestors_sub.ancestors.sort(struct { - ancestors_sub: []Slot, - pub fn lessThan(ctx: @This(), a: usize, b: usize) bool { - return ctx.ancestors_sub[a] < ctx.ancestors_sub[b]; - } - }{ .ancestors_sub = ancestors_sub.ancestors.keys() }); const account = try accounts_db.getAccountWithAncestors(&pubkey, &ancestors_sub) orelse { diff --git a/src/cmd.zig b/src/cmd.zig index 4cb6abba84..50609e48ad 100644 --- a/src/cmd.zig +++ b/src/cmd.zig @@ -1223,9 +1223,10 @@ fn validator( epoch_stakes, ); errdefer current_epoch_constants.deinit(allocator); + const ancestors = try sig.core.Ancestors.fromMap(&bank_fields.ancestors); const feature_set = try sig.replay.service.getActiveFeatures( allocator, - loaded_snapshot.accounts_db.accountReader().forSlot(&bank_fields.ancestors), + loaded_snapshot.accounts_db.accountReader().forSlot(&ancestors), bank_fields.slot, ); const root_slot_constants = try sig.core.SlotConstants.fromBankFields( diff --git a/src/consensus/optimistic_vote_verifier.zig b/src/consensus/optimistic_vote_verifier.zig index 9d8133b721..db3749ab5e 100644 --- a/src/consensus/optimistic_vote_verifier.zig +++ b/src/consensus/optimistic_vote_verifier.zig @@ -252,8 +252,7 @@ test "OptimisticConfirmationVerifier.verifyForUnrootedOptimisticSlots: same slot latest.deinit(); try std.testing.expectEqual(2, latest.items.len); - var root_ancestors: sig.core.Ancestors = .{ .ancestors = .empty }; - defer root_ancestors.deinit(allocator); + var root_ancestors: sig.core.Ancestors = .EMPTY; const unrooted = try verifier.verifyForUnrootedOptimisticSlots( allocator, @@ -327,10
+326,9 @@ test "OptimisticConfirmationVerifier.verifyForUnrootedOptimisticSlots: unrooted try std.testing.expectEqual(3, latest.items.len); // Root on same fork at slot 5: ancestors include 1 and 3 - var anc5: sig.core.Ancestors = .{ .ancestors = .{} }; - defer anc5.deinit(allocator); - try anc5.addSlot(allocator, 1); - try anc5.addSlot(allocator, 3); + var anc5: sig.core.Ancestors = .EMPTY; + try anc5.addSlot(1); + try anc5.addSlot(3); { const unrooted = try verifier.verifyForUnrootedOptimisticSlots( allocator, @@ -344,9 +342,8 @@ test "OptimisticConfirmationVerifier.verifyForUnrootedOptimisticSlots: unrooted // Re-add optimistic slots and check root at 3 (same fork) try verifier.addNewOptimisticConfirmedSlots(allocator, optimistic, &ledger_writer); - var anc3: sig.core.Ancestors = .{ .ancestors = .{} }; - defer anc3.deinit(allocator); - try anc3.addSlot(allocator, 1); + var anc3: sig.core.Ancestors = .EMPTY; + try anc3.addSlot(1); { const unrooted = try verifier.verifyForUnrootedOptimisticSlots( allocator, @@ -361,10 +358,9 @@ test "OptimisticConfirmationVerifier.verifyForUnrootedOptimisticSlots: unrooted // Re-add optimistic slots and set a different fork root at slot 4 try verifier.addNewOptimisticConfirmedSlots(allocator, optimistic, &ledger_writer); - var anc4: sig.core.Ancestors = .{ .ancestors = .{} }; - defer anc4.deinit(allocator); + var anc4: sig.core.Ancestors = .EMPTY; // ancestors for 4 include 1 (but not 3) - try anc4.addSlot(allocator, 1); + try anc4.addSlot(1); { const unrooted = try verifier.verifyForUnrootedOptimisticSlots( allocator, @@ -383,9 +379,8 @@ test "OptimisticConfirmationVerifier.verifyForUnrootedOptimisticSlots: unrooted // Simulate missing ancestors by using root at 7 with no ancestors info var anc7: sig.core.Ancestors = .{ .ancestors = .empty }; - defer anc7.deinit(allocator); // First run should return 1 and 3 (not in ancestors and not rooted). Mark 5 as ancestor. 
- try anc7.addSlot(allocator, 5); + try anc7.addSlot(5); try verifier.addNewOptimisticConfirmedSlots( allocator, optimistic, diff --git a/src/consensus/replay_tower.zig b/src/consensus/replay_tower.zig index ae0b48d5b4..61219297a7 100644 --- a/src/consensus/replay_tower.zig +++ b/src/consensus/replay_tower.zig @@ -416,7 +416,7 @@ pub const ReplayTower = struct { return false; } - if (last_vote_ancestors.ancestors.count() == 0) { + if (last_vote_ancestors.count() == 0) { // If `last_vote_ancestors` is empty, this means we must have a last vote that is stray. If the `last_voted_slot` // is stray, it must be descended from some earlier root than the latest root (the anchor at startup). // The above check also guarentees that the candidate slot is not a descendant of this stray last vote. @@ -1554,7 +1554,8 @@ fn greatestCommonAncestor( if (superset.ancestors.count() == 0 or subset.ancestors.count() == 0) return null; - for (superset.ancestors.keys()) |slot| { + var iter = superset.ancestors.iterator(); + while (iter.next()) |slot| { if (!subset.containsSlot(slot)) continue; max_slot = if (max_slot) |current_max| @max(current_max, slot) else slot; } @@ -1776,7 +1777,8 @@ pub fn collectVoteLockouts( const fork_stake: u64 = blk: { var bank_ancestors = ancestors.get(bank_slot) orelse break :blk 0; var max_parent: ?Slot = null; - for (bank_ancestors.ancestors.keys()) |slot| { + var iter = bank_ancestors.iterator(); + while (iter.next()) |slot| { if (max_parent == null or slot > max_parent.?) 
{ max_parent = slot; } @@ -1827,7 +1829,8 @@ pub fn populateAncestorVotedStakes( if (ancestors.getPtr(vote_slot)) |slot_ancestors| { _ = try voted_stakes.getOrPutValue(allocator, vote_slot, 0); - for (slot_ancestors.ancestors.keys()) |slot| { + var iter = slot_ancestors.iterator(); + while (iter.next()) |slot| { _ = try voted_stakes.getOrPutValue(allocator, slot, 0); } } @@ -1846,7 +1849,8 @@ fn updateAncestorVotedStakes( if (ancestors.getPtr(voted_slot)) |vote_slot_ancestors| { const entry_vote_stake = try voted_stakes.getOrPutValue(allocator, voted_slot, 0); entry_vote_stake.value_ptr.* += voted_stake; - for (vote_slot_ancestors.ancestors.keys()) |ancestor_slot| { + var iter = vote_slot_ancestors.iterator(); + while (iter.next()) |ancestor_slot| { const entry_voted_stake = try voted_stakes.getOrPutValue(allocator, ancestor_slot, 0); entry_voted_stake.value_ptr.* += voted_stake; } @@ -1915,21 +1919,14 @@ test "check_vote_threshold_forks" { const random = prng.random(); // Create the ancestor relationships var ancestors = std.AutoArrayHashMapUnmanaged(u64, Ancestors).empty; - defer { - var it = ancestors.iterator(); - while (it.next()) |entry| { - entry.value_ptr.deinit(allocator); - } - ancestors.deinit(allocator); - } + defer ancestors.deinit(allocator); const vote_threshold_depth_plus_2 = VOTE_THRESHOLD_DEPTH + 2; try ancestors.ensureUnusedCapacity(allocator, vote_threshold_depth_plus_2); for (0..vote_threshold_depth_plus_2) |i| { var slot_parents: Ancestors = .EMPTY; - errdefer slot_parents.deinit(allocator); for (0..i) |j| { - try slot_parents.addSlot(allocator, j); + try slot_parents.addSlot(j); } ancestors.putAssumeCapacity(i, slot_parents); } @@ -2093,13 +2090,7 @@ test "collect vote lockouts root" { defer replay_tower.deinit(allocator); var ancestors = std.AutoArrayHashMapUnmanaged(u64, Ancestors).empty; - defer { - var it = ancestors.iterator(); - while (it.next()) |entry| { - entry.value_ptr.deinit(allocator); - } - ancestors.deinit(allocator); - } + 
defer ancestors.deinit(allocator); const max_lockout_history_plus_1 = MAX_LOCKOUT_HISTORY + 1; try ancestors.ensureUnusedCapacity(allocator, max_lockout_history_plus_1); @@ -2107,9 +2098,8 @@ test "collect vote lockouts root" { for (0..max_lockout_history_plus_1) |i| { _ = try replay_tower.recordBankVote(allocator, i, .initRandom(random)); var slots: Ancestors = .EMPTY; - errdefer slots.deinit(allocator); for (0..i) |j| { - try slots.addSlot(allocator, j); + try slots.addSlot(j); } try ancestors.put(allocator, i, slots); } @@ -2212,16 +2202,11 @@ test "collect vote lockouts sums" { // ancestors: slot 1 has ancestor 0, slot 0 has no ancestors var ancestors = std.AutoArrayHashMapUnmanaged(u64, Ancestors).empty; - defer { - var it = ancestors.iterator(); - while (it.next()) |entry| { - entry.value_ptr.*.deinit(allocator); - } - ancestors.deinit(allocator); - } + defer ancestors.deinit(allocator); + const set0: Ancestors = .EMPTY; var set1: Ancestors = .EMPTY; - try set1.addSlot(allocator, 0); + try set1.addSlot(0); try ancestors.put(allocator, 0, set0); try ancestors.put(allocator, 1, set1); @@ -2318,8 +2303,7 @@ test "is locked out empty" { defer replay_tower.deinit(std.testing.allocator); var ancestors: Ancestors = .EMPTY; - defer ancestors.deinit(std.testing.allocator); - try ancestors.addSlot(std.testing.allocator, 0); + try ancestors.addSlot(0); const result = try replay_tower.tower.isLockedOut( 1, @@ -2333,8 +2317,7 @@ test "is locked out root slot child pass" { defer replay_tower.deinit(std.testing.allocator); var ancestors: Ancestors = .EMPTY; - defer ancestors.deinit(std.testing.allocator); - try ancestors.addSlot(std.testing.allocator, 0); + try ancestors.addSlot(0); replay_tower.tower.vote_state.root_slot = 0; @@ -2350,8 +2333,7 @@ test "is locked out root slot sibling fail" { defer replay_tower.deinit(std.testing.allocator); var ancestors: Ancestors = .EMPTY; - defer ancestors.deinit(std.testing.allocator); - try ancestors.addSlot(std.testing.allocator, 
0); + try ancestors.addSlot(0); replay_tower.tower.vote_state.root_slot = 0; @@ -2411,8 +2393,7 @@ test "is locked out double vote" { defer replay_tower.deinit(std.testing.allocator); var ancestors: Ancestors = .EMPTY; - defer ancestors.deinit(std.testing.allocator); - try ancestors.addSlot(std.testing.allocator, 0); + try ancestors.addSlot(0); for (0..2) |i| { _ = try replay_tower.recordBankVote( @@ -2435,8 +2416,7 @@ test "is locked out child" { defer replay_tower.deinit(std.testing.allocator); var ancestors: Ancestors = .EMPTY; - defer ancestors.deinit(std.testing.allocator); - try ancestors.addSlot(std.testing.allocator, 0); + try ancestors.addSlot(0); _ = try replay_tower.recordBankVote( std.testing.allocator, @@ -2457,8 +2437,7 @@ test "is locked out sibling" { defer replay_tower.deinit(std.testing.allocator); var ancestors: Ancestors = .EMPTY; - defer ancestors.deinit(std.testing.allocator); - try ancestors.addSlot(std.testing.allocator, 0); + try ancestors.addSlot(0); for (0..2) |i| { _ = try replay_tower.recordBankVote( @@ -2481,8 +2460,7 @@ test "is locked out last vote expired" { defer replay_tower.deinit(std.testing.allocator); var ancestors: Ancestors = .EMPTY; - defer ancestors.deinit(std.testing.allocator); - try ancestors.addSlot(std.testing.allocator, 0); + try ancestors.addSlot(0); for (0..2) |i| { _ = try replay_tower.recordBankVote( @@ -3580,14 +3558,11 @@ test "greatestCommonAncestor" { // Test case: Basic common ancestor { var ancestors: AutoArrayHashMapUnmanaged(Slot, Ancestors) = .empty; - defer { - for (ancestors.values()) |*set| set.deinit(allocator); - ancestors.deinit(allocator); - } + defer ancestors.deinit(allocator); try ancestors.ensureUnusedCapacity(allocator, 2); - ancestors.putAssumeCapacity(10, try createAncestor(allocator, &.{ 5, 3, 1 })); - ancestors.putAssumeCapacity(20, try createAncestor(allocator, &.{ 8, 5, 2 })); + ancestors.putAssumeCapacity(10, try createAncestor(&.{ 5, 3, 1 })); + ancestors.putAssumeCapacity(20, try 
createAncestor(&.{ 8, 5, 2 })); // Both slots have common ancestor 5 try std.testing.expectEqual( @@ -3599,14 +3574,11 @@ test "greatestCommonAncestor" { // Test case: No common ancestor { var ancestors: AutoArrayHashMapUnmanaged(Slot, Ancestors) = .empty; - defer { - for (ancestors.values()) |*set| set.deinit(allocator); - ancestors.deinit(allocator); - } + defer ancestors.deinit(allocator); try ancestors.ensureUnusedCapacity(allocator, 2); - ancestors.putAssumeCapacity(10, try createAncestor(allocator, &.{ 3, 1 })); - ancestors.putAssumeCapacity(20, try createAncestor(allocator, &.{ 8, 2 })); + ancestors.putAssumeCapacity(10, try createAncestor(&.{ 3, 1 })); + ancestors.putAssumeCapacity(20, try createAncestor(&.{ 8, 2 })); try std.testing.expectEqual( @as(?Slot, null), @@ -3617,14 +3589,11 @@ test "greatestCommonAncestor" { // Test case: One empty ancestor set { var ancestors: AutoArrayHashMapUnmanaged(Slot, Ancestors) = .empty; - defer { - for (ancestors.values()) |*set| set.deinit(allocator); - ancestors.deinit(allocator); - } + defer ancestors.deinit(allocator); try ancestors.ensureUnusedCapacity(allocator, 2); - ancestors.putAssumeCapacity(10, try createAncestor(allocator, &.{ 5, 3 })); - ancestors.putAssumeCapacity(20, try createAncestor(allocator, &.{})); + ancestors.putAssumeCapacity(10, try createAncestor(&.{ 5, 3 })); + ancestors.putAssumeCapacity(20, try createAncestor(&.{})); try std.testing.expectEqual( @as(?Slot, null), @@ -3635,13 +3604,10 @@ test "greatestCommonAncestor" { // Test case: Missing slots { var ancestors: AutoArrayHashMapUnmanaged(Slot, Ancestors) = .empty; - defer { - for (ancestors.values()) |*set| set.deinit(allocator); - ancestors.deinit(allocator); - } + defer ancestors.deinit(allocator); try ancestors.ensureUnusedCapacity(allocator, 1); - ancestors.putAssumeCapacity(10, try createAncestor(allocator, &.{ 5, 3 })); + ancestors.putAssumeCapacity(10, try createAncestor(&.{ 5, 3 })); try std.testing.expectEqual( @as(?Slot, null), @@ 
-3652,13 +3618,10 @@ test "greatestCommonAncestor" { // Test case: Multiple common ancestors (should pick greatest) { var ancestors: AutoArrayHashMapUnmanaged(Slot, Ancestors) = .empty; - defer { - for (ancestors.values()) |*set| set.deinit(allocator); - ancestors.deinit(allocator); - } + defer ancestors.deinit(allocator); - try ancestors.put(allocator, 10, try createAncestor(allocator, &.{ 7, 5, 3 })); - try ancestors.put(allocator, 20, try createAncestor(allocator, &.{ 7, 5, 4 })); + try ancestors.put(allocator, 10, try createAncestor(&.{ 7, 5, 3 })); + try ancestors.put(allocator, 20, try createAncestor(&.{ 7, 5, 4 })); // Should pick 7 (greater than 5) try std.testing.expectEqual( @@ -4132,13 +4095,12 @@ pub fn createTestSlotHistory( return SlotHistory{ .bits = bits, .next_slot = 1 }; } -fn createAncestor(allocator: std.mem.Allocator, slots: []const Slot) !Ancestors { +fn createAncestor(slots: []const Slot) !Ancestors { if (!builtin.is_test) { @compileError("createAncestor should only be used in test"); } var set: Ancestors = .EMPTY; - errdefer set.deinit(allocator); - for (slots) |slot| try set.addSlot(allocator, slot); + for (slots) |slot| try set.addSlot(slot); return set; } @@ -4217,7 +4179,6 @@ pub const TestFixture = struct { for (self.descendants.values()) |set| set.deinit(allocator); self.descendants.deinit(allocator); - for (self.ancestors.values()) |*set| set.deinit(allocator); self.ancestors.deinit(allocator); } @@ -4332,10 +4293,8 @@ pub const TestFixture = struct { try self.ancestors.ensureTotalCapacity(allocator, input_tree.data.len); // Populate ancenstors var extended_ancestors = try getAncestors(allocator, input_tree); - defer { - for (extended_ancestors.values()) |*set| set.deinit(allocator); - extended_ancestors.deinit(allocator); - } + defer extended_ancestors.deinit(allocator); + try extendForkTreeAncestors(allocator, &self.ancestors, extended_ancestors); // Populate decendants @@ -4495,12 +4454,12 @@ fn getAncestors(allocator: 
std.mem.Allocator, tree: Tree) !std.AutoArrayHashMapU try visited.put(child.slot, {}); var child_ancestors: Ancestors = .EMPTY; - errdefer child_ancestors.deinit(allocator); - try child_ancestors.addSlot(allocator, current); + try child_ancestors.addSlot(current); if (ancestors.getPtr(current)) |parent_ancestors| { - for (parent_ancestors.ancestors.keys()) |item| { - try child_ancestors.addSlot(allocator, item); + var iter = parent_ancestors.iterator(); + while (iter.next()) |item| { + try child_ancestors.addSlot(item); } } @@ -4550,14 +4509,15 @@ pub fn extendForkTreeAncestors( return; } - for (extension.keys(), extension.values()) |slot, *extension_children| { + for (extension.keys(), extension.values()) |slot, extension_children| { const original_children = original.getPtr(slot) orelse { - try original.put(allocator, slot, try extension_children.clone(allocator)); + try original.put(allocator, slot, extension_children); continue; }; - for (extension_children.ancestors.keys()) |extension_child| { - try original_children.addSlot(allocator, extension_child); + var iter = extension_children.ancestors.iterator(); + while (iter.next()) |extension_child| { + try original_children.addSlot(extension_child); } } } diff --git a/src/core/ancestors.zig b/src/core/ancestors.zig index 666d9afc68..3473d4fd86 100644 --- a/src/core/ancestors.zig +++ b/src/core/ancestors.zig @@ -3,48 +3,42 @@ const sig = @import("../sig.zig"); const HashMap = std.AutoArrayHashMapUnmanaged; -const bincode = sig.bincode; +const RingBitSet = sig.utils.collections.RingBitSet; const Slot = sig.core.Slot; pub const Ancestors = struct { - // agave uses a "RollingBitField" which seems to be just an optimisation for a set - ancestors: HashMap(Slot, void) = .{}, + ancestors: RingBitSet(MAX_SLOT_RANGE), pub const EMPTY: Ancestors = .{ .ancestors = .empty }; - // For some reason, agave serializes Ancestors as HashMap(slot, usize). 
But deserializing - // ignores the usize, and serializing just uses the value 0. So we need to serialize void - // as if it's 0, and deserialize 0 as if it's void. - pub const @"!bincode-config:ancestors" = bincode.hashmap.hashMapFieldConfig( - HashMap(Slot, void), - .{ - .key = .{}, - .value = .{ .serializer = voidSerialize, .deserializer = voidDeserialize }, - }, - ); - - pub fn addSlot(self: *Ancestors, allocator: std.mem.Allocator, slot: Slot) !void { - try self.ancestors.put(allocator, slot, {}); + /// The maximum allowed distance from the highest to lowest contained slot. + pub const MAX_SLOT_RANGE = 8192; + + pub fn fromMap(map: *const HashMap(Slot, usize)) error{Underflow}!Ancestors { + var set = RingBitSet(MAX_SLOT_RANGE).empty; + for (map.keys()) |slot| try set.set(slot); + return .{ .ancestors = set }; } - pub fn containsSlot(self: *const Ancestors, slot: Slot) bool { - return self.ancestors.contains(slot); + pub fn addSlot(self: *Ancestors, slot: Slot) error{Underflow}!void { + try self.ancestors.set(slot); } - fn voidDeserialize(l: *bincode.LimitAllocator, reader: anytype, params: bincode.Params) !void { - _ = try bincode.readWithLimit(l, usize, reader, params); + pub fn removeSlot(self: *Ancestors, slot: Slot) void { + self.ancestors.unset(slot); } - fn voidSerialize(writer: anytype, data: anytype, params: bincode.Params) !void { - _ = data; - try bincode.write(writer, @as(usize, 0), params); + pub fn containsSlot(self: *const Ancestors, slot: Slot) bool { + return self.ancestors.isSet(slot); } - pub fn clone(self: *const Ancestors, allocator: std.mem.Allocator) !Ancestors { - return .{ .ancestors = try self.ancestors.clone(allocator) }; + pub fn count(self: *const Ancestors) usize { + return self.ancestors.count(); } - pub fn deinit(self: *Ancestors, allocator: std.mem.Allocator) void { - self.ancestors.deinit(allocator); + pub const Iterator = RingBitSet(MAX_SLOT_RANGE).Iterator; + + pub fn iterator(self: *const Ancestors) Iterator { + return 
self.ancestors.iterator(); } }; diff --git a/src/core/bank.zig b/src/core/bank.zig index e9c2f65793..f7d55a8eed 100644 --- a/src/core/bank.zig +++ b/src/core/bank.zig @@ -107,7 +107,8 @@ pub const SlotConstants = struct { allocator: Allocator, bank_fields: *const BankFields, feature_set: FeatureSet, - ) Allocator.Error!SlotConstants { + ) !SlotConstants { + const ancestors = try sig.core.Ancestors.fromMap(&bank_fields.ancestors); return .{ .parent_slot = bank_fields.parent_slot, .parent_hash = bank_fields.parent_hash, @@ -117,7 +118,7 @@ pub const SlotConstants = struct { .max_tick_height = bank_fields.max_tick_height, .fee_rate_governor = bank_fields.fee_rate_governor, .epoch_reward_status = .inactive, - .ancestors = try bank_fields.ancestors.clone(allocator), + .ancestors = ancestors, .feature_set = feature_set, .reserved_accounts = try reserved_accounts.initForSlot( allocator, @@ -131,8 +132,8 @@ pub const SlotConstants = struct { allocator: Allocator, fee_rate_governor: sig.core.genesis_config.FeeRateGovernor, ) Allocator.Error!SlotConstants { - var ancestors = Ancestors{}; - try ancestors.ancestors.put(allocator, 0, {}); + var ancestors = Ancestors.EMPTY; + ancestors.addSlot(0) catch unreachable; return .{ .parent_slot = 0, .parent_hash = sig.core.Hash.ZEROES, @@ -151,7 +152,6 @@ pub const SlotConstants = struct { pub fn deinit(self_const: SlotConstants, allocator: Allocator) void { var self = self_const; self.epoch_reward_status.deinit(allocator); - self.ancestors.deinit(allocator); self.reserved_accounts.deinit(allocator); } }; @@ -378,7 +378,7 @@ pub const EpochConstants = struct { /// Analogous to [DeserializableVersionedBank](https://github.com/anza-xyz/agave/blob/9c899a72414993dc005f11afb5df10752b10810b/runtime/src/serde_snapshot.rs#L134). 
pub const BankFields = struct { blockhash_queue: BlockhashQueue, - ancestors: Ancestors, + ancestors: std.AutoArrayHashMapUnmanaged(Slot, usize), hash: Hash, parent_hash: Hash, parent_slot: Slot, @@ -413,22 +413,16 @@ pub const BankFields = struct { epoch_stakes: EpochStakesMap, is_delta: bool, - pub fn deinit( - bank_fields: *const BankFields, - allocator: std.mem.Allocator, - ) void { - bank_fields.blockhash_queue.deinit(allocator); - - var ancestors = bank_fields.ancestors; + pub fn deinit(self: *const BankFields, allocator: std.mem.Allocator) void { + var ancestors = self.ancestors; ancestors.deinit(allocator); - bank_fields.hard_forks.deinit(allocator); - - bank_fields.stakes.deinit(allocator); - - bank_fields.unused_accounts.deinit(allocator); + self.blockhash_queue.deinit(allocator); + self.hard_forks.deinit(allocator); + self.stakes.deinit(allocator); + self.unused_accounts.deinit(allocator); - deinitMapAndValues(allocator, bank_fields.epoch_stakes); + deinitMapAndValues(allocator, self.epoch_stakes); } pub fn clone( @@ -511,11 +505,11 @@ pub const BankFields = struct { /// for commentary on the runtime of this function. 
random: std.Random, max_list_entries: usize, - ) std.mem.Allocator.Error!BankFields { + ) !BankFields { var blockhash_queue = try BlockhashQueue.initRandom(allocator, random, max_list_entries); errdefer blockhash_queue.deinit(allocator); - var ancestors = try ancestorsRandom(random, allocator, max_list_entries); + var ancestors = try ancestorsRandom(allocator, random, max_list_entries); errdefer ancestors.deinit(allocator); const hard_forks = try HardForks.initRandom(random, allocator, max_list_entries); @@ -574,15 +568,17 @@ }; pub fn ancestorsRandom( + allocator: Allocator, random: std.Random, - allocator: std.mem.Allocator, max_list_entries: usize, -) std.mem.Allocator.Error!Ancestors { - var ancestors = Ancestors{}; - errdefer ancestors.deinit(allocator); +) !std.AutoArrayHashMapUnmanaged(Slot, usize) { + var ancestors = std.AutoArrayHashMapUnmanaged(Slot, usize){}; + + const lower_bound = random.uintAtMost(Slot, std.math.maxInt(Slot) - Ancestors.MAX_SLOT_RANGE); + const upper_bound = lower_bound + Ancestors.MAX_SLOT_RANGE; - for (0..random.uintAtMost(usize, max_list_entries)) |_| { - try ancestors.addSlot(allocator, random.int(Slot)); + for (0..@min(Ancestors.MAX_SLOT_RANGE, random.uintAtMost(usize, max_list_entries))) |_| { + try ancestors.put(allocator, random.intRangeLessThan(Slot, lower_bound, upper_bound), 0); } return ancestors; diff --git a/src/core/status_cache.zig b/src/core/status_cache.zig index 1d6f055680..301a6027be 100644 --- a/src/core/status_cache.zig +++ b/src/core/status_cache.zig @@ -4,7 +4,6 @@ const sig = @import("../sig.zig"); const HashMap = std.AutoArrayHashMapUnmanaged; const ArrayList = std.ArrayListUnmanaged; const RwMux = sig.sync.RwMux; -const bincode = sig.bincode; const Hash = sig.core.Hash; const Slot = sig.core.Slot; @@ -96,7 +95,7 @@ pub const StatusCache = struct { var roots = self.roots.read(); defer roots.unlock(); return for (stored_forks.items) |fork| { - if (ancestors.ancestors.contains(fork.slot) or roots.get().contains(fork.slot)) { + if
(ancestors.containsSlot(fork.slot) or roots.get().contains(fork.slot)) { break fork; } } else null; @@ -216,31 +215,6 @@ pub const StatusCache = struct { } }; -test "status cache (de)serialize Ancestors" { - const allocator = std.testing.allocator; - - var ancestors = Ancestors{ - .ancestors = try .init(allocator, &.{ 1, 2, 3, 4 }, &.{}), - }; - defer ancestors.deinit(allocator); - - const serialized = try bincode.writeAlloc(allocator, ancestors, .{}); - - defer allocator.free(serialized); - - const deserialized = try bincode.readFromSlice( - allocator, - HashMap(Slot, usize), - serialized, - .{}, - ); - defer bincode.free(allocator, deserialized); - - try std.testing.expectEqual(ancestors.ancestors.count(), deserialized.count()); - try std.testing.expectEqualSlices(Slot, ancestors.ancestors.keys(), deserialized.keys()); - try std.testing.expectEqualSlices(usize, &.{ 0, 0, 0, 0 }, deserialized.values()); -} - test "status cache empty" { const signature = sig.core.Signature.ZEROES; const block_hash = Hash.ZEROES; @@ -252,7 +226,7 @@ test "status cache empty" { status_cache.getStatus( &signature.data, &block_hash, - &Ancestors{}, + &Ancestors.EMPTY, ), ); } @@ -265,10 +239,8 @@ test "status cache find with ancestor fork" { const signature = sig.core.Signature.ZEROES; const blockhash = Hash.ZEROES; - var ancestors: Ancestors = .{ - .ancestors = try HashMap(Slot, void).init(allocator, &.{0}, &.{}), - }; - defer ancestors.ancestors.deinit(allocator); + var ancestors: Ancestors = .EMPTY; + try ancestors.addSlot(0); var status_cache: StatusCache = .DEFAULT; defer status_cache.deinit(allocator); @@ -289,7 +261,7 @@ test "status cache find without ancestor fork" { const signature = sig.core.Signature.ZEROES; const blockhash = Hash.ZEROES; - var ancestors: Ancestors = .{}; + var ancestors: Ancestors = .EMPTY; var status_cache: StatusCache = .DEFAULT; defer status_cache.deinit(allocator); @@ -310,7 +282,7 @@ test "status cache find with root ancestor fork" { const signature = 
sig.core.Signature.ZEROES; const blockhash = Hash.ZEROES; - var ancestors: Ancestors = .{}; + var ancestors: Ancestors = .EMPTY; var status_cache: StatusCache = .DEFAULT; defer status_cache.deinit(allocator); @@ -332,10 +304,8 @@ test "status cache insert picks latest blockhash fork" { const signature = sig.core.Signature.ZEROES; const blockhash = Hash.ZEROES; - var ancestors: Ancestors = .{ - .ancestors = try HashMap(Slot, void).init(allocator, &.{0}, &.{}), - }; - defer ancestors.ancestors.deinit(allocator); + var ancestors: Ancestors = .EMPTY; + try ancestors.addSlot(0); var status_cache: StatusCache = .DEFAULT; defer status_cache.deinit(allocator); @@ -358,7 +328,7 @@ test "status cache root expires" { const signature = sig.core.Signature.ZEROES; const blockhash = Hash.ZEROES; - var ancestors: Ancestors = .{}; + var ancestors: Ancestors = .EMPTY; var status_cache: StatusCache = .DEFAULT; defer status_cache.deinit(allocator); diff --git a/src/replay/confirm_slot.zig b/src/replay/confirm_slot.zig index ef25301658..5f55ce37cb 100644 --- a/src/replay/confirm_slot.zig +++ b/src/replay/confirm_slot.zig @@ -787,8 +787,8 @@ pub const TestState = struct { errdefer blockhash_queue.deinit(allocator); try blockhash_queue.insertGenesisHash(allocator, .ZEROES, 1); - var ancestors = Ancestors{}; - try ancestors.addSlot(allocator, 0); + var ancestors = Ancestors.EMPTY; + try ancestors.addSlot(0); const replay_votes_channel: *sig.sync.Channel(ParsedVote) = try .create(allocator); @@ -813,7 +813,6 @@ pub const TestState = struct { pub fn deinit(self: *TestState, allocator: Allocator) void { self.account_map.deinit(); self.status_cache.deinit(allocator); - self.ancestors.deinit(allocator); var bhq = self.blockhash_queue.tryWrite() orelse unreachable; bhq.get().deinit(allocator); bhq.unlock(); diff --git a/src/replay/consensus.zig b/src/replay/consensus.zig index 161e03b126..098096cd33 100644 --- a/src/replay/consensus.zig +++ b/src/replay/consensus.zig @@ -1789,10 +1789,8 @@ test 
"processConsensus - no duplicate confirmed without votes" { const SlotSet = sig.utils.collections.SortedSetUnmanaged(Slot); var ancestors: std.AutoArrayHashMapUnmanaged(Slot, Ancestors) = .empty; - defer { - for (ancestors.values()) |*val| val.deinit(testing.allocator); - ancestors.deinit(testing.allocator); - } + defer ancestors.deinit(testing.allocator); + var descendants: std.AutoArrayHashMapUnmanaged(Slot, SlotSet) = .empty; defer descendants.deinit(testing.allocator); defer { @@ -1804,9 +1802,9 @@ test "processConsensus - no duplicate confirmed without votes" { ) |slot, info| { const slot_ancestors = &info.constants.ancestors.ancestors; const agop = try ancestors.getOrPutValue(testing.allocator, slot, .EMPTY); - try agop.value_ptr.ancestors.ensureUnusedCapacity(testing.allocator, slot_ancestors.count()); - for (slot_ancestors.keys()) |a_slot| { - try agop.value_ptr.addSlot(testing.allocator, a_slot); + var iter = slot_ancestors.iterator(); + while (iter.next()) |a_slot| { + try agop.value_ptr.addSlot(a_slot); const dgop = try descendants.getOrPutValue(testing.allocator, a_slot, .empty); try dgop.value_ptr.put(testing.allocator, slot); } @@ -1947,10 +1945,8 @@ test "processConsensus - duplicate-confirmed is idempotent" { const SlotSet = sig.utils.collections.SortedSetUnmanaged(Slot); var ancestors: std.AutoArrayHashMapUnmanaged(Slot, Ancestors) = .empty; - defer { - for (ancestors.values()) |*val| val.deinit(testing.allocator); - ancestors.deinit(testing.allocator); - } + defer ancestors.deinit(testing.allocator); + var descendants: std.AutoArrayHashMapUnmanaged(Slot, SlotSet) = .empty; defer descendants.deinit(testing.allocator); defer { @@ -1963,9 +1959,9 @@ test "processConsensus - duplicate-confirmed is idempotent" { ) |slot, info| { const slot_ancestors = &info.constants.ancestors.ancestors; const agop = try ancestors.getOrPutValue(testing.allocator, slot, .EMPTY); - try agop.value_ptr.ancestors.ensureUnusedCapacity(testing.allocator, 
slot_ancestors.count()); - for (slot_ancestors.keys()) |a_slot| { - try agop.value_ptr.addSlot(testing.allocator, a_slot); + var iter = slot_ancestors.iterator(); + while (iter.next()) |a_slot| { + try agop.value_ptr.addSlot(a_slot); const dgop = try descendants.getOrPutValue(testing.allocator, a_slot, .empty); try dgop.value_ptr.put(testing.allocator, slot); } diff --git a/src/replay/freeze.zig b/src/replay/freeze.zig index d54de28db7..7e034da5fe 100644 --- a/src/replay/freeze.zig +++ b/src/replay/freeze.zig @@ -278,9 +278,8 @@ pub fn hashSlot(allocator: Allocator, params: HashSlotParams) !struct { ?LtHash, }); if (params.feature_set.active(.accounts_lt_hash, params.slot)) { - var parent_ancestors = try params.ancestors.clone(allocator); - defer parent_ancestors.deinit(allocator); - assert(parent_ancestors.ancestors.swapRemove(params.slot)); + var parent_ancestors = params.ancestors.*; + parent_ancestors.removeSlot(params.slot); var lt_hash = params.parent_lt_hash.* orelse return error.UnknownParentLtHash; lt_hash.mixIn(try deltaLtHash(params.account_reader, params.slot, &parent_ancestors)); @@ -366,7 +365,7 @@ pub fn deltaLtHash( } test "deltaLtHash is identity for 0 accounts" { - try std.testing.expectEqual(LtHash.IDENTITY, try deltaLtHash(.noop, 0, &Ancestors{})); + try std.testing.expectEqual(LtHash.IDENTITY, try deltaLtHash(.noop, 0, &Ancestors.EMPTY)); } test "deltaMerkleHash for 0 accounts" { @@ -569,10 +568,9 @@ test "delta hashes with many accounts" { const expected_merkle_hash = Hash.parseRuntime("5tpzYxp8ghAETjXaXnZvxZov11iNEvSbDZXNAMoJX6ov") catch unreachable; - var parent_ancestors = Ancestors{}; - defer parent_ancestors.deinit(allocator); - try parent_ancestors.ancestors.put(allocator, 0, {}); - try parent_ancestors.ancestors.put(allocator, 1, {}); + var parent_ancestors = Ancestors.EMPTY; + try parent_ancestors.addSlot(0); + try parent_ancestors.addSlot(1); const actual_lt_hash = try deltaLtHash(accounts.accountReader(), hash_slot, 
&parent_ancestors); const actual_merkle_hash = try deltaMerkleHash(accounts.accountReader(), allocator, hash_slot); diff --git a/src/replay/resolve_lookup.zig b/src/replay/resolve_lookup.zig index 79c7c42cfd..ba21916f89 100644 --- a/src/replay/resolve_lookup.zig +++ b/src/replay/resolve_lookup.zig @@ -444,8 +444,7 @@ test resolveBatch { }; var ancestors = Ancestors{ .ancestors = .empty }; - defer ancestors.deinit(std.testing.allocator); - try ancestors.ancestors.put(std.testing.allocator, 0, {}); + try ancestors.addSlot(0); const slot_hashes = try SlotHashes.init(std.testing.allocator); defer slot_hashes.deinit(std.testing.allocator); @@ -550,9 +549,8 @@ test getLookupTable { var map = sig.accounts_db.ThreadSafeAccountMap.init(allocator); defer map.deinit(); - var ancestors = sig.core.Ancestors{}; - defer ancestors.deinit(allocator); - try ancestors.addSlot(allocator, 0); + var ancestors = sig.core.Ancestors.EMPTY; + try ancestors.addSlot(0); const account_reader = map.accountReader().forSlot(&ancestors); diff --git a/src/replay/service.zig b/src/replay/service.zig index c46b65fcfc..0461270114 100644 --- a/src/replay/service.zig +++ b/src/replay/service.zig @@ -570,17 +570,16 @@ fn advanceReplay(state: *ReplayState) !void { ) |slot, info| { const slot_ancestors = &info.constants.ancestors.ancestors; const ancestor_gop = try ancestors.getOrPutValue(arena, slot, .EMPTY); - try ancestor_gop.value_ptr.ancestors.ensureUnusedCapacity(arena, slot_ancestors.count()); - for (slot_ancestors.keys()) |ancestor_slot| { - try ancestor_gop.value_ptr.addSlot(arena, ancestor_slot); + var iter = slot_ancestors.iterator(); + while (iter.next()) |ancestor_slot| { + try ancestor_gop.value_ptr.addSlot(ancestor_slot); const descendants_gop = try descendants.getOrPutValue(arena, ancestor_slot, .empty); try descendants_gop.value_ptr.put(arena, slot); } } } - const slot_history_accessor = SlotHistoryAccessor - .init(state.account_store.reader()); + const slot_history_accessor = 
SlotHistoryAccessor.init(state.account_store.reader()); // Explicitly Unlock the read lock on slot_tracker and acquire a write lock for consensus processing. slot_tracker_lg.unlock(); @@ -727,9 +726,8 @@ fn newSlotFromParent( .clone(allocator); errdefer epoch_reward_status.deinit(allocator); - var ancestors = try parent_constants.ancestors.clone(allocator); - errdefer ancestors.deinit(allocator); - try ancestors.ancestors.put(allocator, slot, {}); + var ancestors = parent_constants.ancestors; + try ancestors.addSlot(slot); var feature_set = try getActiveFeatures(allocator, account_reader.forSlot(&ancestors), slot); diff --git a/src/replay/update_sysvar.zig b/src/replay/update_sysvar.zig index 85b75772ed..9fd9965c39 100644 --- a/src/replay/update_sysvar.zig +++ b/src/replay/update_sysvar.zig @@ -614,9 +614,8 @@ test fillMissingSysvarCacheEntries { // Set slot and ancestors const slot = 10; - var ancestors = Ancestors{}; - defer ancestors.deinit(allocator); - try ancestors.ancestors.put(allocator, slot, {}); + var ancestors = Ancestors.EMPTY; + try ancestors.addSlot(slot); // Create a sysvar cache with all sysvars randomly initialized. 
const expected = try initSysvarCacheWithRandomValues(allocator, prng.random()); @@ -839,9 +838,8 @@ test "update all sysvars" { var capitalization = Atomic(u64).init(0); var slot: Slot = 10; const rent = Rent.DEFAULT; - var ancestors = Ancestors{}; - defer ancestors.deinit(allocator); - try ancestors.ancestors.put(allocator, slot, {}); + var ancestors = Ancestors.EMPTY; + try ancestors.addSlot(slot); // Create and insert sysvar defaults const initial_sysvars = try initSysvarCacheWithDefaultValues(allocator); @@ -876,7 +874,7 @@ test "update all sysvars" { .rent = &rent, .slot = slot, }; - try ancestors.ancestors.put(allocator, slot, {}); + try ancestors.addSlot(slot); const account_reader = accounts_db.accountReader().forSlot(&ancestors); { // updateClock diff --git a/src/runtime/check_transactions.zig b/src/runtime/check_transactions.zig index 5532a10201..cba7b173ac 100644 --- a/src/runtime/check_transactions.zig +++ b/src/runtime/check_transactions.zig @@ -444,8 +444,7 @@ test checkStatusCache { var prng = std.Random.DefaultPrng.init(0); - var ancestors = Ancestors{}; - defer ancestors.deinit(allocator); + var ancestors = Ancestors.EMPTY; var status_cache: sig.core.StatusCache = .DEFAULT; defer status_cache.deinit(allocator); @@ -463,7 +462,7 @@ test checkStatusCache { ), ); - try ancestors.ancestors.put(allocator, 0, {}); + try ancestors.addSlot(0); try status_cache.insert(allocator, prng.random(), &recent_blockhash, &msg_hash.data, 0); try std.testing.expectEqual( diff --git a/src/runtime/transaction_execution.zig b/src/runtime/transaction_execution.zig index c6150ad253..a5c8e06727 100644 --- a/src/runtime/transaction_execution.zig +++ b/src/runtime/transaction_execution.zig @@ -756,7 +756,7 @@ test "loadAndExecuteTransactions: no transactions" { const transactions: []RuntimeTransaction = &.{}; var batch_account_cache: account_loader.BatchAccountCache = .{}; - const ancestors: Ancestors = .{}; + const ancestors: Ancestors = .EMPTY; const feature_set: FeatureSet 
/// A bit set over a sliding window of `len` consecutive indices. The window
/// may progress forwards by setting bits past its upper bound (discarding the
/// oldest values), but never regresses backwards.
pub fn RingBitSet(len: usize) type {
    return struct {
        /// Underlying fixed-size storage; index `i` lives in slot `i % len`.
        inner: InnerSet,
        /// The lowest representable index; the window is `[bottom, bottom + len)`.
        bottom: usize,

        const Self = @This();
        const InnerSet = std.bit_set.ArrayBitSet(usize, len);

        pub const empty = Self{
            .inner = InnerSet.initEmpty(),
            .bottom = 0,
        };

        /// Returns true iff `index` lies inside the current window and is set.
        pub fn isSet(self: *const Self, index: usize) bool {
            if (index < self.bottom or index >= self.bottom + len) return false;
            return self.inner.isSet(index % len);
        }

        /// Sets `index`, advancing the window when `index` lies past its upper
        /// bound and clearing every value that falls out of the new window.
        /// Returns `error.Underflow` if `index` is below the window.
        pub fn set(self: *Self, index: usize) error{Underflow}!void {
            if (index < self.bottom) return error.Underflow;
            if (1 + index - self.bottom > len) {
                const new_bottom = 1 + index - len;
                const advance = new_bottom - self.bottom;
                if (advance >= len) {
                    // The window moved entirely past the old contents; every
                    // previously stored bit is stale. (A modular range wipe
                    // would clear only `advance % len` slots here — possibly
                    // none at all — leaving stale bits aliased to new indices.)
                    self.inner = InnerSet.initEmpty();
                } else {
                    // Clear only the slots that fell out of the window,
                    // accounting for wrap-around in the circular storage.
                    const wipe_start = self.bottom % len;
                    const wipe_end = new_bottom % len;
                    if (wipe_start < wipe_end) {
                        self.inner.setRangeValue(.{ .start = wipe_start, .end = wipe_end }, false);
                    } else {
                        self.inner.setRangeValue(.{ .start = wipe_start, .end = len }, false);
                        self.inner.setRangeValue(.{ .start = 0, .end = wipe_end }, false);
                    }
                }
                self.bottom = new_bottom;
            }
            self.inner.set(index % len);
        }

        /// Clears `index` if it lies inside the current window; no-op otherwise.
        pub fn unset(self: *Self, index: usize) void {
            if (index < self.bottom or index >= self.bottom + len) return;
            self.inner.unset(index % len);
        }

        /// Number of set bits in the window.
        pub fn count(self: *const Self) usize {
            return self.inner.count();
        }

        pub const Iterator = struct {
            inner: InnerSet.Iterator(.{}),
            bottom: usize,

            pub fn next(self: *Iterator) ?usize {
                const slot = self.inner.next() orelse return null;
                // Map the storage slot back to the unique absolute index in
                // `[bottom, bottom + len)` congruent to it mod `len`. The
                // mapping must be taken relative to `bottom - bottom % len`;
                // using `bottom` directly is only correct when `bottom` is a
                // multiple of `len` (and can underflow `usize` otherwise).
                const base = self.bottom - self.bottom % len;
                return if (slot < self.bottom % len)
                    base + len + slot
                else
                    base + slot;
            }
        };

        /// Iterates the set indices; items are not sorted.
        pub fn iterator(self: *const Self) Iterator {
            return .{
                .inner = self.inner.iterator(.{}),
                .bottom = self.bottom,
            };
        }
    };
}

test RingBitSet {
    var set = RingBitSet(10).empty;

    for (0..100) |i| {
        try set.set(i);
        try std.testing.expect(set.isSet(i));
        try std.testing.expectEqual(if (i > 9) 10 else i + 1, set.count());
        const first_set = i -| 9;
        for (0..i) |j| {
            try std.testing.expectEqual(j >= first_set, set.isSet(j));
        }
    }

    // The iterator must yield only indices the set reports as present, even
    // when `bottom` (here 90) is not a multiple of `len`.
    var seen: usize = 0;
    var it = set.iterator();
    while (it.next()) |index| {
        try std.testing.expect(set.isSet(index));
        seen += 1;
    }
    try std.testing.expectEqual(set.count(), seen);

    // A jump of `len` or more past the window wipes every stale bit.
    var jumpy = RingBitSet(4).empty;
    try jumpy.set(0);
    try jumpy.set(3);
    try jumpy.set(11);
    try std.testing.expect(!jumpy.isSet(8)); // would be a stale alias of old index 0
    try std.testing.expect(jumpy.isSet(11));
    try std.testing.expectEqual(1, jumpy.count());
}