diff --git a/conformance/src/instruction_execute.zig b/conformance/src/instruction_execute.zig
index 4327882865..30e6f1de18 100644
--- a/conformance/src/instruction_execute.zig
+++ b/conformance/src/instruction_execute.zig
@@ -136,8 +136,10 @@ fn executeInstruction(
     if (emit_logs) {
         std.debug.print("Execution Logs:\n", .{});
-        for (tc.log_collector.?.collect(), 1..) |msg, index| {
-            std.debug.print(" {}: {s}\n", .{ index, msg });
+        var i: usize = 0;
+        var msgs = tc.log_collector.?.iterator();
+        while (msgs.next()) |msg| : (i += 1) {
+            std.debug.print(" {}: {s}\n", .{ i, msg });
         }
     }
diff --git a/conformance/src/txn_execute.zig b/conformance/src/txn_execute.zig
index b529d720ea..64657118aa 100644
--- a/conformance/src/txn_execute.zig
+++ b/conformance/src/txn_execute.zig
@@ -886,8 +886,9 @@ fn printLogs(result: TransactionResult(ProcessedTransaction)) void {
         .ok => |txn| {
             switch (txn) {
                 .executed => |executed| {
-                    const msgs = executed.executed_transaction.log_collector.?.messages;
-                    for (msgs.items, 0..) |msg, i| {
+                    var i: usize = 0;
+                    var msgs = executed.executed_transaction.log_collector.?.iterator();
+                    while (msgs.next()) |msg| : (i += 1) {
                         std.debug.print("log {}: {s}\n", .{ i, msg });
                     }
                 },
diff --git a/conformance/src/utils.zig b/conformance/src/utils.zig
index 4d1cb1cfaf..31cd25bb22 100644
--- a/conformance/src/utils.zig
+++ b/conformance/src/utils.zig
@@ -102,8 +102,12 @@ pub fn createTransactionContext(
         ptr
     else
         try allocator.create(ProgramMap);
+    errdefer if (environment.program_map == null) allocator.destroy(program_map);
     program_map.* = ProgramMap{};

+    const log_collector = try sig.runtime.LogCollector.default(allocator);
+    errdefer log_collector.deinit(allocator);
+
     tc.* = TransactionContext{
         .allocator = allocator,
         .feature_set = feature_set,
@@ -124,7 +128,7 @@ pub fn createTransactionContext(
         .compute_meter = instr_ctx.cu_avail,
         .compute_budget = sig.runtime.ComputeBudget.default(instr_ctx.cu_avail),
         .custom_error = null,
-        .log_collector = sig.runtime.LogCollector.default(),
+        .log_collector = log_collector,
         .rent = sysvar_cache.get(sysvar.Rent) catch sysvar.Rent.DEFAULT,
         .prev_blockhash = sig.core.Hash.ZEROES,
         .prev_lamports_per_signature = 0,
@@ -480,7 +484,8 @@ pub fn createSyscallEffect(allocator: std.mem.Allocator, params: struct {
     var log = std.ArrayList(u8).init(allocator);
     defer log.deinit();
     if (params.tc.log_collector) |log_collector| {
-        for (log_collector.collect()) |msg| {
+        var iter = log_collector.iterator();
+        while (iter.next()) |msg| {
             try log.appendSlice(msg);
             try log.append('\n');
         }
diff --git a/conformance/src/vm_syscall.zig b/conformance/src/vm_syscall.zig
index c412f43226..249ad44c02 100644
--- a/conformance/src/vm_syscall.zig
+++ b/conformance/src/vm_syscall.zig
@@ -343,8 +343,10 @@ fn executeSyscall(
     if (emit_logs) {
         std.debug.print("Execution Logs:\n", .{});
-        for (tc.log_collector.?.collect(), 1..) |msg, index| {
-            std.debug.print(" {}: {s}\n", .{ index, msg });
+        var i: usize = 0;
+        var msgs = tc.log_collector.?.iterator();
+        while (msgs.next()) |msg| : (i += 1) {
+            std.debug.print(" {}: {s}\n", .{ i, msg });
         }
     }
diff --git a/src/accountsdb/buffer_pool.zig b/src/accountsdb/buffer_pool.zig
index f370240f00..4ce99b6309 100644
--- a/src/accountsdb/buffer_pool.zig
+++ b/src/accountsdb/buffer_pool.zig
@@ -528,7 +528,7 @@ pub const FrameManager = struct {
             entry.value_ptr.* = frame_ref.index;
             eviction_lfu.key[frame_ref.index] = key;

-            std.debug.assert(!std.meta.eql(evicted_key, key)); // inserted key we just evicted
+            std.debug.assert(evicted_key != key); // inserted key we just evicted

             const removed = frame_map.swapRemove(evicted_key);
             std.debug.assert(removed); // evicted key was not in map
diff --git a/src/accountsdb/db.zig b/src/accountsdb/db.zig
index 7059743536..aafa46f592 100644
--- a/src/accountsdb/db.zig
+++ b/src/accountsdb/db.zig
@@ -3832,6 +3832,7 @@ test "load sysvars" {
 }

 test "generate snapshot & update gossip snapshot hashes" {
+    const GossipDataTag = sig.gossip.data.GossipDataTag;
     const SnapshotHashes = sig.gossip.data.SnapshotHashes;

     const allocator = std.testing.allocator;
@@ -3914,7 +3915,7 @@ test "generate snapshot & update gossip snapshot hashes" {
     try std.testing.expectEqual(1, queue.queue.items.len);
     const queue_item_0 = queue.queue.items[0]; // should be from the full generation
-    try std.testing.expectEqual(.SnapshotHashes, std.meta.activeTag(queue_item_0));
+    try std.testing.expectEqual(.SnapshotHashes, @as(GossipDataTag, queue_item_0));

     try std.testing.expectEqualDeep(
         SnapshotHashes{
@@ -3964,7 +3965,7 @@ test "generate snapshot & update gossip snapshot hashes" {
     try std.testing.expectEqual(2, queue.queue.items.len);
     const queue_item_1 = queue.queue.items[1]; // should be from the incremental generation
-    try std.testing.expectEqual(.SnapshotHashes, std.meta.activeTag(queue_item_1));
+    try std.testing.expectEqual(.SnapshotHashes, @as(GossipDataTag, queue_item_1));

     try std.testing.expectEqualDeep(
         SnapshotHashes{
diff --git a/src/accountsdb/fuzz.zig b/src/accountsdb/fuzz.zig
index 481233e09f..5422fd7a91 100644
--- a/src/accountsdb/fuzz.zig
+++ b/src/accountsdb/fuzz.zig
@@ -535,7 +535,7 @@ fn readRandomAccounts(
     const tracked_pubkeys = tracked_accounts.keys();
     if (tracked_pubkeys.len == 0) {
         // wait for some accounts to exist
-        std.time.sleep(std.time.ns_per_s);
+        std.Thread.sleep(std.time.ns_per_s);
         continue;
     }
diff --git a/src/accountsdb/snapshot/download.zig b/src/accountsdb/snapshot/download.zig
index b9e5f2653f..77f24f764b 100644
--- a/src/accountsdb/snapshot/download.zig
+++ b/src/accountsdb/snapshot/download.zig
@@ -210,7 +210,7 @@ pub fn downloadSnapshotsFromGossip(
     var function_duration = try std.time.Timer.start();
     var download_attempts: u64 = 0;
     while (true) {
-        std.time.sleep(5 * std.time.ns_per_s); // wait while gossip table updates
+        std.Thread.sleep(5 * std.time.ns_per_s); // wait while gossip table updates

         if (download_attempts > max_number_of_download_attempts) {
             logger.err().logf(
diff --git a/src/accountsdb/swiss_map.zig b/src/accountsdb/swiss_map.zig
index 79897ea13e..4f6c39b7d7 100644
--- a/src/accountsdb/swiss_map.zig
+++ b/src/accountsdb/swiss_map.zig
@@ -649,7 +649,7 @@ test "swissmap resize" {
     // this will resize the map with the key still in there
     try map.ensureTotalCapacity(200);
     const get_ref = map.get(sig.core.Pubkey.ZEROES) orelse return error.MissingAccount;
-    try std.testing.expect(std.meta.eql(get_ref, ref));
+    try std.testing.expectEqual(get_ref, ref);
 }

 test "swissmap read/write/delete" {
diff --git a/src/adapter.zig b/src/adapter.zig
index 56c3734db6..be2ccb6f9d 100644
--- a/src/adapter.zig
+++ b/src/adapter.zig
@@ -110,7 +110,7 @@ pub const RpcEpochContextService = struct {
                 result catch |e|
                     self.logger.err().logf("failed to refresh epoch context via rpc: {}", .{e});
             }
-            std.time.sleep(100 * std.time.ns_per_ms);
+            std.Thread.sleep(100 * std.time.ns_per_ms);
             i += 1;
         }
     }
diff --git a/src/benchmarks.zig b/src/benchmarks.zig
index 235c4a9a5e..b9a67b4790 100644
--- a/src/benchmarks.zig
+++ b/src/benchmarks.zig
@@ -47,7 +47,7 @@ fn exitWithUsage() noreturn {
         \\
     ) catch @panic("failed to print usage");

-    inline for (std.meta.fields(Benchmark)) |field| {
+    inline for (@typeInfo(Benchmark).@"enum".fields) |field| {
         stdout.print(
             " {s}\n",
             .{field.name},
diff --git a/src/geyser/benchmark.zig b/src/geyser/benchmark.zig
index 1bea27e8aa..82255727c0 100644
--- a/src/geyser/benchmark.zig
+++ b/src/geyser/benchmark.zig
@@ -78,7 +78,7 @@ pub fn runBenchmark(logger: sig.trace.Logger("geyser.benchmark")) !void {
     // let it run for ~4 measurements
     const NUM_MEAUSUREMENTS = 4;
-    std.time.sleep(MEASURE_RATE.asNanos() * NUM_MEAUSUREMENTS);
+    std.Thread.sleep(MEASURE_RATE.asNanos() * NUM_MEAUSUREMENTS);

     exit.store(true, .release);
     reader_handle.join();
diff --git a/src/geyser/core.zig b/src/geyser/core.zig
index 1dffbd1363..2216f1903c 100644
--- a/src/geyser/core.zig
+++ b/src/geyser/core.zig
@@ -199,7 +199,7 @@ pub const GeyserWriter = struct {
         const buf = self.io_allocator.alloc(u8, total_len) catch {
             // no memory available rn - unlock and wait
             self.metrics.recycle_fba_empty_loop_count.inc();
-            std.time.sleep(std.time.ns_per_ms);
+            std.Thread.sleep(std.time.ns_per_ms);
             if (self.exit.load(.acquire)) {
                 return error.MemoryBlockedWithExitSignaled;
             }
diff --git a/src/gossip/data.zig b/src/gossip/data.zig
index 6b33561cee..a466039b99 100644
--- a/src/gossip/data.zig
+++ b/src/gossip/data.zig
@@ -2076,7 +2076,7 @@ test "LegacyContactInfo <-> ContactInfo roundtrip" {
     defer ci.deinit();

     const end = LegacyContactInfo.fromContactInfo(&ci);
-    try std.testing.expect(std.meta.eql(start, end));
+    try std.testing.expectEqual(start, end);
 }

 test "sanitize valid ContactInfo works" {
diff --git a/src/gossip/dump_service.zig b/src/gossip/dump_service.zig
index 771a2a1a67..f54e194142 100644
--- a/src/gossip/dump_service.zig
+++ b/src/gossip/dump_service.zig
@@ -39,7 +39,7 @@ pub const GossipDumpService = struct {
         while (self.exit_condition.shouldRun()) {
             try self.dumpGossip(dir, start_time);
-            std.time.sleep(DUMP_INTERVAL.asNanos());
+            std.Thread.sleep(DUMP_INTERVAL.asNanos());
         }
     }
diff --git a/src/gossip/fuzz_service.zig b/src/gossip/fuzz_service.zig
index ff004dd32c..88316efe98 100644
--- a/src/gossip/fuzz_service.zig
+++ b/src/gossip/fuzz_service.zig
@@ -239,7 +239,7 @@ pub fn fuzz(
             std.debug.print("{d} messages sent\n", .{msg_count});
             last_print_msg_count = msg_count;
         }
-        std.time.sleep(SLEEP_TIME.asNanos());
+        std.Thread.sleep(SLEEP_TIME.asNanos());
     }
 }
diff --git a/src/gossip/service.zig b/src/gossip/service.zig
index 08e5ec8740..f2f9f07901 100644
--- a/src/gossip/service.zig
+++ b/src/gossip/service.zig
@@ -980,7 +980,7 @@ pub const GossipService = struct {
             // sleep
             if (loop_timer.read().asNanos() < BUILD_MESSAGE_LOOP_MIN.asNanos()) {
                 const time_left_ms = BUILD_MESSAGE_LOOP_MIN.asMillis() -| loop_timer.read().asMillis();
-                std.time.sleep(time_left_ms * std.time.ns_per_ms);
+                std.Thread.sleep(time_left_ms * std.time.ns_per_ms);
             }
         }
     }
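The db.zig and benchmarks.zig hunks above swap `std.meta.activeTag` and `std.meta.fields` for the underlying language features. A minimal sketch of those two idioms, using a hypothetical tagged union rather than the real gossip types, and assuming the Zig 0.14 `@typeInfo` field names used elsewhere in this diff:

    const std = @import("std");

    // Hypothetical stand-ins for sig.gossip.data.GossipData / GossipDataTag.
    const Data = union(enum) { SnapshotHashes: u64, ContactInfo: void };
    const DataTag = @typeInfo(Data).@"union".tag_type.?;

    test "tag coercion and enum reflection" {
        const item: Data = .{ .SnapshotHashes = 1 };
        // A tagged union coerces to its tag enum, which replaces std.meta.activeTag(item).
        try std.testing.expectEqual(.SnapshotHashes, @as(DataTag, item));
        // @typeInfo(E).@"enum".fields replaces std.meta.fields(E) for enums.
        try std.testing.expectEqual(2, @typeInfo(DataTag).@"enum".fields.len);
    }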
@@ -2641,12 +2641,12 @@ test "handle old prune & pull request message" {
     const MAX_N_SLEEPS = 100;
     var i: u64 = 0;
     while (gossip_service.metrics.pull_requests_dropped.get() != 2) {
-        std.time.sleep(std.time.ns_per_ms * 100);
+        std.Thread.sleep(std.time.ns_per_ms * 100);
         if (i > MAX_N_SLEEPS) return error.LoopRangeExceeded;
         i += 1;
     }
     while (gossip_service.metrics.prune_messages_dropped.get() != 1) {
-        std.time.sleep(std.time.ns_per_ms * 100);
+        std.Thread.sleep(std.time.ns_per_ms * 100);
         if (i > MAX_N_SLEEPS) return error.LoopRangeExceeded;
         i += 1;
     }
@@ -3263,7 +3263,7 @@ test "process contact info push packet" {
     const MAX_N_SLEEPS = 100;
     var i: u64 = 0;
     while (gossip_service.metrics.gossip_packets_processed_total.get() != valid_messages_sent) {
-        std.time.sleep(std.time.ns_per_ms * 100);
+        std.Thread.sleep(std.time.ns_per_ms * 100);
         if (i > MAX_N_SLEEPS) return error.LoopRangeExceeded;
         i += 1;
     }
diff --git a/src/net/socket_utils.zig b/src/net/socket_utils.zig
index 97a7ee47d1..52286ed112 100644
--- a/src/net/socket_utils.zig
+++ b/src/net/socket_utils.zig
@@ -106,11 +106,11 @@ const XevThread = struct {
         if (rc.active == 1) {
             // Lock the ref_count to detect if theres races (i.e. another spawn()) during shutdown.
             rc = @bitCast(ref_count.swap(@bitCast(RefCount{ .shutdown = true }), .acquire));
-            std.debug.assert(std.meta.eql(rc, RefCount{}));
+            std.debug.assert(rc == .{});

             defer {
                 rc = @bitCast(ref_count.swap(@bitCast(RefCount{}), .release));
-                std.debug.assert(std.meta.eql(rc, RefCount{ .shutdown = true }));
+                std.debug.assert(rc == .{ .shutdown = true });
             }

             notifyIoThread(); // wake up xev thread to see ref_count.shutdown to stop/shutdown
@@ -518,7 +518,7 @@ test "SocketThread: overload sendto" {
     }

     // Wait for all sends to have started/happened.
-    while (!send_channel.isEmpty()) std.time.sleep(10 * std.time.ns_per_ms);
+    while (!send_channel.isEmpty()) std.Thread.sleep(10 * std.time.ns_per_ms);
 }

 pub const BenchmarkPacketProcessing = struct {
@@ -586,7 +586,7 @@ pub const BenchmarkPacketProcessing = struct {
             if (i % 10 == 0) {
                 const elapsed = timer.read();
                 if (elapsed < std.time.ns_per_s) {
-                    std.time.sleep(std.time.ns_per_s);
+                    std.Thread.sleep(std.time.ns_per_s);
                 }
             }
         }
diff --git a/src/prometheus/http.zig b/src/prometheus/http.zig
index a55452a036..374e5fe983 100644
--- a/src/prometheus/http.zig
+++ b/src/prometheus/http.zig
@@ -58,7 +58,7 @@ pub fn main() !void {
     var gauge = try reg.getOrCreateGauge("seconds_hand", u64);
     var hist = try reg.getOrCreateHistogram("hist", &DEFAULT_BUCKETS);
     while (true) {
-        std.time.sleep(1_000_000_000);
+        std.Thread.sleep(1_000_000_000);
         secs_counter.inc();
         gauge.set(@as(u64, @intCast(std.time.timestamp())) % @as(u64, 60));
         hist.observe(1.1);
diff --git a/src/rpc/client.zig b/src/rpc/client.zig
index d6891d2889..3f2c24d553 100644
--- a/src/rpc/client.zig
+++ b/src/rpc/client.zig
@@ -36,8 +36,7 @@ pub const Client = struct {
         allocator: Allocator,
         id: rpc.request.Id,
         comptime method: MethodAndParams.Tag,
-        // TODO: use this instead of `std.meta.FieldType` to avoid eval branch quota until `@FieldType`'s here.
-        params: @typeInfo(MethodAndParams).@"union".fields[@intFromEnum(method)].type,
+        params: @FieldType(MethodAndParams, @tagName(method)),
     ) Error!Response(@TypeOf(params).Response) {
         const request: rpc.request.Request = .{
             .id = id,
@@ -58,8 +57,7 @@
         self: *Client,
         id: rpc.request.Id,
         comptime method: MethodAndParams.Tag,
-        // TODO: use this instead of `std.meta.FieldType` to avoid eval branch quota until `@FieldType`'s here.
-        request: @typeInfo(MethodAndParams).@"union".fields[@intFromEnum(method)].type,
+        request: @FieldType(MethodAndParams, @tagName(method)),
     ) Error!Response(@TypeOf(request).Response) {
         return try self.fetchCustom(
             self.fetcher.http_client.allocator,
diff --git a/src/rpc/request.zig b/src/rpc/request.zig
index 7a0cc1f50e..fba24060e7 100644
--- a/src/rpc/request.zig
+++ b/src/rpc/request.zig
@@ -110,8 +110,7 @@ pub const Request = struct {
         const method_and_params = switch (method) {
             inline else => |tag| @unionInit(MethodAndParams, @tagName(tag), blk: {
-                // NOTE: using `std.meta.FieldType` here hits eval branch quota, hack until `@FieldType`
-                const Params = @typeInfo(MethodAndParams).@"union".fields[@intFromEnum(tag)].type;
+                const Params = @FieldType(MethodAndParams, @tagName(tag));
                 if (Params == noreturn) {
                     std.debug.panic("TODO: implement {s}", .{@tagName(method)});
                 }
diff --git a/src/rpc/test_serialize.zig b/src/rpc/test_serialize.zig
index b75f36c48b..fa76191939 100644
--- a/src/rpc/test_serialize.zig
+++ b/src/rpc/test_serialize.zig
@@ -25,8 +25,7 @@ const Response = rpc.response.Response;
 fn testRequest(
     comptime method: methods.MethodAndParams.Tag,
     /// passed into the client
-    // TODO: use this instead of `std.meta.FieldType` to avoid eval branch quota until `@FieldType`'s here.
-    params: @typeInfo(methods.MethodAndParams).@"union".fields[@intFromEnum(method)].type,
+    params: @FieldType(methods.MethodAndParams, @tagName(method)),
     /// test will assert the request serializes to this json
     expected_request_json: []const u8,
 ) !void {
diff --git a/src/runtime/log_collector.zig b/src/runtime/log_collector.zig
index 7233e4f386..e06abb972b 100644
--- a/src/runtime/log_collector.zig
+++ b/src/runtime/log_collector.zig
@@ -3,41 +3,74 @@ const std = @import("std");

 /// [agave] https://github.com/anza-xyz/agave/blob/faea52f338df8521864ab7ce97b120b2abb5ce13/program-runtime/src/log_collector.rs#L4
 const DEFAULT_MAX_BYTES_LIMIT: usize = 10 * 1000;
+const LOG_TRUNCATE_MSG = "Log truncated";

 /// `LogCollector` is used to collect logs at the transaction level. Each `TransactionContext` has its own log collector
 /// which may be used to collect and emit logs as part of the transaction processing result.
 ///
 /// [agave] https://github.com/anza-xyz/agave/blob/faea52f338df8521864ab7ce97b120b2abb5ce13/program-runtime/src/log_collector.rs#L6
 pub const LogCollector = struct {
-    messages: std.ArrayListUnmanaged([]const u8),
+    message_pool: std.ArrayListUnmanaged(u8),
+    message_indices: std.ArrayListUnmanaged(usize),
     bytes_written: usize,
     bytes_limit: ?usize,
     bytes_limit_reached: bool,

-    pub fn init(bytes_limit: ?usize) LogCollector {
+    pub fn init(allocator: std.mem.Allocator, bytes_limit: ?usize) !LogCollector {
         return .{
-            .messages = .{},
+            .message_pool = try .initCapacity(
+                allocator,
+                bytes_limit orelse DEFAULT_MAX_BYTES_LIMIT,
+            ),
+            .message_indices = try .initCapacity(
+                allocator,
+                (bytes_limit orelse DEFAULT_MAX_BYTES_LIMIT) / 100,
+            ),
             .bytes_written = 0,
             .bytes_limit = bytes_limit,
             .bytes_limit_reached = false,
         };
     }

-    pub fn default() LogCollector {
-        return LogCollector.init(DEFAULT_MAX_BYTES_LIMIT);
+    pub fn default(allocator: std.mem.Allocator) !LogCollector {
+        return LogCollector.init(allocator, DEFAULT_MAX_BYTES_LIMIT);
     }

     pub fn deinit(self: LogCollector, allocator: std.mem.Allocator) void {
         var copy = self;
-        for (copy.messages.items) |message| allocator.free(message);
-        copy.messages.deinit(allocator);
+        copy.message_pool.deinit(allocator);
+        copy.message_indices.deinit(allocator);
     }

-    /// [agave] https://github.com/anza-xyz/agave/blob/faea52f338df8521864ab7ce97b120b2abb5ce13/program-runtime/src/log_collector.rs#L43
-    pub fn collect(self: LogCollector) []const []const u8 {
-        return self.messages.items;
+    pub fn eql(self: LogCollector, other: LogCollector) bool {
+        return std.mem.eql(u8, self.message_pool.items, other.message_pool.items);
     }

+    pub fn iterator(self: LogCollector) Iterator {
+        return .{
+            .message_pool = self.message_pool.items,
+            .message_indices = self.message_indices.items,
+            .index = 0,
+        };
+    }
+
+    pub const Iterator = struct {
+        message_pool: []const u8,
+        message_indices: []const usize,
+        index: usize,
+
+        pub fn next(it: *Iterator) ?[]const u8 {
+            if (it.index >= it.message_indices.len) return null;
+            const end_idx = blk: {
+                if (it.index + 1 == it.message_indices.len) break :blk it.message_pool.len;
+                break :blk it.message_indices[it.index + 1];
+            };
+            const msg = it.message_pool[it.message_indices[it.index]..end_idx];
+            it.index += 1;
+            return msg;
+        }
+    };
+
     /// [agave] https://github.com/anza-xyz/agave/blob/faea52f338df8521864ab7ce97b120b2abb5ce13/program-runtime/src/log_collector.rs#L25
     pub fn log(
         self: *LogCollector,
@@ -47,23 +80,19 @@
     ) error{OutOfMemory}!void {
         if (self.bytes_limit_reached) return;

-        const message = try std.fmt.allocPrint(allocator, fmt, args);
-
+        try self.message_indices.append(allocator, self.message_pool.items.len);
         if (self.bytes_limit) |bl| {
-            const bytes_written = self.bytes_written +| message.len;
+            const msg_len: usize = @intCast(std.fmt.count(fmt, args));
+            const bytes_written = self.bytes_written +| msg_len;
             if (bytes_written >= bl and !self.bytes_limit_reached) {
-                allocator.free(message);
                 self.bytes_limit_reached = true;
-                try self.messages.append(
-                    allocator,
-                    try std.fmt.allocPrint(allocator, "Log truncated", .{}),
-                );
+                try self.message_pool.appendSlice(allocator, LOG_TRUNCATE_MSG);
             } else {
                 self.bytes_written = bytes_written;
-                try self.messages.append(allocator, message);
+                try self.message_pool.writer(allocator).print(fmt, args);
             }
         } else {
-            try self.messages.append(allocator, message);
+            try self.message_pool.writer(allocator).print(fmt, args);
         }
     }
 };
@@ -72,33 +101,38 @@ test "bytes_limit" {
     const allocator = std.testing.allocator;
     {
-        var log_collector = LogCollector.init(10);
+        var log_collector = try LogCollector.init(allocator, 10);
         defer log_collector.deinit(allocator);

         try log_collector.log(allocator, "Hello", .{});
         try log_collector.log(allocator, "World", .{}); // This message will be truncated

+        var iter = log_collector.iterator();
         try expectEqualLogs(
-            &.{
-                "Hello",
-                "Log truncated",
-            },
-            log_collector.collect(),
+            &.{ "Hello", LOG_TRUNCATE_MSG },
+            &.{ iter.next().?, iter.next().? },
         );
     }

     // [agave] https://github.com/anza-xyz/agave/blob/faea52f338df8521864ab7ce97b120b2abb5ce13/program-runtime/src/log_collector.rs#L108
     {
-        var log_collector = LogCollector.default();
+        var log_collector = try LogCollector.default(allocator);
         defer log_collector.deinit(allocator);

         for (0..DEFAULT_MAX_BYTES_LIMIT * 2) |_| try log_collector.log(allocator, "x", .{});

-        const messages = log_collector.collect();
-        try std.testing.expectEqual(DEFAULT_MAX_BYTES_LIMIT, messages.len);
-        for (messages[0 .. DEFAULT_MAX_BYTES_LIMIT - 1]) |msg|
-            try std.testing.expectEqualStrings("x", msg);
-        try std.testing.expectEqualStrings("Log truncated", messages[DEFAULT_MAX_BYTES_LIMIT - 1]);
+        var msg_iter = log_collector.iterator();
+        while (msg_iter.next()) |msg| {
+            if (msg_iter.index == DEFAULT_MAX_BYTES_LIMIT) {
+                try std.testing.expectEqualStrings(LOG_TRUNCATE_MSG, msg);
+            } else {
+                try std.testing.expectEqualStrings("x", msg);
+            }
+        }
+        try std.testing.expectEqual(
+            DEFAULT_MAX_BYTES_LIMIT,
+            msg_iter.message_pool.len - LOG_TRUNCATE_MSG.len + 1,
+        );
     }
 }
diff --git a/src/runtime/program/testing.zig b/src/runtime/program/testing.zig
index 59f4df755c..54f3e656c0 100644
--- a/src/runtime/program/testing.zig
+++ b/src/runtime/program/testing.zig
@@ -57,7 +57,7 @@ pub fn expectProgramExecuteResult(
     var context_params = initial_context_params;
     if (options.print_logs and initial_context_params.log_collector == null) {
-        context_params.log_collector = LogCollector.init(null);
+        context_params.log_collector = try LogCollector.init(allocator, null);
     }

     // Create the initial transaction context
@@ -72,8 +72,10 @@ pub fn expectProgramExecuteResult(
     // Log messages before deiniting the transaction context
     if (options.print_logs) {
         std.debug.print("Execution Logs:\n", .{});
-        for (initial_tc.log_collector.?.collect(), 1..) |log, index| {
-            std.debug.print(" {}: {s}\n", .{ index, log });
+        var iter = initial_tc.log_collector.?.iterator();
+        var i: usize = 1;
+        while (iter.next()) |log| : (i += 1) {
+            std.debug.print(" {}: {s}\n", .{ i, log });
         }
     }
     deinitTransactionContext(allocator, initial_tc);
diff --git a/src/runtime/program/vote/state.zig b/src/runtime/program/vote/state.zig
index f74668d501..33b2652c64 100644
--- a/src/runtime/program/vote/state.zig
+++ b/src/runtime/program/vote/state.zig
@@ -3764,9 +3764,7 @@ test "state.VoteState process new vote state root progress" {
     // should succeed.
     for (MAX_LOCKOUT_HISTORY + 1..MAX_LOCKOUT_HISTORY + 3) |new_vote| {
         try processSlotVoteUnchecked(allocator, &vote_state2, new_vote);
-        try std.testing.expect(
-            !std.meta.eql(vote_state1.root_slot, vote_state2.root_slot),
-        );
+        try std.testing.expect(vote_state1.root_slot != vote_state2.root_slot);

         var cloned_votes = try vote_state2.votes.clone();
         defer cloned_votes.deinit();
@@ -3779,9 +3777,7 @@ test "state.VoteState process new vote state root progress" {
     );
     try std.testing.expectEqual(null, maybe_error);
     // TODO have a better way of comparing all of vote_state1 with vote_state2
-    try std.testing.expect(
-        std.meta.eql(vote_state1.root_slot, vote_state2.root_slot),
-    );
+    try std.testing.expectEqual(vote_state1.root_slot, vote_state2.root_slot);
     try std.testing.expectEqualSlices(
         LandedVote,
         vote_state1.votes.items,
diff --git a/src/runtime/stable_log.zig b/src/runtime/stable_log.zig
index 1985e1f10a..45405feaea 100644
--- a/src/runtime/stable_log.zig
+++ b/src/runtime/stable_log.zig
@@ -167,7 +167,7 @@ test "stable_log" {
         allocator,
         prng.random(),
         .{
-            .log_collector = LogCollector.default(),
+            .log_collector = try LogCollector.default(allocator),
         },
     );
     defer {
@@ -196,13 +196,13 @@ test "stable_log" {
         "Program SigDefau1tPubkey111111111111111111111111111 failed: Verifier error",
         "Program SigDefau1tPubkey111111111111111111111111111 failed: custom program error: 0x1234",
     };
-    const actual = tc.log_collector.?.collect();
-
-    try std.testing.expectEqualSlices(u8, expected[0], actual[0]);
-    try std.testing.expectEqualSlices(u8, expected[1], actual[1]);
-    try std.testing.expectEqualSlices(u8, expected[2], actual[2]);
-    try std.testing.expectEqualSlices(u8, expected[3], actual[3]);
-    try std.testing.expectEqualSlices(u8, expected[4], actual[4]);
-    try std.testing.expectEqualSlices(u8, expected[5], actual[5]);
-    try std.testing.expectEqualSlices(u8, expected[6], actual[6]);
+    var actual_iter = tc.log_collector.?.iterator();
+
+    try std.testing.expectEqualSlices(u8, expected[0], actual_iter.next().?);
+    try std.testing.expectEqualSlices(u8, expected[1], actual_iter.next().?);
+    try std.testing.expectEqualSlices(u8, expected[2], actual_iter.next().?);
+    try std.testing.expectEqualSlices(u8, expected[3], actual_iter.next().?);
+    try std.testing.expectEqualSlices(u8, expected[4], actual_iter.next().?);
+    try std.testing.expectEqualSlices(u8, expected[5], actual_iter.next().?);
+    try std.testing.expectEqualSlices(u8, expected[6], actual_iter.next().?);
 }
diff --git a/src/runtime/testing.zig b/src/runtime/testing.zig
index d5bfbdec25..6ea76ab8bd 100644
--- a/src/runtime/testing.zig
+++ b/src/runtime/testing.zig
@@ -382,11 +382,17 @@ pub fn expectTransactionContextEqual(
     }

     if (expected.custom_error != actual.custom_error)
-        return error.MaybeCustomErrorMismatch;
-
-    // TODO: implement eqls for LogCollector
-    // if (expected.maybe_log_collector != actual.maybe_log_collector)
-    //     return error.MaybeLogCollectorMismatch;
+        return error.CustomErrorMismatch;
+
+    if (expected.log_collector) |elc| {
+        if (actual.log_collector) |alc| {
+            if (!elc.eql(alc)) return error.LogCollectorMismatch;
+        } else {
+            return error.LogCollectorMismatch;
+        }
+    } else {
+        if (actual.log_collector) |_| return error.LogCollectorMismatch;
+    }

     if (expected.prev_lamports_per_signature != actual.prev_lamports_per_signature)
         return error.LamportsPerSignatureMismatch;
diff --git a/src/runtime/transaction_context.zig b/src/runtime/transaction_context.zig
index 47d3eaa104..3769dbea62 100644
--- a/src/runtime/transaction_context.zig
+++ b/src/runtime/transaction_context.zig
@@ -111,7 +111,7 @@ pub const TransactionContext = struct {

     pub fn deinit(self: TransactionContext) void {
         self.allocator.free(self.accounts);
-        if (self.log_collector) |lc| lc.deinit(self.allocator);
+        if (self.log_collector) |*lc| lc.deinit(self.allocator);
     }

     /// [agave] https://github.com/anza-xyz/agave/blob/134be7c14066ea00c9791187d6bbc4795dd92f0e/sdk/src/transaction_context.rs#L233
diff --git a/src/runtime/transaction_execution.zig b/src/runtime/transaction_execution.zig
index 56e31b19d6..7e05a10204 100644
--- a/src/runtime/transaction_execution.zig
+++ b/src/runtime/transaction_execution.zig
@@ -201,8 +201,8 @@ pub const ExecutedTransaction = struct {
     compute_meter: u64,
     accounts_data_len_delta: i64,

-    pub fn deinit(self: ExecutedTransaction, allocator: std.mem.Allocator) void {
-        if (self.log_collector) |lc| lc.deinit(allocator);
+    pub fn deinit(self: *ExecutedTransaction, allocator: std.mem.Allocator) void {
+        if (self.log_collector) |*lc| lc.deinit(allocator);
     }
 };

@@ -479,7 +479,7 @@ pub fn executeTransaction(
     const compute_budget = compute_budget_limits.intoComputeBudget();

     const log_collector = if (config.log)
-        LogCollector.init(config.log_messages_byte_limit)
+        try LogCollector.init(allocator, config.log_messages_byte_limit)
     else
         null;
diff --git a/src/shred_network/repair_service.zig b/src/shred_network/repair_service.zig
index ce6522d7b5..c2de22c26a 100644
--- a/src/shred_network/repair_service.zig
+++ b/src/shred_network/repair_service.zig
@@ -311,7 +311,7 @@ fn sleepRepair(num_requests: u64, last_iteration: Duration) void {
     // being processed no more than 80% of the time.
     const take_a_break = last_iteration.div(4);

-    std.time.sleep(remaining_sleep_for_target.max(MIN_REPAIR_DELAY).max(take_a_break).asNanos());
+    std.Thread.sleep(remaining_sleep_for_target.max(MIN_REPAIR_DELAY).max(take_a_break).asNanos());
 }

 /// The maximum time that we want the repair loop to take.
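With the log_collector.zig rework earlier in this diff, `collect()` is gone: `init`/`default` now take an allocator and can fail, and messages are read back through `iterator()`. A rough usage sketch of the new API, assuming the library is imported as `sig` the way the conformance code above does:

    const std = @import("std");
    const sig = @import("sig");

    test "LogCollector iteration (sketch)" {
        const allocator = std.testing.allocator;

        // default() now allocates the message pool up front, so it takes an allocator and is try'd.
        var log_collector = try sig.runtime.LogCollector.default(allocator);
        defer log_collector.deinit(allocator);

        try log_collector.log(allocator, "Program {s} invoke [{d}]", .{ "noop", 1 });

        // Messages live in one byte pool; the iterator slices them back out in order.
        var iter = log_collector.iterator();
        while (iter.next()) |msg| {
            std.debug.print("{s}\n", .{msg});
        }
    }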
diff --git a/src/shred_network/service.zig b/src/shred_network/service.zig
index bc60f761df..0458c3bbf8 100644
--- a/src/shred_network/service.zig
+++ b/src/shred_network/service.zig
@@ -220,7 +220,7 @@ pub fn start(
                 try file.seekTo(0);
                 try file.setEndPos(0);
                 _ = trakr.print(file.writer()) catch unreachable;
-                std.time.sleep(std.time.ns_per_s);
+                std.Thread.sleep(std.time.ns_per_s);
             }
         }
     }.run, .{ deps.exit, shred_tracker });
diff --git a/src/trace/log.zig b/src/trace/log.zig
index dd73a76d00..76ce230afc 100644
--- a/src/trace/log.zig
+++ b/src/trace/log.zig
@@ -158,7 +158,7 @@ pub const ChannelPrintLogger = struct {

     pub fn deinit(self: *Self) void {
         if (self.handle) |handle| {
-            std.time.sleep(std.time.ns_per_ms * 5);
+            std.Thread.sleep(std.time.ns_per_ms * 5);
             self.exit.store(true, .seq_cst);
             handle.join();
         }
@@ -413,7 +413,7 @@ test "channel logger" {
     }, stream.writer());

     logger.logger("test").log(.info, "hello world");
-    std.time.sleep(10 * std.time.ns_per_ms);
+    std.Thread.sleep(10 * std.time.ns_per_ms);
     logger.deinit();

     const actual = stream.getWritten();
diff --git a/src/transaction_sender/mock_transfer_generator.zig b/src/transaction_sender/mock_transfer_generator.zig
index 376ed86341..fe98023f07 100644
--- a/src/transaction_sender/mock_transfer_generator.zig
+++ b/src/transaction_sender/mock_transfer_generator.zig
@@ -157,7 +157,7 @@ pub const MockTransferService = struct {
             if (signature_status.confirmations == null) return true;
             if (signature_status.err) |_| return false;
         }
-        std.time.sleep(sig.time.Duration.fromSecs(1).asNanos());
+        std.Thread.sleep(sig.time.Duration.fromSecs(1).asNanos());
     }
     return false;
 }
diff --git a/src/transaction_sender/service.zig b/src/transaction_sender/service.zig
index 83f2a00053..065b50e04c 100644
--- a/src/transaction_sender/service.zig
+++ b/src/transaction_sender/service.zig
@@ -171,7 +171,7 @@ pub const Service = struct {
         defer rpc_client.deinit();

         while (!self.exit.load(.monotonic)) {
-            std.time.sleep(self.config.pool_process_rate.asNanos());
+            std.Thread.sleep(self.config.pool_process_rate.asNanos());
             if (self.transaction_pool.count() == 0) continue;

             var timer = try Timer.start();
diff --git a/src/utils/allocators.zig b/src/utils/allocators.zig
index 2e76521b40..712d080672 100644
--- a/src/utils/allocators.zig
+++ b/src/utils/allocators.zig
@@ -139,7 +139,7 @@ pub fn RecycleBuffer(comptime T: type, default_init: T, config: struct {
                 if (config.thread_safe) self.mux.unlock();
                 defer if (config.thread_safe) self.mux.lock();
                 // wait some time and try to collapse again.
-                std.time.sleep(std.time.ns_per_ms * config.collapse_sleep_ms);
+                std.Thread.sleep(std.time.ns_per_ms * config.collapse_sleep_ms);
                 // NOTE: this is because there may be new free records
                 // (which were free'd by some other consumer thread) which
                 // can be collapsed and the alloc call will then succeed.
@@ -169,7 +169,7 @@ pub fn RecycleBuffer(comptime T: type, default_init: T, config: struct {
                 if (config.thread_safe) self.mux.unlock();
                 defer if (config.thread_safe) self.mux.lock();
                 // wait some time and try to collapse again.
-                std.time.sleep(std.time.ns_per_ms * config.collapse_sleep_ms);
+                std.Thread.sleep(std.time.ns_per_ms * config.collapse_sleep_ms);
                 // NOTE: this is because there may be new free records
                 // (which were free'd by some other consumer thread) which
                 // can be collapsed and the alloc call will then succeed.
diff --git a/src/utils/service.zig b/src/utils/service.zig
index 976b5e06cd..3c98bb3604 100644
--- a/src/utils/service.zig
+++ b/src/utils/service.zig
@@ -209,7 +209,7 @@ pub fn runService(

         // sleep before looping, if necessary
         last_iteration = timer.lap();
-        std.time.sleep(@max(
+        std.Thread.sleep(@max(
             config.min_pause_ns,
             config.min_loop_duration_ns -| last_iteration,
         ));
diff --git a/src/utils/types.zig b/src/utils/types.zig
index 04f7adb067..bc1e3691e4 100644
--- a/src/utils/types.zig
+++ b/src/utils/types.zig
@@ -591,9 +591,9 @@ test "eql follows slices" {
     defer std.testing.allocator.free(b_slice);
     a_slice[0] = 1;
     b_slice[0] = 1;
-    const a = Foo{ .slice = a_slice };
-    const b = Foo{ .slice = b_slice };
+    const b: Foo = .{ .slice = b_slice };
+    const a: Foo = .{ .slice = a_slice };
     try std.testing.expect(eql(a, b));
     try std.testing.expect(!eqlCustom(a, b, .{ .follow_pointers = .no }));
-    try std.testing.expect(!std.meta.eql(a, b));
+    try std.testing.expectEqualSlices(u8, a.slice, b.slice);
 }
diff --git a/src/vm/syscalls/sysvar.zig b/src/vm/syscalls/sysvar.zig
index 4a4090fedc..0d284af34b 100644
--- a/src/vm/syscalls/sysvar.zig
+++ b/src/vm/syscalls/sysvar.zig
@@ -20,6 +20,8 @@ const SYSVAR_NOT_FOUND = 2;
 const OFFSET_LENGTH_EXCEEDS_SYSVAR = 1;

 fn getter(comptime T: type) fn (*TransactionContext, *MemoryMap, *RegisterMap) Error!void {
+    std.debug.assert(@typeInfo(T).@"struct".layout == .@"extern");
+
     return struct {
         fn getSyscall(
             tc: *TransactionContext,
@@ -39,8 +41,10 @@ fn getter(comptime T: type) fn (*TransactionContext, *MemoryMap, *RegisterMap) E
             const v = try tc.sysvar_cache.get(T);

             // Avoid value.* = v as it sets padding bytes to undefined instead of 0.
-            value.* = std.mem.zeroes(T);
-            inline for (std.meta.fields(T)) |f| @field(value, f.name) = @field(v, f.name);
+            @memset(std.mem.asBytes(value), 0);
+            inline for (@typeInfo(T).@"struct".fields) |f| {
+                @field(value, f.name) = @field(v, f.name);
+            }
         }
     }.getSyscall;
 }
@@ -149,10 +153,14 @@ test getSysvar {
         fn fill(zeroed: bool, v: anytype) @TypeOf(v) {
             var new_v = @TypeOf(v).DEFAULT;
-            for (std.mem.asBytes(&new_v), 0..) |*b, i| {
-                b.* = if (zeroed) @as(u8, 0) else @intCast(i);
+            if (zeroed) {
+                @memset(std.mem.asBytes(&new_v), 0);
+            } else {
+                for (std.mem.asBytes(&new_v), 0..) |*b, i| {
+                    b.* = @intCast(i);
+                }
             }
-            inline for (std.meta.fields(@TypeOf(v))) |field| {
+            inline for (@typeInfo(@TypeOf(v)).@"struct".fields) |field| {
                 @field(new_v, field.name) = @field(v, field.name);
             }
             return new_v;
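The sysvar.zig getter above zeroes the destination with `@memset` and then copies field-by-field so that padding bytes stay 0, which a whole-struct assignment would leave undefined. A small illustration of that pattern with a hypothetical extern struct (not one of the real sysvar types):

    const std = @import("std");

    // Hypothetical extern struct: 7 padding bytes follow `flag` to reach u64 alignment.
    const Example = extern struct {
        slot: u64,
        flag: u8,
    };

    fn copyZeroingPadding(dst: *Example, src: Example) void {
        // Zero every byte first so the padding is deterministic...
        @memset(std.mem.asBytes(dst), 0);
        // ...then copy only the named fields, leaving the padding at 0.
        inline for (@typeInfo(Example).@"struct".fields) |f| {
            @field(dst, f.name) = @field(src, f.name);
        }
    }

    test copyZeroingPadding {
        var dst: Example = undefined;
        copyZeroingPadding(&dst, .{ .slot = 42, .flag = 3 });
        try std.testing.expectEqual(42, dst.slot);
        try std.testing.expectEqual(3, dst.flag);
    }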