diff --git a/.gitignore b/.gitignore index 5cc5c9436..1c1048047 100644 --- a/.gitignore +++ b/.gitignore @@ -25,7 +25,7 @@ blocks txs hash_lists wallet_lists -wallets +/wallets logs* !bin/logs ebin diff --git a/apps/arweave/e2e/ar_e2e.erl b/apps/arweave/e2e/ar_e2e.erl new file mode 100644 index 000000000..b4745c2ac --- /dev/null +++ b/apps/arweave/e2e/ar_e2e.erl @@ -0,0 +1,309 @@ +-module(ar_e2e). + +-export([fixture_dir/1, fixture_dir/2, install_fixture/3, load_wallet_fixture/1, + write_chunk_fixture/3, load_chunk_fixture/2]). + +-export([delayed_print/2, start_source_node/3, source_node_storage_modules/3, packing_type_to_packing/2, + max_chunk_offset/1, assert_block/2, assert_syncs_range/3, assert_chunks/3, assert_partition_size/4]). + +-include_lib("arweave/include/ar.hrl"). +-include_lib("arweave/include/ar_config.hrl"). +-include_lib("arweave/include/ar_consensus.hrl"). +-include_lib("eunit/include/eunit.hrl"). + + +%% Set to true to update the chunk fixtures. +%% WARNING: ONLY SET TO true IF YOU KNOW WHAT YOU ARE DOING! +-define(UPDATE_CHUNK_FIXTURES, false). + + +-spec fixture_dir(atom()) -> binary(). +fixture_dir(FixtureType) -> + Dir = filename:dirname(?FILE), + filename:join([Dir, "fixtures", atom_to_list(FixtureType)]). + +-spec fixture_dir(atom(), [binary()]) -> binary(). +fixture_dir(FixtureType, SubDirs) -> + FixtureDir = fixture_dir(FixtureType), + filename:join([FixtureDir] ++ SubDirs). + +-spec install_fixture(binary(), atom(), string()) -> binary(). +install_fixture(FilePath, FixtureType, FixtureName) -> + FixtureDir = fixture_dir(FixtureType), + ok = filelib:ensure_dir(FixtureDir ++ "/"), + FixturePath = filename:join([FixtureDir, FixtureName]), + file:copy(FilePath, FixturePath), + FixturePath. + +-spec load_wallet_fixture(atom()) -> tuple(). +load_wallet_fixture(WalletFixture) -> + WalletName = atom_to_list(WalletFixture), + FixtureDir = fixture_dir(wallets), + FixturePath = filename:join([FixtureDir, WalletName ++ ".json"]), + Wallet = ar_wallet:load_keyfile(FixturePath), + Address = ar_wallet:to_address(Wallet), + WalletPath = ar_wallet:wallet_filepath(ar_util:encode(Address)), + file:copy(FixturePath, WalletPath), + ar_wallet:load_keyfile(WalletPath). + +-spec write_chunk_fixture(binary(), non_neg_integer(), binary()) -> ok. +write_chunk_fixture(Packing, EndOffset, Chunk) -> + FixtureDir = fixture_dir(chunks, [ar_serialize:encode_packing(Packing, true)]), + FixturePath = filename:join([FixtureDir, integer_to_list(EndOffset) ++ ".bin"]), + file:write_file(FixturePath, Chunk). + +-spec load_chunk_fixture(binary(), non_neg_integer()) -> binary(). +load_chunk_fixture(Packing, EndOffset) -> + FixtureDir = fixture_dir(chunks, [ar_serialize:encode_packing(Packing, true)]), + FixturePath = filename:join([FixtureDir, integer_to_list(EndOffset) ++ ".bin"]), + file:read_file(FixturePath). + +packing_type_to_packing(PackingType, Address) -> + case PackingType of + spora_2_6 -> {spora_2_6, Address}; + composite_1 -> {composite, Address, 1}; + composite_2 -> {composite, Address, 2}; + unpacked -> unpacked + end. 
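Note: packing_type_to_packing/2 is the bridge between the test-level packing atoms and the packing terms the node itself consumes. A minimal sketch of the mapping, assuming a throwaway key generated with ar_wallet:new/0 (illustrative, not part of the patch):

    %% Resolve packing terms for a freshly generated test address.
    Wallet = ar_wallet:new(),
    Addr = ar_wallet:to_address(Wallet),
    {spora_2_6, Addr} = ar_e2e:packing_type_to_packing(spora_2_6, Addr),
    {composite, Addr, 2} = ar_e2e:packing_type_to_packing(composite_2, Addr),
    unpacked = ar_e2e:packing_type_to_packing(unpacked, Addr).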
+ +start_source_node(Node, unpacked, _WalletFixture) -> + TempNode = case Node of + peer1 -> peer2; + peer2 -> peer1 + end, + {Blocks, _SourceAddr, Chunks} = ar_e2e:start_source_node(TempNode, composite_1, wallet_a), + {_, StorageModules} = ar_e2e:source_node_storage_modules(Node, unpacked, wallet_a), + [B0 | _] = Blocks, + {ok, Config} = ar_test_node:get_config(Node), + ar_test_node:start_other_node(Node, B0, Config#config{ + peers = [ar_test_node:peer_ip(TempNode)], + start_from_latest_state = true, + storage_modules = StorageModules, + auto_join = true + }, true), + ar_e2e:assert_syncs_range(Node, ?PARTITION_SIZE, 2*?PARTITION_SIZE), + ar_e2e:assert_chunks(Node, unpacked, Chunks), + ar_test_node:stop(TempNode), + {Blocks, undefined, Chunks}; +start_source_node(Node, PackingType, WalletFixture) -> + {Wallet, StorageModules} = source_node_storage_modules(Node, PackingType, WalletFixture), + RewardAddr = ar_wallet:to_address(Wallet), + [B0] = ar_weave:init([{RewardAddr, ?AR(200), <<>>}], 0, ?PARTITION_SIZE), + + {ok, Config} = ar_test_node:remote_call(Node, application, get_env, [arweave, config]), + + ?assertEqual(ar_test_node:peer_name(Node), + ar_test_node:start_other_node(Node, B0, Config#config{ + start_from_latest_state = true, + storage_modules = StorageModules, + auto_join = true, + mining_addr = RewardAddr + }, true) + ), + + %% Note: small chunks will be padded to 256 KiB. So B1 actually contains 3 chunks of data + %% and B2 starts at a chunk boundary and contains 1 chunk of data. + B1 = mine_block(Node, Wallet, floor(2.5 * ?DATA_CHUNK_SIZE)), + B2 = mine_block(Node, Wallet, floor(0.75 * ?DATA_CHUNK_SIZE)), + B3 = mine_block(Node, Wallet, ?PARTITION_SIZE), + B4 = mine_block(Node, Wallet, ?PARTITION_SIZE), + B5 = mine_block(Node, Wallet, ?PARTITION_SIZE), + + %% List of {Block, EndOffset, ChunkSize} + Chunks = [ + {B1, ?PARTITION_SIZE + ?DATA_CHUNK_SIZE, ?DATA_CHUNK_SIZE}, + {B1, ?PARTITION_SIZE + (2*?DATA_CHUNK_SIZE), ?DATA_CHUNK_SIZE}, + {B1, ?PARTITION_SIZE + floor(2.5 * ?DATA_CHUNK_SIZE), floor(0.5 * ?DATA_CHUNK_SIZE)}, + {B2, ?PARTITION_SIZE + floor(3.75 * ?DATA_CHUNK_SIZE), floor(0.75 * ?DATA_CHUNK_SIZE)}, + {B3, ?PARTITION_SIZE + (5*?DATA_CHUNK_SIZE), ?DATA_CHUNK_SIZE}, + {B3, ?PARTITION_SIZE + (6*?DATA_CHUNK_SIZE), ?DATA_CHUNK_SIZE}, + {B3, ?PARTITION_SIZE + (7*?DATA_CHUNK_SIZE), ?DATA_CHUNK_SIZE}, + {B3, ?PARTITION_SIZE + (8*?DATA_CHUNK_SIZE), ?DATA_CHUNK_SIZE} + ], + + SourcePacking = ar_e2e:packing_type_to_packing(PackingType, RewardAddr), + ar_e2e:assert_syncs_range(Node, ?PARTITION_SIZE, 2*?PARTITION_SIZE), + ar_e2e:assert_chunks(Node, SourcePacking, Chunks), + + {[B0, B1, B2, B3, B4, B5], RewardAddr, Chunks}. + +max_chunk_offset(Chunks) -> + lists:foldl(fun({_, EndOffset, _}, Acc) -> max(Acc, EndOffset) end, 0, Chunks). + +source_node_storage_modules(_Node, unpacked, _WalletFixture) -> + {undefined, source_node_storage_modules(unpacked)}; +source_node_storage_modules(Node, PackingType, WalletFixture) -> + Wallet = ar_test_node:remote_call(Node, ar_e2e, load_wallet_fixture, [WalletFixture]), + RewardAddr = ar_wallet:to_address(Wallet), + SourcePacking = packing_type_to_packing(PackingType, RewardAddr), + {Wallet, source_node_storage_modules(SourcePacking)}. 
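Note: ar_weave:init/3 is called with a genesis weave size of ?PARTITION_SIZE, so everything mine_block/3 writes lands in partition 1; each EndOffset in Chunks is ?PARTITION_SIZE plus the chunk's position within the test data. Cross-checking against the committed fixture names (with ?DATA_CHUNK_SIZE = 262144 and, judging by those names, ?PARTITION_SIZE = 2097152 in the e2e profile):

    %% EndOffset arithmetic behind the fixture filenames (derived, not normative):
    %%   B1: 2097152 + 1 * 262144    = 2359296  -> 2359296.bin
    %%   B1: 2097152 + 2.5 * 262144  = 2752512  -> 2752512.bin
    %%   B2: 2097152 + 3.75 * 262144 = 3080192  -> 3080192.bin
    %%   B3: 2097152 + 8 * 262144    = 4194304  -> 4194304.bin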
+ +source_node_storage_modules(SourcePacking) -> + [ + {?PARTITION_SIZE, 0, SourcePacking}, + {?PARTITION_SIZE, 1, SourcePacking}, + {?PARTITION_SIZE, 2, SourcePacking}, + {?PARTITION_SIZE, 3, SourcePacking}, + {?PARTITION_SIZE, 4, SourcePacking}, + {?PARTITION_SIZE, 5, SourcePacking}, + {?PARTITION_SIZE, 6, SourcePacking} + ]. + +mine_block(Node, Wallet, DataSize) -> + WeaveSize = ar_test_node:remote_call(Node, ar_node, get_current_weave_size, []), + Addr = ar_wallet:to_address(Wallet), + {TX, Chunks} = generate_tx(Node, Wallet, WeaveSize, DataSize), + B = ar_test_node:post_and_mine(#{ miner => Node, await_on => Node }, [TX]), + + ?assertEqual(Addr, B#block.reward_addr), + + Proofs = ar_test_data_sync:post_proofs(Node, B, TX, Chunks), + + ar_test_data_sync:wait_until_syncs_chunks(Node, Proofs, infinity), + B. + +generate_tx(Node, Wallet, WeaveSize, DataSize) -> + Chunks = generate_chunks(Node, WeaveSize, DataSize, []), + {DataRoot, _DataTree} = ar_merkle:generate_tree( + [{ar_tx:generate_chunk_id(Chunk), Offset} || {Chunk, Offset} <- Chunks] + ), + TX = ar_test_node:sign_tx(Node, Wallet, #{ + data_size => DataSize, + data_root => DataRoot + }), + {TX, [Chunk || {Chunk, _} <- Chunks]}. + +generate_chunks(Node, WeaveSize, DataSize, Acc) when DataSize > 0 -> + ChunkSize = min(DataSize, ?DATA_CHUNK_SIZE), + EndOffset = (length(Acc) * ?DATA_CHUNK_SIZE) + ChunkSize, + Chunk = ar_test_node:get_genesis_chunk(WeaveSize + EndOffset), + generate_chunks(Node, WeaveSize, DataSize - ChunkSize, Acc ++ [{Chunk, EndOffset}]); +generate_chunks(_, _, _, Acc) -> + Acc. + + +assert_block({spora_2_6, Address}, MinedBlock) -> + ?assertEqual(Address, MinedBlock#block.reward_addr), + ?assertEqual(0, MinedBlock#block.packing_difficulty); +assert_block({composite, Address, PackingDifficulty}, MinedBlock) -> + ?assertEqual(Address, MinedBlock#block.reward_addr), + ?assertEqual(PackingDifficulty, MinedBlock#block.packing_difficulty). + + +assert_syncs_range(Node, StartOffset, EndOffset) -> + ?assert( + ar_util:do_until( + fun() -> has_range(Node, StartOffset, EndOffset) end, + 100, + 60 * 1000 + ), + iolist_to_binary(io_lib:format( + "~s Failed to sync range ~p - ~p", [Node, StartOffset, EndOffset]))). + +assert_partition_size(Node, PartitionNumber, Packing, Size) -> + ?assert( + ar_util:do_until( + fun() -> + ar_test_node:remote_call(Node, ar_mining_stats, get_partition_data_size, + [PartitionNumber, Packing]) >= Size + end, + 100, + 60 * 1000 + ), + iolist_to_binary(io_lib:format( + "~s partition ~p,~p failed to reach size ~p", [Node, PartitionNumber, + ar_serialize:encode_packing(Packing, true), Size]))). + + + +has_range(Node, StartOffset, EndOffset) -> + NodeIP = ar_test_node:peer_ip(Node), + case ar_http_iface_client:get_sync_record(NodeIP) of + {ok, SyncRecord} -> + interval_contains(SyncRecord, StartOffset, EndOffset); + Error -> + ?assert(false, + iolist_to_binary(io_lib:format( + "Failed to get sync record from ~p: ~p", [Node, Error]))), + false + end. + +interval_contains(Intervals, Start, End) when End > Start -> + %% The sync record stores intervals as {End, Start} pairs, so iterating + %% from {Start, Start} yields the first interval with IntervalEnd >= Start. + Iter = gb_sets:iterator_from({Start, Start}, Intervals), + interval_contains2(Iter, Start, End). + +interval_contains2(Iter, Start, End) -> + case gb_sets:next(Iter) of + none -> + false; + {{IntervalEnd, IntervalStart}, _} when IntervalStart =< Start andalso IntervalEnd >= End -> + true; + _ -> + false + end. + +assert_chunks(Node, Packing, Chunks) -> + lists:foreach(fun({Block, EndOffset, ChunkSize}) -> + assert_chunk(Node, Packing, Block, EndOffset, ChunkSize) + end, Chunks).
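Note: a small self-contained sketch of the containment check above, using the same {End, Start} interval representation the code pattern-matches on:

    %% Two disjoint synced intervals: [0, 10) and [20, 30).
    Intervals = gb_sets:from_list([{10, 0}, {30, 20}]),
    true  = interval_contains(Intervals, 20, 30),  %% fully inside [20, 30)
    true  = interval_contains(Intervals, 0, 10),   %% fully inside [0, 10)
    false = interval_contains(Intervals, 5, 15).   %% straddles a gap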
+ +assert_chunk(Node, Packing, Block, EndOffset, ChunkSize) -> + {ok, {{<<"200">>, _}, _, EncodedProof, _, _}} = + ar_test_node:get_chunk(Node, EndOffset, any), + Proof = ar_serialize:json_map_to_poa_map( + jiffy:decode(EncodedProof, [return_maps]) + ), + {true, _} = ar_test_node:remote_call(Node, ar_poa, validate_paths, [ + Block#block.tx_root, + maps:get(tx_path, Proof), + maps:get(data_path, Proof), + EndOffset - 1 + ]), + Chunk = maps:get(chunk, Proof), + + maybe_write_chunk_fixture(Packing, EndOffset, Chunk), + + {ok, ExpectedPackedChunk} = ar_e2e:load_chunk_fixture(Packing, EndOffset), + ?assertEqual(ExpectedPackedChunk, Chunk, + iolist_to_binary(io_lib:format( + "Chunk at offset ~p, size ~p does not match previously packed chunk", + [EndOffset, ChunkSize]))), + + {ok, UnpackedChunk} = ar_packing_server:unpack( + Packing, EndOffset, Block#block.tx_root, Chunk, ?DATA_CHUNK_SIZE), + UnpaddedChunk = ar_packing_server:unpad_chunk(Packing, UnpackedChunk, ChunkSize, byte_size(Chunk)), + ExpectedUnpackedChunk = ar_test_node:get_genesis_chunk(EndOffset), + ?assertEqual(ExpectedUnpackedChunk, UnpaddedChunk, + iolist_to_binary(io_lib:format( + "Chunk at offset ~p, size ~p does not match unpacked chunk", + [EndOffset, ChunkSize]))). + +delayed_print(Format, Args) -> + %% Print the specific flavor of this test since it isn't captured in the test name. + %% Delay the print by 1 second to allow the eunit output to be flushed. + spawn(fun() -> + timer:sleep(1000), + io:fwrite(user, Format, Args) + end). + +%% -------------------------------------------------------------------------------------------- +%% Test Data Generation +%% -------------------------------------------------------------------------------------------- +write_wallet_fixtures() -> + Wallets = [wallet_a, wallet_b, wallet_c, wallet_d], + lists:foreach(fun(Wallet) -> + WalletName = atom_to_list(Wallet), + ar_wallet:new_keyfile(?DEFAULT_KEY_TYPE, WalletName), + ar_e2e:install_fixture( + ar_wallet:wallet_filepath(Wallet), wallets, WalletName ++ ".json") + end, Wallets), + ok. + +maybe_write_chunk_fixture(Packing, EndOffset, Chunk) when ?UPDATE_CHUNK_FIXTURES =:= true -> + ?LOG_ERROR("WARNING: Updating chunk fixture! EndOffset: ~p, Packing: ~p", + [EndOffset, ar_serialize:encode_packing(Packing, true)]), + ar_e2e:write_chunk_fixture(Packing, EndOffset, Chunk); +maybe_write_chunk_fixture(_, _, _) -> + ok. diff --git a/apps/arweave/e2e/ar_repack_in_place_mine_tests.erl b/apps/arweave/e2e/ar_repack_in_place_mine_tests.erl new file mode 100644 index 000000000..cbfa90114 --- /dev/null +++ b/apps/arweave/e2e/ar_repack_in_place_mine_tests.erl @@ -0,0 +1,85 @@ +-module(ar_repack_in_place_mine_tests). + +-include_lib("arweave/include/ar_config.hrl"). +-include_lib("arweave/include/ar_consensus.hrl"). +-include_lib("eunit/include/eunit.hrl"). + +%% -------------------------------------------------------------------------------------------- +%% Test Registration +%% -------------------------------------------------------------------------------------------- +repack_in_place_mine_test_() -> + [ + {timeout, 300, {with, {spora_2_6, spora_2_6}, [fun test_repack_in_place_mine/1]}} + ]. 
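Note: each registered case above is parameterized by a {FromPackingType, ToPackingType} pair, so broadening coverage only means extending the list. A hypothetical extra registration, mirroring the matrix in ar_repack_mine_tests (not part of this change set):

    {timeout, 300, {with, {composite_1, unpacked}, [fun test_repack_in_place_mine/1]}}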
+ +%% -------------------------------------------------------------------------------------------- +%% test_repack_in_place_mine +%% -------------------------------------------------------------------------------------------- +test_repack_in_place_mine({FromPackingType, ToPackingType}) -> + ar_e2e:delayed_print(<<" ~p -> ~p ">>, [FromPackingType, ToPackingType]), + ValidatorNode = peer1, + RepackerNode = peer2, + {Blocks, _AddrA, Chunks} = ar_e2e:start_source_node( + RepackerNode, FromPackingType, wallet_a), + + [B0 | _] = Blocks, + start_validator_node(ValidatorNode, RepackerNode, B0), + + {WalletB, FinalStorageModules} = ar_e2e:source_node_storage_modules( + RepackerNode, ToPackingType, wallet_b), + AddrB = case WalletB of + undefined -> undefined; + _ -> ar_wallet:to_address(WalletB) + end, + ToPacking = ar_e2e:packing_type_to_packing(ToPackingType, AddrB), + {ok, Config} = ar_test_node:get_config(RepackerNode), + + RepackInPlaceStorageModules = [ + {Module, ToPacking} || Module <- Config#config.storage_modules ], + ?LOG_INFO("Repack in place storage modules: ~p", [RepackInPlaceStorageModules]), + + ar_test_node:update_config(RepackerNode, Config#config{ + storage_modules = [], + repack_in_place_storage_modules = RepackInPlaceStorageModules, + mining_addr = undefined + }), + ar_test_node:restart(RepackerNode), + + ar_e2e:assert_partition_size(RepackerNode, 1, ToPacking, ?PARTITION_SIZE), + + ar_test_node:update_config(RepackerNode, Config#config{ + storage_modules = FinalStorageModules, + repack_in_place_storage_modules = [], + mining_addr = AddrB + }), + ar_test_node:restart(RepackerNode), + ar_e2e:assert_chunks(RepackerNode, ToPacking, Chunks), + + case ToPackingType of + unpacked -> + ok; + _ -> + CurrentHeight = ar_test_node:remote_call(RepackerNode, ar_node, get_height, []), + ar_test_node:mine(RepackerNode), + + RepackerBI = ar_test_node:wait_until_height(RepackerNode, CurrentHeight + 1), + {ok, RepackerBlock} = ar_test_node:http_get_block(element(1, hd(RepackerBI)), RepackerNode), + ar_e2e:assert_block(ToPacking, RepackerBlock), + + ValidatorBI = ar_test_node:wait_until_height(ValidatorNode, RepackerBlock#block.height), + {ok, ValidatorBlock} = ar_test_node:http_get_block(element(1, hd(ValidatorBI)), ValidatorNode), + ?assertEqual(RepackerBlock, ValidatorBlock) + end. + + +start_validator_node(ValidatorNode, RepackerNode, B0) -> + {ok, Config} = ar_test_node:get_config(ValidatorNode), + ?assertEqual(ar_test_node:peer_name(ValidatorNode), + ar_test_node:start_other_node(ValidatorNode, B0, Config#config{ + peers = [ar_test_node:peer_ip(RepackerNode)], + start_from_latest_state = true, + auto_join = true + }, true) + ), + ok. + \ No newline at end of file diff --git a/apps/arweave/e2e/ar_repack_mine_tests.erl b/apps/arweave/e2e/ar_repack_mine_tests.erl new file mode 100644 index 000000000..af2f94403 --- /dev/null +++ b/apps/arweave/e2e/ar_repack_mine_tests.erl @@ -0,0 +1,92 @@ +-module(ar_repack_mine_tests). + +-include_lib("arweave/include/ar_config.hrl"). +-include_lib("arweave/include/ar_consensus.hrl"). +-include_lib("eunit/include/eunit.hrl"). 
+ +%% -------------------------------------------------------------------------------------------- +%% Test Registration +%% -------------------------------------------------------------------------------------------- +repack_mine_test_() -> + [ + {timeout, 300, {with, {unpacked, spora_2_6}, [fun test_repack_mine/1]}}, + {timeout, 300, {with, {unpacked, composite_1}, [fun test_repack_mine/1]}}, + {timeout, 300, {with, {unpacked, composite_2}, [fun test_repack_mine/1]}}, + {timeout, 300, {with, {spora_2_6, spora_2_6}, [fun test_repack_mine/1]}}, + {timeout, 300, {with, {spora_2_6, composite_1}, [fun test_repack_mine/1]}}, + {timeout, 300, {with, {spora_2_6, composite_2}, [fun test_repack_mine/1]}}, + {timeout, 300, {with, {spora_2_6, unpacked}, [fun test_repack_mine/1]}}, + {timeout, 300, {with, {composite_1, spora_2_6}, [fun test_repack_mine/1]}}, + {timeout, 300, {with, {composite_1, composite_1}, [fun test_repack_mine/1]}}, + {timeout, 300, {with, {composite_1, composite_2}, [fun test_repack_mine/1]}}, + {timeout, 300, {with, {composite_1, unpacked}, [fun test_repack_mine/1]}}, + {timeout, 300, {with, {composite_2, spora_2_6}, [fun test_repack_mine/1]}}, + {timeout, 300, {with, {composite_2, composite_1}, [fun test_repack_mine/1]}}, + {timeout, 300, {with, {composite_2, composite_2}, [fun test_repack_mine/1]}}, + {timeout, 300, {with, {composite_2, unpacked}, [fun test_repack_mine/1]}} + ]. + +%% -------------------------------------------------------------------------------------------- +%% test_repack_mine +%% -------------------------------------------------------------------------------------------- +test_repack_mine({FromPackingType, ToPackingType}) -> + ar_e2e:delayed_print(<<" ~p -> ~p ">>, [FromPackingType, ToPackingType]), + ValidatorNode = peer1, + RepackerNode = peer2, + {Blocks, _AddrA, Chunks} = ar_e2e:start_source_node( + RepackerNode, FromPackingType, wallet_a), + + [B0 | _] = Blocks, + start_validator_node(ValidatorNode, RepackerNode, B0), + + {WalletB, StorageModules} = ar_e2e:source_node_storage_modules( + RepackerNode, ToPackingType, wallet_b), + AddrB = case WalletB of + undefined -> undefined; + _ -> ar_wallet:to_address(WalletB) + end, + ToPacking = ar_e2e:packing_type_to_packing(ToPackingType, AddrB), + {ok, Config} = ar_test_node:get_config(RepackerNode), + ar_test_node:update_config(RepackerNode, Config#config{ + storage_modules = Config#config.storage_modules ++ StorageModules, + mining_addr = AddrB + }), + ar_test_node:restart(RepackerNode), + + ar_e2e:assert_partition_size(RepackerNode, 1, ToPacking, ?PARTITION_SIZE), + + ar_test_node:update_config(RepackerNode, Config#config{ + storage_modules = StorageModules, + mining_addr = AddrB + }), + ar_test_node:restart(RepackerNode), + ar_e2e:assert_chunks(RepackerNode, ToPacking, Chunks), + + case ToPackingType of + unpacked -> + ok; + _ -> + CurrentHeight = ar_test_node:remote_call(RepackerNode, ar_node, get_height, []), + ar_test_node:mine(RepackerNode), + + RepackerBI = ar_test_node:wait_until_height(RepackerNode, CurrentHeight + 1), + {ok, RepackerBlock} = ar_test_node:http_get_block(element(1, hd(RepackerBI)), RepackerNode), + ar_e2e:assert_block(ToPacking, RepackerBlock), + + ValidatorBI = ar_test_node:wait_until_height(ValidatorNode, RepackerBlock#block.height), + {ok, ValidatorBlock} = ar_test_node:http_get_block(element(1, hd(ValidatorBI)), ValidatorNode), + ?assertEqual(RepackerBlock, ValidatorBlock) + end. 
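Note: the repack flow in test_repack_mine/1 boils down to a two-step config swap, a pattern the in-place variant repeats:

    %% Step 1: run with source + target storage modules so the node repacks by
    %%         syncing into the target modules (asserted via partition size).
    %% Step 2: restart with only the target modules, verify the repacked
    %%         chunks, then mine to prove the new packing is usable.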
+ + +start_validator_node(ValidatorNode, RepackerNode, B0) -> + {ok, Config} = ar_test_node:get_config(ValidatorNode), + ?assertEqual(ar_test_node:peer_name(ValidatorNode), + ar_test_node:start_other_node(ValidatorNode, B0, Config#config{ + peers = [ar_test_node:peer_ip(RepackerNode)], + start_from_latest_state = true, + auto_join = true + }, true) + ), + ok. + \ No newline at end of file diff --git a/apps/arweave/e2e/ar_sync_pack_mine_tests.erl b/apps/arweave/e2e/ar_sync_pack_mine_tests.erl new file mode 100644 index 000000000..6ba3c79c9 --- /dev/null +++ b/apps/arweave/e2e/ar_sync_pack_mine_tests.erl @@ -0,0 +1,119 @@ +-module(ar_sync_pack_mine_tests). + +-include_lib("arweave/include/ar.hrl"). +-include_lib("arweave/include/ar_config.hrl"). +-include_lib("arweave/include/ar_consensus.hrl"). +-include_lib("eunit/include/eunit.hrl"). + +%% -------------------------------------------------------------------------------------------- +%% Fixtures +%% -------------------------------------------------------------------------------------------- +setup_source_node(PackingType) -> + SourceNode = peer1, + SinkNode = peer2, + ar_test_node:stop(SinkNode), + {Blocks, _SourceAddr, Chunks} = ar_e2e:start_source_node(SourceNode, PackingType, wallet_a), + + {Blocks, Chunks, PackingType}. + +instantiator(GenesisData, SinkPackingType, TestFun) -> + {timeout, 300, {with, {GenesisData, SinkPackingType}, [TestFun]}}. + +%% -------------------------------------------------------------------------------------------- +%% Test Registration +%% -------------------------------------------------------------------------------------------- +spora_2_6_sync_pack_mine_test_() -> + {setup, fun () -> setup_source_node(spora_2_6) end, + fun (GenesisData) -> + [ + instantiator(GenesisData, spora_2_6, fun test_sync_pack_mine/1) + % instantiator(GenesisData, composite_1, fun test_sync_pack_mine/1), + % instantiator(GenesisData, composite_2, fun test_sync_pack_mine/1), + % instantiator(GenesisData, unpacked, fun test_sync_pack_mine/1) + ] + end}. + +% composite_1_sync_pack_mine_test_() -> +% {setup, fun () -> setup_source_node(composite_1) end, +% fun (GenesisData) -> +% [ +% instantiator(GenesisData, spora_2_6, fun test_sync_pack_mine/1), +% instantiator(GenesisData, composite_1, fun test_sync_pack_mine/1), +% instantiator(GenesisData, composite_2, fun test_sync_pack_mine/1), +% instantiator(GenesisData, unpacked, fun test_sync_pack_mine/1) +% ] +% end}. + +% composite_2_sync_pack_mine_test_() -> +% {setup, fun () -> setup_source_node(composite_2) end, +% fun (GenesisData) -> +% [ +% instantiator(GenesisData, spora_2_6, fun test_sync_pack_mine/1), +% instantiator(GenesisData, composite_1, fun test_sync_pack_mine/1), +% instantiator(GenesisData, composite_2, fun test_sync_pack_mine/1), +% instantiator(GenesisData, unpacked, fun test_sync_pack_mine/1) +% ] +% end}. + +% unpacked_sync_pack_mine_test_() -> +% {setup, fun () -> setup_source_node(unpacked) end, +% fun (GenesisData) -> +% [ +% instantiator(GenesisData, spora_2_6, fun test_sync_pack_mine/1), +% instantiator(GenesisData, composite_1, fun test_sync_pack_mine/1), +% instantiator(GenesisData, composite_2, fun test_sync_pack_mine/1), +% instantiator(GenesisData, unpacked, fun test_sync_pack_mine/1) +% ] +% end}. 
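Note: setup_source_node/1 runs once per suite, and its {Blocks, Chunks, PackingType} result is threaded into every instantiated case, so all sink-packing variants reuse the same source-node genesis data. Re-enabling one of the commented-out variants is a one-line registration, e.g.:

    instantiator(GenesisData, unpacked, fun test_sync_pack_mine/1)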
+ +%% -------------------------------------------------------------------------------------------- +%% test_sync_pack_mine +%% -------------------------------------------------------------------------------------------- +test_sync_pack_mine({{Blocks, Chunks, SourcePackingType}, SinkPackingType}) -> + ar_e2e:delayed_print(<<" ~p -> ~p ">>, [SourcePackingType, SinkPackingType]), + [B0 | _] = Blocks, + SourceNode = peer1, + SinkNode = peer2, + + SinkPacking = start_sink_node(SinkNode, SourceNode, B0, SinkPackingType), + ar_e2e:assert_syncs_range(SinkNode, ?PARTITION_SIZE, 2*?PARTITION_SIZE), + ar_e2e:assert_chunks(SinkNode, SinkPacking, Chunks), + + case SinkPackingType of + unpacked -> + ok; + _ -> + CurrentHeight = ar_test_node:remote_call(SinkNode, ar_node, get_height, []), + ?LOG_ERROR([{event, test_sync_pack_mine}, {current_height, CurrentHeight}]), + ar_test_node:mine(SinkNode), + + SinkBI = ar_test_node:wait_until_height(SinkNode, CurrentHeight + 1), + {ok, SinkBlock} = ar_test_node:http_get_block(element(1, hd(SinkBI)), SinkNode), + ar_e2e:assert_block(SinkPacking, SinkBlock), + + SourceBI = ar_test_node:wait_until_height(SourceNode, SinkBlock#block.height), + {ok, SourceBlock} = ar_test_node:http_get_block(element(1, hd(SourceBI)), SourceNode), + ?assertEqual(SinkBlock, SourceBlock), + ok + end. + +start_sink_node(Node, SourceNode, B0, PackingType) -> + Wallet = ar_test_node:remote_call(Node, ar_e2e, load_wallet_fixture, [wallet_b]), + SinkAddr = ar_wallet:to_address(Wallet), + SinkPacking = ar_e2e:packing_type_to_packing(PackingType, SinkAddr), + {ok, Config} = ar_test_node:get_config(Node), + + StorageModules = [ + {?PARTITION_SIZE, 1, SinkPacking} + ], + ?assertEqual(ar_test_node:peer_name(Node), + ar_test_node:start_other_node(Node, B0, Config#config{ + peers = [ar_test_node:peer_ip(SourceNode)], + start_from_latest_state = true, + storage_modules = StorageModules, + auto_join = true, + mining_addr = SinkAddr + }, true) + ), + + SinkPacking. 
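Note: the sink configures a single storage module at partition index 1; with the convention that partition N covers weave offsets [N * ?PARTITION_SIZE, (N + 1) * ?PARTITION_SIZE), that is exactly the range asserted by assert_syncs_range/3 in test_sync_pack_mine/1:

    {?PARTITION_SIZE, 1, SinkPacking}  %% => offsets ?PARTITION_SIZE .. 2 * ?PARTITION_SIZE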
diff --git a/apps/arweave/e2e/fixtures/chunks/composite_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk.1/2359296.bin b/apps/arweave/e2e/fixtures/chunks/composite_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk.1/2359296.bin new file mode 100644 index 000000000..94103247b Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/composite_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk.1/2359296.bin differ diff --git a/apps/arweave/e2e/fixtures/chunks/composite_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk.1/2621440.bin b/apps/arweave/e2e/fixtures/chunks/composite_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk.1/2621440.bin new file mode 100644 index 000000000..d017b9a20 Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/composite_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk.1/2621440.bin differ diff --git a/apps/arweave/e2e/fixtures/chunks/composite_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk.1/2752512.bin b/apps/arweave/e2e/fixtures/chunks/composite_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk.1/2752512.bin new file mode 100644 index 000000000..3512c95e1 Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/composite_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk.1/2752512.bin differ diff --git a/apps/arweave/e2e/fixtures/chunks/composite_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk.1/3080192.bin b/apps/arweave/e2e/fixtures/chunks/composite_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk.1/3080192.bin new file mode 100644 index 000000000..3bc28aab7 Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/composite_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk.1/3080192.bin differ diff --git a/apps/arweave/e2e/fixtures/chunks/composite_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk.1/3407872.bin b/apps/arweave/e2e/fixtures/chunks/composite_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk.1/3407872.bin new file mode 100644 index 000000000..82cdfd928 Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/composite_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk.1/3407872.bin differ diff --git a/apps/arweave/e2e/fixtures/chunks/composite_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk.1/3670016.bin b/apps/arweave/e2e/fixtures/chunks/composite_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk.1/3670016.bin new file mode 100644 index 000000000..302fee8dc Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/composite_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk.1/3670016.bin differ diff --git a/apps/arweave/e2e/fixtures/chunks/composite_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk.1/3932160.bin b/apps/arweave/e2e/fixtures/chunks/composite_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk.1/3932160.bin new file mode 100644 index 000000000..a762d03c9 Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/composite_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk.1/3932160.bin differ diff --git a/apps/arweave/e2e/fixtures/chunks/composite_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk.1/4194304.bin b/apps/arweave/e2e/fixtures/chunks/composite_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk.1/4194304.bin new file mode 100644 index 000000000..d73b503c4 Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/composite_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk.1/4194304.bin differ diff --git a/apps/arweave/e2e/fixtures/chunks/composite_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk.2/2359296.bin b/apps/arweave/e2e/fixtures/chunks/composite_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk.2/2359296.bin new file mode 100644 index 000000000..03d10f35b Binary files /dev/null and 
b/apps/arweave/e2e/fixtures/chunks/composite_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk.2/2359296.bin differ diff --git a/apps/arweave/e2e/fixtures/chunks/composite_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk.2/2621440.bin b/apps/arweave/e2e/fixtures/chunks/composite_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk.2/2621440.bin new file mode 100644 index 000000000..e12928638 Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/composite_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk.2/2621440.bin differ diff --git a/apps/arweave/e2e/fixtures/chunks/composite_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk.2/2752512.bin b/apps/arweave/e2e/fixtures/chunks/composite_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk.2/2752512.bin new file mode 100644 index 000000000..f9ca3f73d Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/composite_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk.2/2752512.bin differ diff --git a/apps/arweave/e2e/fixtures/chunks/composite_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk.2/3080192.bin b/apps/arweave/e2e/fixtures/chunks/composite_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk.2/3080192.bin new file mode 100644 index 000000000..be5f8eb85 Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/composite_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk.2/3080192.bin differ diff --git a/apps/arweave/e2e/fixtures/chunks/composite_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk.2/3407872.bin b/apps/arweave/e2e/fixtures/chunks/composite_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk.2/3407872.bin new file mode 100644 index 000000000..3b71915ee Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/composite_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk.2/3407872.bin differ diff --git a/apps/arweave/e2e/fixtures/chunks/composite_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk.2/3670016.bin b/apps/arweave/e2e/fixtures/chunks/composite_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk.2/3670016.bin new file mode 100644 index 000000000..38d1d2149 Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/composite_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk.2/3670016.bin differ diff --git a/apps/arweave/e2e/fixtures/chunks/composite_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk.2/3932160.bin b/apps/arweave/e2e/fixtures/chunks/composite_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk.2/3932160.bin new file mode 100644 index 000000000..506c4fa36 Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/composite_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk.2/3932160.bin differ diff --git a/apps/arweave/e2e/fixtures/chunks/composite_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk.2/4194304.bin b/apps/arweave/e2e/fixtures/chunks/composite_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk.2/4194304.bin new file mode 100644 index 000000000..b060ba2ca Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/composite_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk.2/4194304.bin differ diff --git a/apps/arweave/e2e/fixtures/chunks/composite_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ.1/2359296.bin b/apps/arweave/e2e/fixtures/chunks/composite_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ.1/2359296.bin new file mode 100644 index 000000000..39168f075 Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/composite_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ.1/2359296.bin differ diff --git a/apps/arweave/e2e/fixtures/chunks/composite_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ.1/2621440.bin b/apps/arweave/e2e/fixtures/chunks/composite_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ.1/2621440.bin new file 
mode 100644 index 000000000..735f119d0 Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/composite_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ.1/2621440.bin differ diff --git a/apps/arweave/e2e/fixtures/chunks/composite_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ.1/2752512.bin b/apps/arweave/e2e/fixtures/chunks/composite_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ.1/2752512.bin new file mode 100644 index 000000000..ee77f39a1 Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/composite_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ.1/2752512.bin differ diff --git a/apps/arweave/e2e/fixtures/chunks/composite_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ.1/3080192.bin b/apps/arweave/e2e/fixtures/chunks/composite_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ.1/3080192.bin new file mode 100644 index 000000000..631bc58f8 Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/composite_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ.1/3080192.bin differ diff --git a/apps/arweave/e2e/fixtures/chunks/composite_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ.1/3407872.bin b/apps/arweave/e2e/fixtures/chunks/composite_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ.1/3407872.bin new file mode 100644 index 000000000..d1ee12b2a Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/composite_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ.1/3407872.bin differ diff --git a/apps/arweave/e2e/fixtures/chunks/composite_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ.1/3670016.bin b/apps/arweave/e2e/fixtures/chunks/composite_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ.1/3670016.bin new file mode 100644 index 000000000..ba817d673 Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/composite_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ.1/3670016.bin differ diff --git a/apps/arweave/e2e/fixtures/chunks/composite_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ.1/3932160.bin b/apps/arweave/e2e/fixtures/chunks/composite_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ.1/3932160.bin new file mode 100644 index 000000000..afbf91aae Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/composite_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ.1/3932160.bin differ diff --git a/apps/arweave/e2e/fixtures/chunks/composite_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ.1/4194304.bin b/apps/arweave/e2e/fixtures/chunks/composite_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ.1/4194304.bin new file mode 100644 index 000000000..a3c5f9ee2 Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/composite_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ.1/4194304.bin differ diff --git a/apps/arweave/e2e/fixtures/chunks/composite_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ.2/2359296.bin b/apps/arweave/e2e/fixtures/chunks/composite_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ.2/2359296.bin new file mode 100644 index 000000000..27f243876 Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/composite_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ.2/2359296.bin differ diff --git a/apps/arweave/e2e/fixtures/chunks/composite_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ.2/2621440.bin b/apps/arweave/e2e/fixtures/chunks/composite_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ.2/2621440.bin new file mode 100644 index 000000000..d4df9b006 Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/composite_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ.2/2621440.bin differ diff --git a/apps/arweave/e2e/fixtures/chunks/composite_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ.2/2752512.bin 
b/apps/arweave/e2e/fixtures/chunks/composite_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ.2/2752512.bin new file mode 100644 index 000000000..b3b3623b5 Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/composite_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ.2/2752512.bin differ diff --git a/apps/arweave/e2e/fixtures/chunks/composite_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ.2/3080192.bin b/apps/arweave/e2e/fixtures/chunks/composite_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ.2/3080192.bin new file mode 100644 index 000000000..b3e9954b5 Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/composite_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ.2/3080192.bin differ diff --git a/apps/arweave/e2e/fixtures/chunks/composite_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ.2/3407872.bin b/apps/arweave/e2e/fixtures/chunks/composite_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ.2/3407872.bin new file mode 100644 index 000000000..2d1e6781b Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/composite_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ.2/3407872.bin differ diff --git a/apps/arweave/e2e/fixtures/chunks/composite_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ.2/3670016.bin b/apps/arweave/e2e/fixtures/chunks/composite_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ.2/3670016.bin new file mode 100644 index 000000000..f5f731cea Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/composite_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ.2/3670016.bin differ diff --git a/apps/arweave/e2e/fixtures/chunks/composite_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ.2/3932160.bin b/apps/arweave/e2e/fixtures/chunks/composite_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ.2/3932160.bin new file mode 100644 index 000000000..cda7ba635 Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/composite_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ.2/3932160.bin differ diff --git a/apps/arweave/e2e/fixtures/chunks/composite_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ.2/4194304.bin b/apps/arweave/e2e/fixtures/chunks/composite_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ.2/4194304.bin new file mode 100644 index 000000000..97b853296 Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/composite_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ.2/4194304.bin differ diff --git a/apps/arweave/e2e/fixtures/chunks/spora_2_6_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk/2359296.bin b/apps/arweave/e2e/fixtures/chunks/spora_2_6_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk/2359296.bin new file mode 100644 index 000000000..da72ea126 Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/spora_2_6_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk/2359296.bin differ diff --git a/apps/arweave/e2e/fixtures/chunks/spora_2_6_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk/2621440.bin b/apps/arweave/e2e/fixtures/chunks/spora_2_6_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk/2621440.bin new file mode 100644 index 000000000..ded8f4acb Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/spora_2_6_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk/2621440.bin differ diff --git a/apps/arweave/e2e/fixtures/chunks/spora_2_6_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk/2752512.bin b/apps/arweave/e2e/fixtures/chunks/spora_2_6_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk/2752512.bin new file mode 100644 index 000000000..762fdb4a3 Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/spora_2_6_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk/2752512.bin differ diff --git 
a/apps/arweave/e2e/fixtures/chunks/spora_2_6_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk/3080192.bin b/apps/arweave/e2e/fixtures/chunks/spora_2_6_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk/3080192.bin new file mode 100644 index 000000000..eafacbc76 Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/spora_2_6_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk/3080192.bin differ diff --git a/apps/arweave/e2e/fixtures/chunks/spora_2_6_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk/3407872.bin b/apps/arweave/e2e/fixtures/chunks/spora_2_6_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk/3407872.bin new file mode 100644 index 000000000..f7845caea Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/spora_2_6_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk/3407872.bin differ diff --git a/apps/arweave/e2e/fixtures/chunks/spora_2_6_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk/3670016.bin b/apps/arweave/e2e/fixtures/chunks/spora_2_6_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk/3670016.bin new file mode 100644 index 000000000..d628abc86 Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/spora_2_6_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk/3670016.bin differ diff --git a/apps/arweave/e2e/fixtures/chunks/spora_2_6_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk/3932160.bin b/apps/arweave/e2e/fixtures/chunks/spora_2_6_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk/3932160.bin new file mode 100644 index 000000000..ae44558b2 Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/spora_2_6_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk/3932160.bin differ diff --git a/apps/arweave/e2e/fixtures/chunks/spora_2_6_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk/4194304.bin b/apps/arweave/e2e/fixtures/chunks/spora_2_6_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk/4194304.bin new file mode 100644 index 000000000..dee8a1429 Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/spora_2_6_dpzqq7Ec6Pr-SZFtRvvH1LqHYNGxOvyucO9JFXIzSqk/4194304.bin differ diff --git a/apps/arweave/e2e/fixtures/chunks/spora_2_6_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ/2359296.bin b/apps/arweave/e2e/fixtures/chunks/spora_2_6_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ/2359296.bin new file mode 100644 index 000000000..46c139065 Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/spora_2_6_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ/2359296.bin differ diff --git a/apps/arweave/e2e/fixtures/chunks/spora_2_6_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ/2621440.bin b/apps/arweave/e2e/fixtures/chunks/spora_2_6_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ/2621440.bin new file mode 100644 index 000000000..30b18a239 Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/spora_2_6_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ/2621440.bin differ diff --git a/apps/arweave/e2e/fixtures/chunks/spora_2_6_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ/2752512.bin b/apps/arweave/e2e/fixtures/chunks/spora_2_6_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ/2752512.bin new file mode 100644 index 000000000..3d749a797 Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/spora_2_6_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ/2752512.bin differ diff --git a/apps/arweave/e2e/fixtures/chunks/spora_2_6_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ/3080192.bin b/apps/arweave/e2e/fixtures/chunks/spora_2_6_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ/3080192.bin new file mode 100644 index 000000000..877e33565 Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/spora_2_6_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ/3080192.bin differ 
diff --git a/apps/arweave/e2e/fixtures/chunks/spora_2_6_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ/3407872.bin b/apps/arweave/e2e/fixtures/chunks/spora_2_6_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ/3407872.bin new file mode 100644 index 000000000..51caa7d0c Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/spora_2_6_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ/3407872.bin differ diff --git a/apps/arweave/e2e/fixtures/chunks/spora_2_6_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ/3670016.bin b/apps/arweave/e2e/fixtures/chunks/spora_2_6_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ/3670016.bin new file mode 100644 index 000000000..af51688a6 Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/spora_2_6_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ/3670016.bin differ diff --git a/apps/arweave/e2e/fixtures/chunks/spora_2_6_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ/3932160.bin b/apps/arweave/e2e/fixtures/chunks/spora_2_6_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ/3932160.bin new file mode 100644 index 000000000..e08da0fbf Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/spora_2_6_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ/3932160.bin differ diff --git a/apps/arweave/e2e/fixtures/chunks/spora_2_6_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ/4194304.bin b/apps/arweave/e2e/fixtures/chunks/spora_2_6_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ/4194304.bin new file mode 100644 index 000000000..8c6994ced Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/spora_2_6_x4FejNj_u3nSj2-DpbJOs2tCoqXJ8BLP1UuWheyHGAQ/4194304.bin differ diff --git a/apps/arweave/e2e/fixtures/chunks/unpacked/2359296.bin b/apps/arweave/e2e/fixtures/chunks/unpacked/2359296.bin new file mode 100644 index 000000000..db9878c38 Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/unpacked/2359296.bin differ diff --git a/apps/arweave/e2e/fixtures/chunks/unpacked/2621440.bin b/apps/arweave/e2e/fixtures/chunks/unpacked/2621440.bin new file mode 100644 index 000000000..62e69271f Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/unpacked/2621440.bin differ diff --git a/apps/arweave/e2e/fixtures/chunks/unpacked/2752512.bin b/apps/arweave/e2e/fixtures/chunks/unpacked/2752512.bin new file mode 100644 index 000000000..df7e431d3 Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/unpacked/2752512.bin differ diff --git a/apps/arweave/e2e/fixtures/chunks/unpacked/3080192.bin b/apps/arweave/e2e/fixtures/chunks/unpacked/3080192.bin new file mode 100644 index 000000000..f426c1ff9 Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/unpacked/3080192.bin differ diff --git a/apps/arweave/e2e/fixtures/chunks/unpacked/3407872.bin b/apps/arweave/e2e/fixtures/chunks/unpacked/3407872.bin new file mode 100644 index 000000000..8240ecb17 Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/unpacked/3407872.bin differ diff --git a/apps/arweave/e2e/fixtures/chunks/unpacked/3670016.bin b/apps/arweave/e2e/fixtures/chunks/unpacked/3670016.bin new file mode 100644 index 000000000..8a72f0dc4 Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/unpacked/3670016.bin differ diff --git a/apps/arweave/e2e/fixtures/chunks/unpacked/3932160.bin b/apps/arweave/e2e/fixtures/chunks/unpacked/3932160.bin new file mode 100644 index 000000000..e78793dce Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/unpacked/3932160.bin differ diff --git a/apps/arweave/e2e/fixtures/chunks/unpacked/4194304.bin b/apps/arweave/e2e/fixtures/chunks/unpacked/4194304.bin new file mode 100644 
index 000000000..5df7d1f8f Binary files /dev/null and b/apps/arweave/e2e/fixtures/chunks/unpacked/4194304.bin differ diff --git a/apps/arweave/e2e/fixtures/wallets/wallet_a.json b/apps/arweave/e2e/fixtures/wallets/wallet_a.json new file mode 100644 index 000000000..7a2ed9862 --- /dev/null +++ b/apps/arweave/e2e/fixtures/wallets/wallet_a.json @@ -0,0 +1 @@ +{"kty":"RSA","ext":true,"e":"AQAB","n":"qi9ZdgEE_uoA804tHIeyiHdCpjZ608K3qX8cU-SrVMmyegXn3rTjGf29rwb2YqQuIohBVv--FSTDf4tQykCuJpKO5EHKH7qi7Hy1sxrJkHR13YbqX99vx4qAIQ-H8Zik4KdD8lKeaAmaOZ0lUt4Y48dxU51HHn06Bxg1HD7SRNnsFDt6juIPREn5pCzFd54braeIxexOM-0DekLR2dh8TjNAYfHy3tkDy64oSt_T5e4HzRLSGccMiGOo-6HFKj7ChxPiuFkDKBsBWcr9opK9TRaTzfQbCWWUasD9Hs5EtFPrS1HYTGpD4SuM33RPNXQZSTJp6BDRPP7bm-xH012uXoQNLe0Y9LUSphNrJZ1eDJ_pyXkS0NXgY9ggYIoJkjLxUCnAV_DO0e7w5BX1dpQv1N8gst-RvfDAYmIUYiDcMRMbmc8NqcJNITW81HqbSWhCLQ87w1UwwdLnB3vQ6dkcFSbzJjI2afqH-DieWvA1Pc9j8A9dQO3ac_4ZyvLo6EW_xAleM37x9jl7x3eSamGubi5IIYvaEVpyxqAHTxifVms-y8P9YiJEQbScdXtHKDEEFe8fZorPQppFscO-BSIzY6lrh_DbOdzTuytPOgMELeQ3bgAL4CVsizFoIMGo-61sEQwVbpCw3XHy-_TLfrn4crVJYn7WxX4SHNwpdfljeu8","d":"G18PL-P9FjSzn24w4jhO9hTcUthLS_iyyl-HwlRyW-ouuuJtRwvnxLvjQJ3JjdbjFqm8fI4YV9U4XjCdd1IM0GZc9ghAxnahkpCCNsK1rXaVqGH1GyNYGotDjU2uqyRGTF2Kl5RDJu94bxC_uoK_FQ90QiL3F8fDR_XUQO03q1wzVJO2Y_mmw_Bz5rxOrCzxPa5G2LJnZ4GUwBq0HqnrYDZtAfPEgKP9sMobb-Ns9LuiZJDE2uGBOgRxXrtHd0JtzgTcP5MNZ2tkfbkgrv-T06ywa_z5RjsgskTE0SoSscAXhV8t_yhOL45uE1hlDu9Ty8qAbxMZXAqPbpYDfVLBYu3EiHduGoyrGhxElprxt7BCJ_CzyobZfGQGwelvPL_zYhu2EwzFQGKgrhPgLiW5mru6Jlq6CK86J7KNuFFRXzhjqjNcfupC1XWI9X7SFVKCi4mNf_ZM4ZmagJUTP-hyPowrnw4FnKOjJBadWaAdCVC5uWydHYQPFToraatJXWA5QUl2tktOaK8DPkd1KhIak5pxNaLhihqCdJurLrpTQ638r_jA5wBcLmt6SoYpWm3b7JngiuemlNk-G7B2f5y79_GT-t3jKbyaPClet6fOa2FnqDOT9eqR_ZU6h2h-kWlVSadXzSGGCmqfru57rAxpmJZ5SbuKSBEKnjx8Z-6HbOE","p":"5McQNMYpbXEoaid0vFdWl6HOkOn0bSd1XeNDIxISkOsCO2G4kR5faxfA4rEZneXcQ4C_PhilRlgpjtkEG-5B9hgtWeeUWkoTTgBbuHqzv52R3Xc5L_VYtayHGjPi6wiJ5YYaDy4rVJv65gWGpO-6vj__DSnN4tvBcRryogzNH0BDvFyFKIbkqzp0a_psOXG3StxHv5Rue0ySAzvVHsgRxM0UJG06sH9KDstrhQRzQ6UGRWhV9uBPz-xMZYmFQC603kgWUEYRfF10eN7xdD9kM-NW_h4vzDqT6hdfOueZ7I0EUaLTqdb3QuX2UzvsZWCya37m4qW3a6t7zXky6nHaDw","q":"vm91oPH5LRficcNfSP4AN-mL3nBn3Pkd8lkL9ZP6G-tR4CIB3-T6FusOeD-v8RwRtoBjbZYDM5XlYZHI1XF1-WYc4TXQp6mvF0S13tU62KyesHxVTRp6v0IyL8F1WOmQReDqoQrsr0ccIxF7jJmxgo-djKAVTU8U1dFGGgSvpxuuI0G7gDYyB5ieL2s38TX1H8FjfhAPEf5DIhzdmiYzoX4qXrT93tCYHCB7h8ffJT9tj_eTuXUitqc9A1FKT8hcbA4-DnUhxr-hI-lkK_dcjuVpAf9VbaKbSQNJ03SG7W8ALkZXmAXMw17t8mw7nFV4nWQIvEly7dgUk8eBwe6xIQ","dp":"Du_sX_W8SLgFsoCm_5EYR0g6S33rBqF36UxoWsbYTXv6plPoEBmSk1R2tJZpnMSgUAv88Jn9WI1zES-cNBKnXeEQPPmA1zBU-FfPpUjlqZIpLvOU2UvEogAExjIzE7N4BXNvCiSykZCpnhEoTGaWo8tb5Mkg9znv9GmVA_2f-vVgNtE3pIDCN2fWqCIupMWG-S1OxfR0DjreobVrYdogRuA4-3PiTBnThQnFGGdE-1qwASIh0r-sll_QUSTcfWdPSeAdDNq2U49qhmXQEA3_hd_HE0p3Rndgpv0lq5vpkedXK9lcxo8Rj92h6qdT9P6OR7R-cLfvNOl6aN0L9QDAAw","dq":"LOuXwJ4zW7qtlI40VMBthsLVVmQHa-1rbfYpRwVf0uQgTRFYhdq6T1uk7yJ-uw4W84i3a2seWDW8hNZhnE-GN40ptMn_7Pyuq3tutyBvIBsf15uMd4KOf7z6n58vsghuGr2iOtib2gCZF4CRNyot4BFGZZyBSdoknQcfVRXT5UQ3QGPJ-cVO6dHLRn4xFPnYV2RDtsHM_D6Q0WQjta_bL_XVwr9Ivx1PNBtJaE7ySRP8ISCSPQXvaUxrrPOo5sbpXifB5aEllX8wYIs2MNTJhX-B1JHJMfJQVNmsuW9cQHeVgFThZp-_nDoxQKTdLtROfjnRgbCFpqr4t58w8XD_YQ","qi":"ImkdM0kwMvIQynehAma2kpjq72Wn54nr1TspB1CTrCTu9swcrgxhHbrd0CELrB5L1xhSBUzzRS05_Jx23fnKjVohV_Zm86QoaY8OAPKwVqVzA77PFfNChBoQ68NQhpXsFYKPU_5bhohhGMytkFKOqA3V1YDhyr8hStlcWLmIkt7MQMzPRK5s2z8uFmZ2KdSynfwr53pqr0UJwq9eCdzKGfL4aLiCEpL60yOk4ZGNCmKSl6pwG-cJQ9s-3cak5aiR54PxdjE0uJ3VUKQDq0lRAETM5lk8cEdINLwVlQ6g3Laxbh-BZIQl2WepmddH8ufAbZMaUgtYq_rzThM68qHLYw"} \ No newline at end of file 
diff --git a/apps/arweave/e2e/fixtures/wallets/wallet_b.json b/apps/arweave/e2e/fixtures/wallets/wallet_b.json new file mode 100644 index 000000000..8017170da --- /dev/null +++ b/apps/arweave/e2e/fixtures/wallets/wallet_b.json @@ -0,0 +1 @@ +{"kty":"RSA","ext":true,"e":"AQAB","n":"2xxp5KmsGIvR8Y6oSQJJNPgblXNX9vqKnE9P3CoujCnXdaYWsfbpBczZrSnaNV5w3_28Ph4fQHx4JWg6IqegSCB0tpZdnApRvLg406Ho8SWsc0d5QNtEZCvDb4RqWFwVmb7s6cXKtLw41Mq0rk_wI5pKurKpc5Kg_etw3K7TaSWOcsOH6Q42tiKAohN1tkwYnD0-nfRms6pZMVbLsgAMrA5-hojyPF3hSbtYEDYkMk9LOaXh2CXihlRKmpvWlZUo4jjJLUwVwB4k1YiMbLbnBcXpWKcCZtyU3_B_Q5-V1cPBHTMlG1AHrZGtkchRPTyozWvAPGzuyOpbD48k-RjrWkfzXOgsLfCYq5nCBWmOxoP0EgWm7fcgkKK0Q6LJ1CpIa-TtYzFn1Fph6ZrbYG9vxbkI5DP294txGxxjxK-f6gStQj3BG444q_NKfwfpf-sc2v--x50DG_LH3RnGshlcEdHml-107U8M4nHUBjEovtV92MPX0L9j0xLFYA27nWAHyNQhcuWkQbtXYxxk9WoSHM65o-_heDw7BAXc5NxVWXESy-Y_Zpm7i_fyAPiAiHMorqnR_x1ZaXbr7YnSD3e3eEWU0IomCjqcrEyF28kFhWTrlN0UbbCUMSRMTHq8jvTDL3aTggVWABw53DuneCoy87X06x5nBfHQet5DVeCqRdM","d":"Led5Nt2qjka8vcUt8_ugOJJKzrfsnFGt2poYfu4TJ8G-e6uuFYmtnW_ebxwZ1uFQKvKOylKnW5-E4aa4qWfbV0EPEIcJrydHGrF9vtjPDj8A-65bjjJh1N7v6EJ3wOkjez0Vy8ITaOwoLxc3oSsvPd_ZlO5QxSGEIYxLh85L4sQEC22FJWf_luSjluqv-laQjRoDYjZi53RZ9xXaMJs4ctOFe0OtFdZzIMOfHnEGJFDv8aPoxnmP0QwtHZ13cQFO7UlNqUlhAOdx8jl-qWowyR5xzTqFX-VOGCUOyM9IajOyW0EPBRjHtjeCLiI4fIJBY7bzl9dct_9QNraK3ccwD7SkMJwMlkycIR0V3rvn1nvg7MrJOaDZCiKOod_6M1NIYuKHTFcnvQu67E147YGnWVX2BVMlIiB6M9IUymjxfpxhdR-iu_t-i94NoLcxaaK8BaaVV9N2qbZhCmEXjQGF8qo-lEPRpMHlhJKN7I68L30kMkGexz9OjiOT0WAgpmxOxRhvRsl0bI8A26jvg4amkY_nc48lmRF9xdhYeArBZm0S3NFO2qVcyc4CaF1XAx6GkAd7k4MQi1TqEMz5hENEzFMQ516CIJAN3IzqEh0R-1B66uWm3Lm-XYsUUssKJ2BUPWm1toplfEguXVSk9eYhy_MG03LHDej-YUeLAa9X0Q","p":"7Vu-VFXsiTjBG9rxWWnriVDA65fWq3TSa3-x7DCaKpAOOV7yl5Mw0t5Jnp37qJJCPTU_-F_inST5a7Uhp07i_StQNoL9WrIQh7io8zk2GuvuvJx5h85zq6VYq9pC-jbf3ymO-H05jsjKL_ixX0UxaH4ng8wtUgsVyGhWF5CFk-AHiyugsIbMzoG_jVVPMkLiT0gd8yjI1fwVGFp2ZY-_KHiVT5pF33uRu3_DP_i3isMwUMFlnSb7sfZutCG0riVgZveyiKyj-PNcVpZ9kXm82ZVw5bA_pm_FG05kvHU5XT28iQLdIm09zMedrrdhJ7CGMebUtBC0pbApDXa222e5sQ","q":"7FHLM7XQXgB5u7lVhYU_lV5hydFGo_G-TDkFY3_jHWTHyd_7fRiEqHCFk5INNgJSDVwzp3eesNT7MkWS2JTxnBIWH--4myAqUfSp1ttFYWNm2AfDndht14-R1YE2fXWasgTmEXTgkpID4WmWeSTl7tv28vWs-CgzSWl--C3Vr7fHUOTFiPkg2pDTdznHVgzT-SFFFqnvIMS0C1VWf5C85cOigNQy7RWmtBqslPtv48LQ0hYkbMAVAZ3XYnatBmsj1bc8EVO1eFLNeP5hvkV7SuwGKdCQyvMkgBtW7gb9sJUfhAANfvIuSWaREegtPJ5KwspEhBhpsv9F2hOmDAYUww","dp":"qryBrlyYZyTCE-1sCqtcWEwUWePA8Vh5PAaAz6suWkuBT9dynYGtbyGix0xRCDMdHrY9K8adVfiQyd9jM9xU_1O2wV98K09HALneHgcbWkY4Vsgfy4bAQcoQfJ3l6-KpKvfT9f7t9j2M4vD7ddJp9gY5Gl82gnui0aPruculqndONdfOIOz2Sd2fEmU5MKhX7jur_4to3DQWYIxB-lBqawxCKx6IAHf8nmkK4-te65v4Fz7mfyLZjmv7ues88r_EFo06iYHV-W_lDgv2izyMkd8jdLVRM8HWgQvk_oM8HkwYYF4E_4yhFbrJPDKA2nHqNd8bReN2bnDHNv4cDrsQIQ","dq":"lymm4nfdRhPlyme9xb-7MU-DG7ZLCll7EYSz5raKT2YEyiQE2TsSuC_pscCNxMttMvCUdf31O0WxPLH2QaXceqmzD1Cm9Et55pyq-y2dTrNnuK4WueQUNvu2HC0f7taIUnEBvY7Wi8rswoZo4yrwDX8UkssFjmMgk0fxGM0wz8qtqxf7Jye8lTJooe4KjQd9m_FlIR8oP_yy8kDvKIAr5IjkbKXPwYnE7ZXWaSIAq18Vdh0Fxa6EgVk2ydwBx4ZHENC5kpfKD6JfnpKRcUU-nWkmdB7eT4OCCJP0YiOEqSxqUWQ7PcWqR_dcumiabxkN11XMx_ZZvk69nsZMw4osQw","qi":"FI3Yl18HjFsJhUuxh-WynnSIK9gSegwVK_EEdtYIvOOpKkjJDMQlnRYzNDnVbkFk6LB_dCZfmahQh5z193zRzAXRklE03d_WECc_UrCXhqjrwXkF33ZMQVZtXpjvB3krCXhnOmSNFwQZQDlJcUcPcqbTLrUukMm1_kgJxfch5PFANLUPKL-2_A8W4llpKvTeRZlED0BIKeJOMgDSsZvqbzHZif6ahSDOFO6D_SwkTjZ_qZ0IU2gU5wXNa6QhjcZX80Y05ROJrrT1O-K6rc6SJMYL7TW2xHZ2YmC2GX54rLUtuMiXWxllP1dhMDHIoZv3K8yDKi6-0ciOmWiuQJKNbA"} \ No newline at end of file diff --git a/apps/arweave/e2e/fixtures/wallets/wallet_c.json b/apps/arweave/e2e/fixtures/wallets/wallet_c.json new file 
mode 100644 index 000000000..fd07059f1 --- /dev/null +++ b/apps/arweave/e2e/fixtures/wallets/wallet_c.json @@ -0,0 +1 @@ +{"kty":"RSA","ext":true,"e":"AQAB","n":"qmUNnwxgCkuOlHxaDy89YKmh1tUPg3yID0O0ZSGGJJz-E_gQkLQuigq5zZZCLKnL5KGekpPeq66SNQHZmu2x2O_VbsMJ2rH2WlPqBU9NOgSYh-4MCm56sDeTPzYwGcN5RkTJqvrfvy3E0Ej1V2tPCITJYUINmMoaY9zUuhFQ2Az0sCLRnpyRu8pLmn5ccwmLEYXl3ncvzvgZT6wm-uhcVFRfCIBMe_3ccy7-i3uhS_3R1usq1CoDkyULKYSBeD3UcQwcmSyDaP8kU5bG0NQIzP10cX2_qwsUbm9f27e-Mxs7OPYLWZFW1Xl4WOkXEWYrmtSxJue7eDEEp_gBaSGSYy4_XtjPhM6LsOSs59NR-qVRchoT8q4w8PUfYwW1wc6ntgP2B931Z_oTK1Xzg6A8yIs8knJDCw0o78i6B6ersMoOHuPVt-NroxAIb3B_Ui97MUPb2UwGj1Lw4ige1R6XZy3rOL0IHaaVnaJQObXBCjDiurYqna7tHACANb7xlVXsVvhi9EliLBe5JCDphpmh2dfDj3nyMI0Ab7d5_BuefchnQwkw_kna3iykxnH5poftKEBFnF7EmjzABm4Xvh8YEVloYO18e19B9VUYJQcZbCOUtq9oD5xBeVt0X1u9waeAQUWLr9HIBg4CkgGv5BdQ5A50mUfVO8uinSh5O0VKau8","d":"p86ow9GbgRRYSiYJ6hRwAO4Ro5fm16pXSgHK56PJ5lZv6dFlIEqtrbGqWF8Jt5dy2-02cGNPlTR4XAVHDrRqO3v9jZBSPmfTuUf44Zl4h-ugE9TfPuh5r6hVZ08-PU7E1SAohicNEB_zKXoPNhhhZ0ow0OLtSPLnIwUowOheOToY7V3bR0mINUd6LWfQlimLdfrE0fH4Mxz_n0fGUFqlJme424wf_vHQxUq8nFCr3tTp8ONmBg3Kk4Z5T12vgm_1LAE6ixLV_KSuhcazGw56rd3qwxXaXe5forACHuPXfGS5BQnlFZFKM9Yr5bgRVanQK57z23qXqQ56KnpKJPrCsc6oroV3U-RvM9Nj_yLStwyeZCsGt0bd_TOxuTkwG9M04KRNM2qx4RuFhBQHiXH7yBe_QZHlxY1ou7K8jBmkNpp73gMOA01szjkOO9KAQI1WbA4BPyIzW428a9Or0H0W3x99Q6u2uMMt9mld3DK_vkRHc8JAXfTEfo43CeS4Gr7RtFel8WZ2n8tXKaAYcM8sGUoifiL-iI1dGKGysBsR8uMafOl4pOJLy7LouzNIvhBu0n-hDxOLbXETQ3y_GCZiuMWKdwqXqJ_BMCOQA1hcr9_sxcpalCy48dtEiQXtKDLt7qFtR8LXgBAiW4yjlkdkwGMx1rkwJMfwPcXNzCqWLQ","p":"vM5SLTPsnVilEwgXs5YUhHGd2fOb8xdQIHPl2_3mn2YntxenS1qDoCYs6FO8hEhE_OEVCf7xaJ0TAEfX_0WgX_-Tptb3XW2GBX6vHTLs-zyO-XulKKyDpKxNTT4W7f4QW486rdDsDG40xxNZkv-lDShFe8h2WuIZ9_EhzHZSrbDOBBg4yM2ERR1h0wv8yM2JjTB5G-tpuhrwdJ_6jeiXsuuO85rVuXuveiSAMIH2AYz1Z0STIGhqgL93uxw29JHg63Q1PaqpyCo9Td0kxyqQX3tN4ZRDZAKj4nO5j_u1Sx2n_TLBzpgkH_Xy1F9y9_IcXbm2AiqnJWnJuVS5viQenQ","q":"5wlUW8FjI3RvwL1QedfFp15ZVPaYc8N8sWH9U5tITK6G6IQ8UWtKphPO_gzxJFtP77sB_70tcf71u3UWY85vzuyQiQAIyjhJAWMupgYxvXn6Up8zUqgIMEUJxzhgVQ5319sWgpAL47YE7zKcy8BHdhesdaPvzIRQxlTHA57kXWur6X6f7QZiP9IySJwaWaXtjPxI07xx1l_D4dbPK6EFSrmW0Q92kC6koqNHiuNKQVhSpSnZ7EkT4VqNjsIvojPEBmTj-4bxEEObV-b-DGDppNXBnTBm2-gKpwsV4GdyGdK8wP1gZTIQtRBm_1GHBe9v8O-ZIGOaQ_NPiYZoQanT-w","dp":"IKEPdpxoofC15ooZfoHLXfA8tXPyWZqH0HP3H4PLnXSMHIpL8SvdX4n5bNU72SicM4-6kRWsJsYuiHfiDk28H5sNq2GvMkhBRyXToZoxdmHK27bQniziO01DtruqPssPjKM-ItfeU2-gU182tb7UiWeSSogkXCSDFGRp0OoJ89aAZBjDh4BtAXzIcS67KwDKasobxAV1KiKJt74GEQxHWzZ2aAc0NG_5rYQtWzS6jR4NMyGYw5sH_OQaDw4bOT0Uv9w_bz7VRLB4E8LKHlluxfGLThbPZrNGG1aglQ-ND0Q6yflBoTCN3bAlnSo5tjvzRwdXOxyf8klMAWlxCDk5yQ","dq":"zgHGo65TvPiE8UKdcJeSmcOKOjVMCOVF2VE7toIevKleiBPpSNw3itDc4DEgEEAPjf6dMLE5xY0HBijIVyRrFAJiepZ6P_5iMoeCv-2ECqSqLWPhOpG0A3570pUVaKJnACVN9AuHXnsd-T-TCicgUU-YqqkMGLve3ooXjsXucNKiTqhm582qa6f8yDvRTyCiKfWG5q4Af5uSqVyGDCwe8Nt9fFqiaLv-dzrKfzBeNNgRkU45D_S1clrxIFtMaABqiR0LIGvZpZvy9zV0UAtWKnGjm4reHLXSUdKTpi33UslTH26Oto0m0pyWipDiqcsvcJHkYzoNAwwAXutnKS3KYw","qi":"F_9EOpmQ3q4-NliFmm0XFUNSp4nNeEJOwWcfCTrlF2ktMtojlt7__7PuBN6uILWQwqfgDsaDjSYQxiCSICMYQve3TqJxOToMv10EAjcQgyZkNiOmuJKYuG8GhRkB9PWRPeATCSuUJrL2D7zFm-ROcfEo6TblktmyEt1bRMIVqaFDuu8tFA31HKvKQxr3N8umkOHBa_RSxhn0dRmT3horCWwBKbPy7RV1JM7h5qSKbOJxZf2PgzaTZ3KTkoPRov4_Po-anTg2yOsIdqh92PrbN82WO_IJNhhlXWsWCYvNBlOTPjmkCQhUQy77qtMwpVOi60XoITIyq0b9D8LO1c6d7Q"} \ No newline at end of file diff --git a/apps/arweave/e2e/fixtures/wallets/wallet_d.json b/apps/arweave/e2e/fixtures/wallets/wallet_d.json new file mode 100644 index 000000000..1f03509ee --- /dev/null +++ b/apps/arweave/e2e/fixtures/wallets/wallet_d.json @@ -0,0 +1 @@ 
+{"kty":"RSA","ext":true,"e":"AQAB","n":"y3dvwHO8Bclzz58uwX5BMRjbB3i9S3ODEGW8b8mrsnWjm0m104ymNVXEtfgcaRfxit4Odt4MtQd5TrNzVQdmYruOZ_P7frXXCc8kqjGfjNLm96RqTDqVqpl5s8SuTlNOjnDwEdNrEewq2kCWR4roUxHTCc3DUOM6PXyZhlmr2zmdjCmn8L0eTxije7p5QdObW_64kycDbwXcyhOQrZ_588aXFFfTMogXCXk0lmRu_YvLieRmT-nIw7uLKhSjsMwCkEO4DD5lGb9_BE2kl8BGp9kb_Sqebh75kE9IL3b6bU5iABqDRwcPyiIzjKYrSDYpm_8dq6rT2ylw8TMombh9HlLSb-nijFftxW34nC5ue-g5pien-Cwk9Bfwsd-4Gj1nx03sBMbOwdPUmZ6gY0vqW65TOJVhSrD2GO9fIX8b9KdoU15rybCKsiDEWBiaq045vtyv1W78JYonZ5p7nmPzCRieyJuuHRCT3TnLRyobG6eguVQbEdhCp5FWmITT4lDLl5TqHPoqJPxINGHwFtZvqmKw5p1whM4hWCs3A7QMUMaJqZ0vAEOUBTEIph1c17tVuJYADNgKRwZGkXz-JvDH5o8vB01P-BRU5CWlGjzsCfHUvND-zuvlhkoCWwaUrlLSwQZ2ZqIKuAFs3hA-dRYQRcx7OMSJUYiACtXeSfaQAxs","d":"Em3PAW96KEwG4VdZtMzqure1nwegnaToyiNs3fM2SgO9veL_RRoIM-yA1LqUWDCDAED8rmeOXxc-NZKrb5gr_eVfEKtYrDFsOMc6WvADs42mved2eVEVHU6pZ075Or7w7pXsKLEtkYICn6IZ-oDqahvDMbAhcMIkFE2k2jZlCoY9buSXAYcfp6pjpGFPelbgS4TW0v1Foli1ltgO0qsayKnEJWOPDZSmAYWo7bZLF0wCM4sseTCDrrbd9AHKkcjosohvsywznBFsP8eIkPYpcCqKDnQ9xVuo3xlPQH1WUXA4ECpWmahaFcTjRmoGoZPGUQradSIT7lXilPY9Ry8epecbaMdsu4yKcxLl5hF3_zTogtTi1VUnVaDHsDkxu3l3HBvly0OrchVPt44vmyvmsrhHrUUQCvqrKE3CV_00PzRBTzsUSuJLCQeDchTmh2azKoerojk4dqYA0I276mo8B19kEI1G8LZVp1sCaSWAtHzOyQn_bdZnl2o7tjTNzflqmzFtcvl-p8ocvvLCjfMXzY7aC8lNgqNhvTH-lcgLdSIj2x2lnJ2ThgHwjILSSk_R6WC6t1CE96l2akwCkIWsfLnCoxt_XpPw14OxEB4efD_dVI-KuzS-l5CfHAe225GgHFVAB2n-Zetwbr6bL3fflt5rZMf3VWQKdWiwcPuijYE","p":"7L8bLfK8h0ZbJA0Q2A5AltWj5cMbYf5r9AYAiMs8-bo7RnPMT02dKB0KKmCbMJFe8uXTZ6d24-bKBYSjHFJAoONkbe35IdKG_6mUrYRN__N9-__p08YoEE2FdiSDgG6XqlK7DzpPfhPS5a4fM5g8V-hLFXqJb69LfPPARmlaaQoHJEZ05XDaTQwxjtuWFUJ8HASiwvtdFE_uU1i3gfNJswKgfKTbB614-DpNRHtdDMeuExM6_WMmqnwzUnvgPvMWbojHcXpkd0XrCh8Wdz-TIaprvQtHO1NO-WdNPL3wGvceC_6aoNRHdAP5XJIL2wibgBzdmCNCuMq2RRCp6xsNBw","q":"3AN3HP4skZpSQJAS9Hm7y-HzMGYllorPjZtL3hYQiJG0nG7w_GPp_F9fPh5Bvs2pSG852Pftj2H7uchOGn9V0KzN-U9TYOwfSffZI6SrZfg6Mzl_wLOUqHN6Aq5jE4qVgoEv_fvTNcMSlI4kK-iQwW8JHzXuR-kEXhWK8EUmTNHtExzpVYQascyGpBfsJ74O8BdzUh8TrTVxRcO9w6PYQg_Hgn347001CvtEAEOWMSIADBBar1dro6LfyVm20FJLX3gnGSPOH3-kuozAL07dAh_GhHTKpRuwyySDfR8N5T21YL-0HJAlmu-zD7V25jKASuQVVffEfOldZnWTGnwoTQ","dp":"zADtkc1-SW8F8G3V2ueFHrSf08gpW2raaV-WrEm9lE-27kGwh5GQ39UOQnAWqmZKFDKY1dQHbeEcql6eEzSJfloT22pZ6Jw6OipN9KtybyDJqhHe0t8I_OtgGurh6hTiWiGKEVgk0baRX9uIBXSkYvfHY43Ayl2aReThBYuZHbRHbSnNZzy0z_m25qwvishMm_QesLfbgDpUWruy_abAFiIoWt_P4bDI8dWDaYSILRAP314N0fTTh8sYinY2SOg9pyfz_MQDuIemPoWFXWKKDVOGHVOPoP5rqhwrATGGqiXRXXKamgXyQHWANhWfY7HqFR5KkOOphgUfxSnT0cTwlw","dq":"CYRw26U3IllNo5NX7pFxiUFN9tMEXz3D-rk0D_heYLoE2RuHezOLRKqPgS1n5Kwa3ZJKK1OWSDSR4hiDIGxPtwYyps1CqxerxtRc5UjTTUbupZagKyLZlGviZElM6eR90TZrcA47tcCphhmcAPY_hM6b02jO1PeEg9lkuD4ViQ8vtTrz8QoU6YoSbPjH83QqS0KIb43-mOiN7Nmp1NO6oCj0lXWDlj59w-rYpzZFQfzZiawPcDRU6LA8BAbIfLyCnC-jaVf-K6im5JcAHUvJDbV4LfSra3cGL9N1iK0WOctwlC3WycGGjuw9j7lm2lBm8lZpgd2E925U5wDBC01BpQ","qi":"ImTCUAjhh2zfP8JGgRmcftSQ-sqn1jchpNpT548HBO4Bcm4rAlRyOQDCV3Jjqyb3UnbHfdx278f0g4jWHOeHsfw5VZ1BKyJ0uDLnYlUuXQ9WO4Rig0j7iUJjR7xV5y6tQFXbZTqSHnEPp9UQJFKhjoBdNmO7IC-24RfA2aahonsRFbvqDOLdGqYKHJhajCa0IFPqBSkkOhPshMHDdKn5iX0AyT2-YTeYXL1u17H_57Hbry3xjxQsdnAcB8uZy4vwMLiac2-Yy9GmSKP-PoP1O2jCeLbBHDbHQW_MdVw0dpbqwLHDwMpU66e8TJeU5-uzn33qZSNOc0nZCYeoAGoxUw"} \ No newline at end of file diff --git a/apps/arweave/include/ar.hrl b/apps/arweave/include/ar.hrl index dbb09c297..867f89c0e 100644 --- a/apps/arweave/include/ar.hrl +++ b/apps/arweave/include/ar.hrl @@ -357,9 +357,7 @@ %% picked as recall chunks and therefore equally incentivize the storage. -define(PADDING_NODE_DATA_ROOT, <<>>). --ifdef(DEBUG). --define(INITIAL_VDF_DIFFICULTY, 2). 
--else. +-ifndef(INITIAL_VDF_DIFFICULTY). -define(INITIAL_VDF_DIFFICULTY, 600_000). -endif. diff --git a/apps/arweave/include/ar_config.hrl b/apps/arweave/include/ar_config.hrl index eedc1ffc2..0a6f4c392 100644 --- a/apps/arweave/include/ar_config.hrl +++ b/apps/arweave/include/ar_config.hrl @@ -67,6 +67,7 @@ -define(MAX_PARALLEL_POST_CHUNK_REQUESTS, 100). -define(MAX_PARALLEL_GET_SYNC_RECORD_REQUESTS, 10). -define(MAX_PARALLEL_REWARD_HISTORY_REQUESTS, 1). +-define(MAX_PARALLEL_GET_TX_REQUESTS, 20). %% The number of parallel tx validation processes. -define(MAX_PARALLEL_POST_TX_REQUESTS, 20). @@ -179,7 +180,8 @@ get_wallet_list => ?MAX_PARALLEL_WALLET_LIST_REQUESTS, get_sync_record => ?MAX_PARALLEL_GET_SYNC_RECORD_REQUESTS, post_tx => ?MAX_PARALLEL_POST_TX_REQUESTS, - get_reward_history => ?MAX_PARALLEL_REWARD_HISTORY_REQUESTS + get_reward_history => ?MAX_PARALLEL_REWARD_HISTORY_REQUESTS, + get_tx => ?MAX_PARALLEL_GET_TX_REQUESTS }, disk_cache_size = ?DISK_CACHE_SIZE, packing_rate, diff --git a/apps/arweave/include/ar_consensus.hrl b/apps/arweave/include/ar_consensus.hrl index 32ba74aeb..6314a281a 100755 --- a/apps/arweave/include/ar_consensus.hrl +++ b/apps/arweave/include/ar_consensus.hrl @@ -78,13 +78,9 @@ )). %% Increase the difficulty of PoA1 solutions by this multiplier (e.g. 100x). --ifdef(DEBUG). --define(POA1_DIFF_MULTIPLIER, 1). --else. -ifndef(POA1_DIFF_MULTIPLIER). -define(POA1_DIFF_MULTIPLIER, 100). -endif. --endif. %% The number of nonce limiter steps sharing the entropy. We add the entropy %% from a past block every so often. If we did not add any entropy at all, even @@ -94,13 +90,9 @@ %% adding the entropy at certain blocks (rather than nonce limiter steps) allows %% miners to use extra bandwidth (bearing almost no additional costs) to compute %% nonces on the short forks with different-entropy nonce limiting chains. --ifdef(DEBUG). --define(NONCE_LIMITER_RESET_FREQUENCY, 5). --else. -ifndef(NONCE_LIMITER_RESET_FREQUENCY). -define(NONCE_LIMITER_RESET_FREQUENCY, (10 * 120)). -endif. --endif. %% The maximum number of one-step checkpoints the block header may include. -ifndef(NONCE_LIMITER_MAX_CHECKPOINTS_COUNT). diff --git a/apps/arweave/include/ar_vdf.hrl b/apps/arweave/include/ar_vdf.hrl index ee7ae11e5..39f968514 100755 --- a/apps/arweave/include/ar_vdf.hrl +++ b/apps/arweave/include/ar_vdf.hrl @@ -6,14 +6,16 @@ %% Typical ryzen 5900X iterations for 1 sec -define(VDF_SHA_1S, 15_000_000). +-ifndef(VDF_DIFFICULTY). + -define(VDF_DIFFICULTY, ?VDF_SHA_1S div ?VDF_CHECKPOINT_COUNT_IN_STEP). +-endif. + -ifdef(DEBUG). - -define(VDF_DIFFICULTY, 2). % NOTE. VDF_DIFFICULTY_RETARGET should be > 10 because it's > 10 in mainnet % So VDF difficulty should change slower than difficulty -define(VDF_DIFFICULTY_RETARGET, 20). -define(VDF_HISTORY_CUT, 2). -else. --define(VDF_DIFFICULTY, ?VDF_SHA_1S div ?VDF_CHECKPOINT_COUNT_IN_STEP). -ifndef(VDF_DIFFICULTY_RETARGET). -define(VDF_DIFFICULTY_RETARGET, 720). -endif. @@ -21,3 +23,5 @@ -define(VDF_HISTORY_CUT, 50). -endif. -endif. + + diff --git a/apps/arweave/src/ar.erl b/apps/arweave/src/ar.erl index d9d30f5ce..5f5cdc8d9 100644 --- a/apps/arweave/src/ar.erl +++ b/apps/arweave/src/ar.erl @@ -9,7 +9,7 @@ benchmark_packing/1, benchmark_packing/0, benchmark_vdf/0, benchmark_hash/1, benchmark_hash/0, start/0, start/1, start/2, stop/1, stop_dependencies/0, start_dependencies/0, - tests/0, tests/1, tests/2, shell/0, stop_shell/0, + tests/0, tests/1, tests/2, e2e/0, e2e/1, shell/0, stop_shell/0, docs/0, shutdown/1, console/1, console/2]). 
-include_lib("arweave/include/ar.hrl"). @@ -851,21 +851,25 @@ warn_if_single_scheduler() -> shell() -> Config = #config{ debug = true }, - start_for_tests(Config), - ar_test_node:boot_peers(). + start_for_tests(test,Config), + ar_test_node:boot_peers(test). stop_shell() -> - ar_test_node:stop_peers(), + ar_test_node:stop_peers(test), init:stop(). %% @doc Run all of the tests associated with the core project. tests() -> - tests([], #config{ debug = true }). + tests(test, [], #config{ debug = true }). -tests(Mods, Config) when is_list(Mods) -> +tests(Mod) -> + tests(test, Mod). + +tests(TestType, Mods, Config) when is_list(Mods) -> try - start_for_tests(Config), - ar_test_node:boot_peers() + start_for_tests(TestType, Config), + ar_test_node:boot_peers(TestType), + ar_test_node:wait_for_peers(TestType) catch Type:Reason -> io:format("Failed to start the peers due to ~p:~p~n", [Type, Reason]), @@ -875,7 +879,7 @@ tests(Mods, Config) when is_list(Mods) -> try eunit:test({timeout, ?TEST_TIMEOUT, [Mods]}, [verbose, {print_depth, 100}]) after - ar_test_node:stop_peers() + ar_test_node:stop_peers(TestType) end, case Result of ok -> ok; @@ -883,11 +887,11 @@ tests(Mods, Config) when is_list(Mods) -> end. -start_for_tests(Config) -> +start_for_tests(TestType, Config) -> UniqueName = ar_test_node:get_node_namespace(), TestConfig = Config#config{ peers = [], - data_dir = ".tmp/data_test_main_" ++ UniqueName, + data_dir = ".tmp/data_" ++ atom_to_list(TestType) ++ "_main_" ++ UniqueName, port = ar_test_node:get_unused_port(), disable = [randomx_jit], packing_rate = 20, @@ -897,8 +901,8 @@ start_for_tests(Config) -> %% @doc Run the tests for a set of module(s). %% Supports strings so that it can be trivially induced from a unix shell call. -tests(Mod) when not is_list(Mod) -> tests([Mod]); -tests(Args) -> +tests(TestType, Mod) when not is_list(Mod) -> tests(TestType, [Mod]); +tests(TestType, Args) -> Mods = lists:map( fun(Mod) when is_atom(Mod) -> Mod; @@ -906,7 +910,12 @@ tests(Args) -> end, Args ), - tests(Mods, #config{ debug = true }). + tests(TestType, Mods, #config{ debug = true }). + +e2e() -> + tests(e2e, [ar_sync_pack_mine_tests, ar_repack_mine_tests]). +e2e(Mod) -> + tests(e2e, Mod). %% @doc Generate the project documentation. docs() -> @@ -947,11 +956,11 @@ commandline_parser_test_() -> end}. -ifdef(DEBUG). -console(_) -> - ok. +console(Format) -> + ?LOG_INFO(io_lib:format(Format, [])). -console(_, _) -> - ok. +console(Format, Params) -> + ?LOG_INFO(io_lib:format(Format, Params)). -else. console(Format) -> io:format(Format). 
diff --git a/apps/arweave/src/ar_bench_hash.erl b/apps/arweave/src/ar_bench_hash.erl index 32f94e120..0abcd5fd0 100644 --- a/apps/arweave/src/ar_bench_hash.erl +++ b/apps/arweave/src/ar_bench_hash.erl @@ -52,10 +52,9 @@ run_benchmark(RandomXState, JIT, LargePages, HardwareAES) -> Iterations = 1000, {H0Time, _} = timer:tc(fun() -> lists:foreach( - fun(_) -> - PartitionNumber = rand:uniform(1000), + fun(I) -> Data = << NonceLimiterOutput:32/binary, - PartitionNumber:256, Seed:32/binary, MiningAddr/binary >>, + I:256, Seed:32/binary, MiningAddr/binary >>, ar_mine_randomx:hash(RandomXState, Data, JIT, LargePages, HardwareAES) end, lists:seq(1, Iterations)) diff --git a/apps/arweave/src/ar_chunk_storage.erl b/apps/arweave/src/ar_chunk_storage.erl index 45a26e3fc..49c211081 100644 --- a/apps/arweave/src/ar_chunk_storage.erl +++ b/apps/arweave/src/ar_chunk_storage.erl @@ -6,7 +6,8 @@ -export([start_link/2, put/2, put/3, open_files/1, get/1, get/2, get/5, read_chunk2/5, get_range/2, get_range/3, close_file/2, close_files/1, cut/2, delete/1, delete/2, - list_files/2, run_defragmentation/0]). + list_files/2, run_defragmentation/0, + get_storage_module_path/2, get_chunk_storage_path/2]). -export([init/1, handle_cast/2, handle_call/3, handle_info/2, terminate/2]). @@ -183,6 +184,14 @@ run_defragmentation() -> ok = update_sizes_file(Files, #{}) end. +get_storage_module_path(DataDir, "default") -> + DataDir; +get_storage_module_path(DataDir, StoreID) -> + filename:join([DataDir, "storage_modules", StoreID]). + +get_chunk_storage_path(DataDir, StoreID) -> + filename:join([get_storage_module_path(DataDir, StoreID), ?CHUNK_DIR]). + %%%=================================================================== %%% Generic server callbacks. %%%=================================================================== @@ -192,13 +201,7 @@ init({StoreID, RepackInPlacePacking}) -> process_flag(trap_exit, true), {ok, Config} = application:get_env(arweave, config), DataDir = Config#config.data_dir, - Dir = - case StoreID of - "default" -> - DataDir; - _ -> - filename:join([DataDir, "storage_modules", StoreID]) - end, + Dir = get_storage_module_path(DataDir, StoreID), ok = filelib:ensure_dir(Dir ++ "/"), ok = filelib:ensure_dir(filename:join(Dir, ?CHUNK_DIR) ++ "/"), FileIndex = read_file_index(Dir), @@ -430,12 +433,8 @@ store_repack_cursor(Cursor, StoreID, TargetPacking) -> get_filepath(Name, StoreID) -> {ok, Config} = application:get_env(arweave, config), DataDir = Config#config.data_dir, - case StoreID of - "default" -> - filename:join([DataDir, ?CHUNK_DIR, Name]); - _ -> - filename:join([DataDir, "storage_modules", StoreID, ?CHUNK_DIR, Name]) - end. + ChunkDir = get_chunk_storage_path(DataDir, StoreID), + filename:join([ChunkDir, Name]). handle_store_chunk(PaddedOffset, Chunk, FileIndex, StoreID) -> Key = get_key(PaddedOffset), @@ -658,13 +657,7 @@ sync_and_close_files([]) -> ok. 
list_files(DataDir, StoreID) -> - Dir = - case StoreID of - "default" -> - DataDir; - _ -> - filename:join([DataDir, "storage_modules", StoreID]) - end, + Dir = get_storage_module_path(DataDir, StoreID), ok = filelib:ensure_dir(Dir ++ "/"), ok = filelib:ensure_dir(filename:join(Dir, ?CHUNK_DIR) ++ "/"), StorageIndex = read_file_index(Dir), diff --git a/apps/arweave/src/ar_data_sync.erl b/apps/arweave/src/ar_data_sync.erl index 9d3a0e5c4..ac574d708 100644 --- a/apps/arweave/src/ar_data_sync.erl +++ b/apps/arweave/src/ar_data_sync.erl @@ -9,7 +9,8 @@ get_tx_offset/1, get_tx_offset_data_in_range/2, has_data_root/2, request_tx_data_removal/3, request_data_removal/4, record_disk_pool_chunks_count/0, record_chunk_cache_size_metric/0, is_chunk_cache_full/0, is_disk_space_sufficient/1, - get_chunk_by_byte/2, get_chunk_seek_offset/1, read_chunk/4, read_data_path/2, + get_chunk_by_byte/2, advance_chunks_index_cursor/1, get_chunk_seek_offset/1, + read_chunk/4, read_data_path/2, increment_chunk_cache_size/0, decrement_chunk_cache_size/0, get_chunk_padded_offset/1, get_chunk_metadata_range/3, get_merkle_rebase_threshold/0, should_store_in_chunk_storage/3]). @@ -28,7 +29,7 @@ -include_lib("arweave/include/ar_sync_buckets.hrl"). -ifdef(DEBUG). --define(COLLECT_SYNC_INTERVALS_FREQUENCY_MS, 5000). +-define(COLLECT_SYNC_INTERVALS_FREQUENCY_MS, 5_000). -else. -define(COLLECT_SYNC_INTERVALS_FREQUENCY_MS, 300_000). -endif. @@ -529,6 +530,16 @@ get_chunk_by_byte(ChunksIndex, Byte) -> {ok, Key, FullMetaData} end. +%% @doc Handle the situation where get_chunk_by_byte returns invalid_iterator, so we can't +%% use the chunk's end offset to advance the cursor. +%% +%% get_chunk_by_byte looks for a key with the same prefix or the next prefix. +%% Therefore, if there is no such key, it does not make sense to look for any +%% key smaller than the prefix + 2 in the next iteration. +advance_chunks_index_cursor(Cursor) -> + PrefixSpaceSize = trunc(math:pow(2, ?OFFSET_KEY_BITSIZE - ?OFFSET_KEY_PREFIX_BITSIZE)), + ((Cursor div PrefixSpaceSize) + 2) * PrefixSpaceSize. + read_chunk(Offset, ChunkDataDB, ChunkDataKey, StoreID) -> case ar_kv:get(ChunkDataDB, ChunkDataKey) of not_found -> @@ -1270,12 +1281,7 @@ handle_cast({remove_range, End, Cursor, Ref, PID}, State) -> end, {noreply, State}; {error, invalid_iterator} -> - %% get_chunk_by_byte looks for a key with the same prefix or the next prefix. - %% Therefore, if there is no such key, it does not make sense to look for any - %% key smaller than the prefix + 2 in the next iteration. - PrefixSpaceSize = - trunc(math:pow(2, ?OFFSET_KEY_BITSIZE - ?OFFSET_KEY_PREFIX_BITSIZE)), - NextCursor = ((Cursor div PrefixSpaceSize) + 2) * PrefixSpaceSize, + NextCursor = advance_chunks_index_cursor(Cursor), gen_server:cast(self(), {remove_range, End, NextCursor, Ref, PID}), {noreply, State}; {error, Reason} -> diff --git a/apps/arweave/src/ar_doctor_bench.erl b/apps/arweave/src/ar_doctor_bench.erl index 717339a7e..a4f63d720 100644 --- a/apps/arweave/src/ar_doctor_bench.erl +++ b/apps/arweave/src/ar_doctor_bench.erl @@ -5,7 +5,7 @@ -include_lib("kernel/include/file.hrl"). -include_lib("arweave/include/ar.hrl"). -include_lib("arweave/include/ar_config.hrl"). --include_lib("arweave/include/ar_chunk_storage.hrl"). +-include_lib("arweave/include/ar_mining.hrl"). -include_lib("arweave/include/ar_consensus.hrl"). -define(NUM_ITERATIONS, 5).
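%% A worked trace of advance_chunks_index_cursor/1 above. The two macros are
%% defined elsewhere in the tree; the values below are hypothetical stand-ins
%% chosen only to keep the arithmetic small:
advance_cursor_example() ->
    OffsetKeyBitsize = 256,        %% stands in for ?OFFSET_KEY_BITSIZE
    OffsetKeyPrefixBitsize = 251,  %% stands in for ?OFFSET_KEY_PREFIX_BITSIZE
    PrefixSpaceSize = trunc(math:pow(2, OffsetKeyBitsize - OffsetKeyPrefixBitsize)),
    Cursor = 70,
    %% PrefixSpaceSize = 32, so the result is ((70 div 32) + 2) * 32 = 128:
    %% the cursor skips the rest of its current prefix bucket plus all of the
    %% next one, the smallest offset worth probing per the comment above.
    ((Cursor div PrefixSpaceSize) + 2) * PrefixSpaceSize.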
@@ -41,14 +41,19 @@ bench_read(Args) -> [DurationString, DataDir | StorageModuleConfigs] = Args, Duration = list_to_integer(DurationString), - StorageModules = parse_storage_modules(StorageModuleConfigs, []), - Config = #config{data_dir = DataDir, storage_modules = StorageModules}, + {StorageModules, Address} = parse_storage_modules(StorageModuleConfigs, [], undefined), + ar:console("Assuming mining address: ~p~n", [ar_util:safe_encode(Address)]), + Config = #config{ + data_dir = DataDir, + storage_modules = StorageModules, + mining_addr = Address}, application:set_env(arweave, config, Config), ar_kv_sup:start_link(), ar_storage_sup:start_link(), ar_sync_record_sup:start_link(), ar_chunk_storage_sup:start_link(), + ar_mining_io:start_link(standalone), ar:console("~n~nDisk read benchmark will run for ~B seconds.~n", [Duration]), ar:console("Data will be logged continuously to ~p in the format:~n", [?OUTPUT_FILENAME]), @@ -74,20 +79,30 @@ bench_read(Args) -> true. -parse_storage_modules([], StorageModules) -> - StorageModules; -parse_storage_modules([StorageModuleConfig | StorageModuleConfigs], StorageModules) -> +parse_storage_modules([], StorageModules, Address) -> + {StorageModules, Address}; +parse_storage_modules([StorageModuleConfig | StorageModuleConfigs], StorageModules, Address) -> {ok, StorageModule} = ar_config:parse_storage_module(StorageModuleConfig), - parse_storage_modules(StorageModuleConfigs, StorageModules ++ [StorageModule]). + Address2 = ar_storage_module:module_address(StorageModule), + case Address2 == Address orelse Address == undefined of + true -> + ok; + false -> + ar:console("Warning: multiple mining addresses specified in storage_modules:~n") + end, + parse_storage_modules( + StorageModuleConfigs, + StorageModules ++ [StorageModule], + Address2). read_storage_module(_DataDir, StorageModule, StopTime) -> StoreID = ar_storage_module:id(StorageModule), ar_chunk_storage:open_files(StoreID), - {StartOffset, EndOffset} = ar_storage_module:get_range(StoreID), + {StartOffset, EndOffset} = ar_storage_module:module_range(StorageModule), OutputFileName = string:replace(?OUTPUT_FILENAME, "", StoreID), - random_read(StoreID, StartOffset, EndOffset, StopTime, OutputFileName). + random_read(StorageModule, StartOffset, EndOffset, StopTime, OutputFileName). % random_chunk_pread(DataDir, StoreID), % random_dev_pread(DataDir, StoreID), @@ -96,13 +111,13 @@ read_storage_module(_DataDir, StorageModule, StopTime) -> % dd_devs_read(DataDir, StoreID), % dd_dev_read(DataDir, StoreID), -random_read(StoreID, StartOffset, EndOffset, StopTime, OutputFileName) -> - random_read(StoreID, StartOffset, EndOffset, StopTime, OutputFileName, 0, 0). -random_read(StoreID, StartOffset, EndOffset, StopTime, OutputFileName, SumChunks, SumElapsedTime) -> +random_read(StorageModule, StartOffset, EndOffset, StopTime, OutputFileName) -> + random_read(StorageModule, StartOffset, EndOffset, StopTime, OutputFileName, 0, 0). 
+random_read(StorageModule, StartOffset, EndOffset, StopTime, OutputFileName, SumChunks, SumElapsedTime) -> StartTime = erlang:monotonic_time(), case StartTime < StopTime of true -> - Chunks = read(StoreID, StartOffset, EndOffset, ?RECALL_RANGE_SIZE, ?NUM_FILES), + Chunks = read(StorageModule, StartOffset, EndOffset, ?RECALL_RANGE_SIZE, ?NUM_FILES), EndTime = erlang:monotonic_time(), ElapsedTime = erlang:convert_time_unit(EndTime - StartTime, native, millisecond), @@ -112,21 +127,37 @@ random_read(StoreID, StartOffset, EndOffset, StopTime, OutputFileName, SumChunks Line = io_lib:format("~B,~B,~B,~B~n", [ Timestamp, BytesRead, ElapsedTime, BytesRead * 1000 div ElapsedTime]), file:write_file(OutputFileName, Line, [append]), - random_read(StoreID, StartOffset, EndOffset, StopTime, OutputFileName, + random_read(StorageModule, StartOffset, EndOffset, StopTime, OutputFileName, SumChunks + Chunks, SumElapsedTime + ElapsedTime); false -> + StoreID = ar_storage_module:id(StorageModule), {StoreID, SumChunks, SumElapsedTime} end. -read(StoreID, StartOffset, EndOffset, Size, NumReads) -> - read(StoreID, StartOffset, EndOffset, Size, 0, NumReads). +read(StorageModule, StartOffset, EndOffset, Size, NumReads) -> + read(StorageModule, StartOffset, EndOffset, Size, 0, NumReads). -read(_StoreID, _StartOffset, _EndOffset, _Size, NumChunks, 0) -> +read(_StorageModule, _StartOffset, _EndOffset, _Size, NumChunks, 0) -> NumChunks; -read(StoreID, StartOffset, EndOffset, Size, NumChunks, NumReads) -> +read(StorageModule, StartOffset, EndOffset, Size, NumChunks, NumReads) -> Offset = rand:uniform(EndOffset - Size - StartOffset + 1) + StartOffset, - Chunks = ar_chunk_storage:get_range(Offset, Size, StoreID), - read(StoreID, StartOffset, EndOffset, Size, NumChunks + length(Chunks), NumReads - 1). + Candidate = #mining_candidate{ + mining_address = ar_storage_module:module_address(StorageModule), + packing_difficulty = ar_storage_module:module_packing_difficulty(StorageModule) + }, + RangeExists = ar_mining_io:read_recall_range(chunk1, self(), Candidate, Offset), + case RangeExists of + true -> + receive + {chunks_read, _WhichChunk, _Candidate, _RecallRangeStart, ChunkOffsets} -> + read(StorageModule, StartOffset, EndOffset, Size, + NumChunks + length(ChunkOffsets), NumReads - 1) + end; + false -> + %% Try again with a new random offset + read(StorageModule, StartOffset, EndOffset, Size, NumChunks, NumReads) + end. + %% XXX: the following functions are not used, but may be useful in the future to benchmark %% different read strategies. They can be deleted when they are no longer useful. diff --git a/apps/arweave/src/ar_http_iface_client.erl b/apps/arweave/src/ar_http_iface_client.erl index ddaf710f6..95bf51214 100644 --- a/apps/arweave/src/ar_http_iface_client.erl +++ b/apps/arweave/src/ar_http_iface_client.erl @@ -4,8 +4,9 @@ -module(ar_http_iface_client). +-export([send_tx_json/3, send_tx_json/4, send_tx_binary/3, send_tx_binary/4]). -export([send_block_json/3, send_block_binary/3, send_block_binary/4, - send_tx_json/3, send_tx_binary/3, send_block_announcement/2, + send_block_announcement/2, get_block/3, get_tx/2, get_txs/2, get_tx_from_remote_peer/2, get_tx_data/2, get_wallet_list_chunk/2, get_wallet_list_chunk/3, get_wallet_list/2, add_peer/1, get_info/1, get_info/2, get_peers/1, @@ -28,28 +29,83 @@ -include_lib("arweave/include/ar_mining.hrl"). -include_lib("arweave/include/ar_wallets.hrl"). -%% @doc Send a JSON-encoded transaction to the given Peer. 
+%%-------------------------------------------------------------------- +%% @doc Send a JSON-encoded transaction to the given Peer with default +%% parameters. +%% +%% == Examples == +%% +%% ``` +%% Host = {127,0,0,1}, +%% Port = 1984, +%% Peer = {Host, Port}, +%% TXID = <<0:256>>, +%% Bin = ar_serialize:tx_to_binary(#tx{}), +%% send_tx_json(Peer, TXID, Bin). +%% ''' +%% +%% @see send_tx_json/4 +%% @end +%%-------------------------------------------------------------------- send_tx_json(Peer, TXID, Bin) -> + send_tx_json(Peer, TXID, Bin, #{}). + +%%-------------------------------------------------------------------- +%% @doc Send a JSON-encoded transaction to the given Peer. +%% +%% == Examples == +%% +%% ``` +%% Host = {127,0,0,1}, +%% Port = 1984, +%% Peer = {Host, Port}, +%% TXID = <<0:256>>, +%% Bin = ar_serialize:tx_to_binary(#tx{}), +%% Opts = #{ connect_timeout => 5 +%% , timeout => 30 +%% }, +%% send_tx_json(Peer, TXID, Bin, Opts). +%% ''' +%% +%% @end +%%-------------------------------------------------------------------- +send_tx_json(Peer, TXID, Bin, Opts) -> + ConnectTimeout = maps:get(connect_timeout, Opts, 5), + Timeout = maps:get(timeout, Opts, 30), ar_http:req(#{ method => post, peer => Peer, path => "/tx", headers => add_header(<<"arweave-tx-id">>, ar_util:encode(TXID), p2p_headers()), body => Bin, - connect_timeout => 5000, - timeout => 30 * 1000 + connect_timeout => ConnectTimeout * 1000, + timeout => Timeout * 1000 }). -%% @doc Send a binary-encoded transaction to the given Peer. +%%-------------------------------------------------------------------- +%% @doc Send a binary-encoded transaction to the given Peer with +%% default parameters. +%% @see send_tx_binary/4 +%% @end +%%-------------------------------------------------------------------- send_tx_binary(Peer, TXID, Bin) -> + send_tx_binary(Peer, TXID, Bin, #{}). + +%%-------------------------------------------------------------------- +%% @doc Send a binary-encoded transaction to the given Peer. +%% @end +%%-------------------------------------------------------------------- +send_tx_binary(Peer, TXID, Bin, Opts) -> + ConnectTimeout = maps:get(connect_timeout, Opts, 5), + Timeout = maps:get(timeout, Opts, 30), ar_http:req(#{ method => post, peer => Peer, path => "/tx2", headers => add_header(<<"arweave-tx-id">>, ar_util:encode(TXID), p2p_headers()), body => Bin, - connect_timeout => 5000, - timeout => 30 * 1000 + connect_timeout => ConnectTimeout * 1000, + timeout => Timeout * 1000 }). %% @doc Announce a block to Peer. diff --git a/apps/arweave/src/ar_http_iface_middleware.erl b/apps/arweave/src/ar_http_iface_middleware.erl index 8af797a77..f75c88e65 100644 --- a/apps/arweave/src/ar_http_iface_middleware.erl +++ b/apps/arweave/src/ar_http_iface_middleware.erl @@ -1593,6 +1593,7 @@ handle_get_tx(Hash, Req, Encoding) -> {error, invalid} -> {400, #{}, <<"Invalid hash.">>, Req}; {ok, ID} -> + ok = ar_semaphore:acquire(get_tx, infinity), case ar_storage:read_tx(ID) of unavailable -> maybe_tx_is_pending_response(ID, Req); diff --git a/apps/arweave/src/ar_kv.erl b/apps/arweave/src/ar_kv.erl index 36cab53f8..d0842ae94 100644 --- a/apps/arweave/src/ar_kv.erl +++ b/apps/arweave/src/ar_kv.erl @@ -80,7 +80,7 @@ create_ets() -> ets:new(?MODULE, [set, public, named_table, {keypos, #db.name}]). - + %% @doc Open a key-value store located at the given filesystem path relative to %% the data directory and identified by the given Name. 
open(DataDirRelativePath, Name) -> diff --git a/apps/arweave/src/ar_metrics.erl b/apps/arweave/src/ar_metrics.erl index eb15be715..f4c91baa6 100644 --- a/apps/arweave/src/ar_metrics.erl +++ b/apps/arweave/src/ar_metrics.erl @@ -158,10 +158,11 @@ register() -> ]), prometheus_gauge:new([ {name, v2_index_data_size_by_packing}, - {labels, [store_id, packing, partition_number, storage_module_size, storage_module_index]}, + {labels, [store_id, packing, partition_number, storage_module_size, storage_module_index, + packing_difficulty]}, {help, "The size (in bytes) of the data stored and indexed. Grouped by the " "store ID, packing, partition number, storage module size, " - "and storage module index."} + "storage module index, and packing difficulty."} ]), %% Disk pool. diff --git a/apps/arweave/src/ar_mine_randomx.erl b/apps/arweave/src/ar_mine_randomx.erl index 8811d2eb4..98ba92567 100755 --- a/apps/arweave/src/ar_mine_randomx.erl +++ b/apps/arweave/src/ar_mine_randomx.erl @@ -6,7 +6,7 @@ randomx_decrypt_sub_chunk/5, randomx_reencrypt_chunk/7]). -%% These exports are required for the DEBUG mode, where these functions are unused. +%% These exports are required for the STUB mode, where these functions are unused. %% Also, some of these functions are used in ar_mine_randomx_tests. -export([jit/0, large_pages/0, hardware_aes/0, init_fast2/5, init_light2/4]). @@ -18,11 +18,11 @@ %%% Public interface. %%%=================================================================== --ifdef(DEBUG). +-ifdef(STUB_RANDOMX). init_fast(RxMode, Key, _Threads) -> - {RxMode, {debug_state, Key}}. + {RxMode, {stub_state, Key}}. init_light(RxMode, Key) -> - {RxMode, {debug_state, Key}}. + {RxMode, {stub_state, Key}}. -else. init_fast(RxMode, Key, Threads) -> init_fast2(RxMode, Key, jit(), large_pages(), Threads). @@ -174,14 +174,14 @@ info2(_) -> %% ------------------------------------------------------------------------------------------- %% hash2 and randomx_[encrypt|decrypt|reencrypt]_chunk2 -%% DEBUG implementation, used in tests, is called when State is {debug_state, Key} +%% STUB implementation, used in tests, is called when State is {stub_state, Key} %% Otherwise, NIF implementation is used %% We set it up this way so that we can have some tests trigger the NIF implementation %% ------------------------------------------------------------------------------------------- -%% DEBUG implementation -hash2({_, {debug_state, Key}}, Data, _JIT, _LargePages, _HardwareAES) -> +%% STUB implementation +hash2({_, {stub_state, Key}}, Data, _JIT, _LargePages, _HardwareAES) -> crypto:hash(sha256, << Key/binary, Data/binary >>); -%% Non-DEBUG implementation +%% Non-STUB implementation hash2({rx512, State}, Data, JIT, LargePages, HardwareAES) -> {ok, Hash} = ar_rx512_nif:rx512_hash_nif(State, Data, JIT, LargePages, HardwareAES), Hash; @@ -191,8 +191,8 @@ hash2({rx4096, State}, Data, JIT, LargePages, HardwareAES) -> hash2(_BadState, _Data, _JIT, _LargePages, _HardwareAES) -> {error, invalid_randomx_mode}. 
-%% DEBUG implementation -randomx_decrypt_chunk2({_, {debug_state, _}}, Key, Chunk, _ChunkSize, +%% STUB implementation +randomx_decrypt_chunk2({_, {stub_state, _}}, Key, Chunk, _ChunkSize, {composite, _, PackingDifficulty} = _Packing) -> Options = [{encrypt, false}], IV = binary:part(Key, {0, 16}), @@ -209,11 +209,11 @@ randomx_decrypt_chunk2({_, {debug_state, _}}, Key, Chunk, _ChunkSize, ) end, SubChunks))}; -randomx_decrypt_chunk2({_, {debug_state, _}}, Key, Chunk, _ChunkSize, _Packing) -> +randomx_decrypt_chunk2({_, {stub_state, _}}, Key, Chunk, _ChunkSize, _Packing) -> Options = [{encrypt, false}], IV = binary:part(Key, {0, 16}), {ok, crypto:crypto_one_time(aes_256_cbc, Key, IV, Chunk, Options)}; -%% Non-DEBUG implementation +%% Non-STUB implementation randomx_decrypt_chunk2({rx512, RandomxState}, Key, Chunk, ChunkSize, spora_2_5) -> ar_rx512_nif:rx512_decrypt_chunk_nif(RandomxState, Key, Chunk, ChunkSize, ?RANDOMX_PACKING_ROUNDS, jit(), large_pages(), hardware_aes()); @@ -228,8 +228,8 @@ randomx_decrypt_chunk2({rx4096, RandomxState}, Key, Chunk, ChunkSize, randomx_decrypt_chunk2(_BadState, _Key, _Chunk, _ChunkSize, _Packing) -> {error, invalid_randomx_mode}. -%% DEBUG implementation -randomx_decrypt_sub_chunk2(Packing, {_, {debug_state, _}}, Key, Chunk, SubChunkStartOffset) -> +%% STUB implementation +randomx_decrypt_sub_chunk2(Packing, {_, {stub_state, _}}, Key, Chunk, SubChunkStartOffset) -> {_, _, Iterations} = Packing, Options = [{encrypt, false}], Key2 = crypto:hash(sha256, << Key/binary, SubChunkStartOffset:24 >>), @@ -237,7 +237,7 @@ randomx_decrypt_sub_chunk2(Packing, {_, {debug_state, _}}, Key, Chunk, SubChunkS {ok, lists:foldl(fun(_, Acc) -> crypto:crypto_one_time(aes_256_cbc, Key2, IV, Acc, Options) end, Chunk, lists:seq(1, Iterations))}; -%% Non-DEBUG implementation +%% Non-STUB implementation randomx_decrypt_sub_chunk2(Packing, {rx4096, RandomxState}, Key, Chunk, SubChunkStartOffset) -> {_, _, IterationCount} = Packing, RoundCount = ?COMPOSITE_PACKING_ROUND_COUNT, @@ -247,8 +247,8 @@ randomx_decrypt_sub_chunk2(Packing, {rx4096, RandomxState}, Key, Chunk, SubChunk randomx_decrypt_sub_chunk2(_Packing, _BadState, _Key, _Chunk, _SubChunkStartOffset) -> {error, invalid_randomx_mode}. -%% DEBUG implementation -randomx_encrypt_chunk2({composite, _, PackingDifficulty} = _Packing, {_, {debug_state, _}}, Key, Chunk) -> +%% STUB implementation +randomx_encrypt_chunk2({composite, _, PackingDifficulty} = _Packing, {_, {stub_state, _}}, Key, Chunk) -> Options = [{encrypt, true}, {padding, zero}], IV = binary:part(Key, {0, 16}), SubChunks = split_into_sub_chunks(ar_packing_server:pad_chunk(Chunk)), @@ -264,12 +264,12 @@ randomx_encrypt_chunk2({composite, _, PackingDifficulty} = _Packing, {_, {debug_ ) end, SubChunks))}; -randomx_encrypt_chunk2(_Packing, {_, {debug_state, _}}, Key, Chunk) -> +randomx_encrypt_chunk2(_Packing, {_, {stub_state, _}}, Key, Chunk) -> Options = [{encrypt, true}, {padding, zero}], IV = binary:part(Key, {0, 16}), {ok, crypto:crypto_one_time(aes_256_cbc, Key, IV, ar_packing_server:pad_chunk(Chunk), Options)}; -%% Non-DEBUG implementation +%% Non-STUB implementation randomx_encrypt_chunk2(spora_2_5, {rx512, RandomxState}, Key, Chunk) -> ar_rx512_nif:rx512_encrypt_chunk_nif(RandomxState, Key, Chunk, ?RANDOMX_PACKING_ROUNDS, jit(), large_pages(), hardware_aes()); @@ -283,16 +283,16 @@ randomx_encrypt_chunk2({composite, _Addr, PackingDifficulty}, {rx4096, RandomxSt randomx_encrypt_chunk2(_Packing, _BadState, _Key, _Chunk) -> {error, invalid_randomx_mode}. 
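%% A minimal round-trip sketch of the stub (non-NIF) packing clauses above,
%% showing the aes_256_cbc symmetry in isolation. Key must be 32 bytes, as in
%% those clauses; the helper name is ours, everything else mirrors the patch:
stub_pack_roundtrip(Key, Chunk) when byte_size(Key) =:= 32 ->
    IV = binary:part(Key, {0, 16}),
    Packed = crypto:crypto_one_time(aes_256_cbc, Key, IV, Chunk,
        [{encrypt, true}, {padding, zero}]),
    Unpacked = crypto:crypto_one_time(aes_256_cbc, Key, IV, Packed,
        [{encrypt, false}]),
    %% Unpacked is Chunk zero-padded to the AES block size.
    {Packed, Unpacked}.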
-%% DEBUG implementation +%% STUB implementation randomx_reencrypt_chunk2(SourcePacking, TargetPacking, - {_, {debug_state, _}} = State, UnpackKey, PackKey, Chunk, ChunkSize) -> + {_, {stub_state, _}} = State, UnpackKey, PackKey, Chunk, ChunkSize) -> case randomx_decrypt_chunk(SourcePacking, State, UnpackKey, Chunk, ChunkSize) of {ok, UnpackedChunk} -> {ok, RepackedChunk} = randomx_encrypt_chunk2(TargetPacking, State, PackKey, ar_packing_server:pad_chunk(UnpackedChunk)), case {SourcePacking, TargetPacking} of {{composite, Addr, _}, {composite, Addr, _}} -> - %% See the same function defined for the no-DEBUG mode. + %% See the same function defined for the non-STUB mode. {ok, RepackedChunk, none}; _ -> {ok, RepackedChunk, UnpackedChunk} @@ -300,7 +300,7 @@ randomx_reencrypt_chunk2(SourcePacking, TargetPacking, Error -> Error end; -%% Non-DEBUG implementation +%% Non-STUB implementation randomx_reencrypt_chunk2({composite, Addr1, PackingDifficulty1}, {composite, Addr2, PackingDifficulty2}, {rx4096, RandomxState}, UnpackKey, PackKey, Chunk, ChunkSize) -> diff --git a/apps/arweave/src/ar_mining_io.erl b/apps/arweave/src/ar_mining_io.erl index e46ea1e6a..bc3da42cb 100644 --- a/apps/arweave/src/ar_mining_io.erl +++ b/apps/arweave/src/ar_mining_io.erl @@ -2,8 +2,9 @@ -behaviour(gen_server). --export([start_link/0, set_largest_seen_upper_bound/1, - get_partitions/0, get_partitions/1, read_recall_range/4, garbage_collect/0]). +-export([start_link/0, start_link/1, set_largest_seen_upper_bound/1, + get_packing/0, get_partitions/0, get_partitions/1, read_recall_range/4, + garbage_collect/0]). -export([init/1, handle_cast/2, handle_call/3, handle_info/2, terminate/2]). @@ -16,9 +17,12 @@ -define(CACHE_TTL_MS, 2000). -record(state, { + mode = miner, partition_upper_bound = 0, io_threads = #{}, - io_thread_monitor_refs = #{} + io_thread_monitor_refs = #{}, + store_id_to_device = #{}, + partition_to_store_ids = #{} }). %%%=================================================================== @@ -27,7 +31,10 @@ %% @doc Start the gen_server. start_link() -> - gen_server:start_link({local, ?MODULE}, ?MODULE, [], []). + start_link(miner). + +start_link(Mode) -> + gen_server:start_link({local, ?MODULE}, ?MODULE, Mode, []). set_largest_seen_upper_bound(PartitionUpperBound) -> gen_server:call(?MODULE, {set_largest_seen_upper_bound, PartitionUpperBound}, 60000). @@ -39,24 +46,45 @@ read_recall_range(WhichChunk, Worker, Candidate, RecallRangeStart) -> gen_server:call(?MODULE, {read_recall_range, WhichChunk, Worker, Candidate, RecallRangeStart}, 60000). +get_packing() -> + {ok, Config} = application:get_env(arweave, config), + %% ar_config:validate_storage_modules/1 ensures that we only mine against a single + %% packing format. So we can grab it from any partition. + case Config#config.storage_modules of + [] -> undefined; + [{_, _, Packing} | _Rest] -> Packing + end. + get_partitions(PartitionUpperBound) when PartitionUpperBound =< 0 -> []; get_partitions(PartitionUpperBound) -> + {ok, Config} = application:get_env(arweave, config), Max = ar_node:get_max_partition_number(PartitionUpperBound), - lists:sort(sets:to_list( - lists:foldl( - fun({Partition, MiningAddress, PackingDifficulty, _StoreID}, Acc) -> - case Partition > Max of - true -> - Acc; - _ -> - sets:add_element({Partition, MiningAddress, PackingDifficulty}, Acc) - end - end, - sets:new(), %% Ensure only one entry per partition (i.e. collapse storage modules) - get_io_channels() - )) - ).
+ AllPartitions = lists:foldl( + fun (Module, Acc) -> + Addr = ar_storage_module:module_address(Module), + PackingDifficulty = + ar_storage_module:module_packing_difficulty(Module), + {Start, End} = ar_storage_module:module_range(Module, 0), + Partitions = get_store_id_partitions({Start, End}, []), + lists:foldl( + fun(PartitionNumber, AccInner) -> + sets:add_element({PartitionNumber, Addr, PackingDifficulty}, AccInner) + end, + Acc, + Partitions + ) + end, + sets:new(), + Config#config.storage_modules + ), + FilteredPartitions = sets:filter( + fun ({PartitionNumber, Addr, _PackingDifficulty}) -> + PartitionNumber =< Max andalso Addr == Config#config.mining_addr + end, + AllPartitions + ), + lists:sort(sets:to_list(FilteredPartitions)). garbage_collect() -> gen_server:cast(?MODULE, garbage_collect). @@ -65,17 +93,8 @@ garbage_collect() -> %%% Generic server callbacks. %%%=================================================================== -init([]) -> - State = - lists:foldl( - fun ({PartitionNumber, MiningAddress, PackingDifficulty, StoreID}, Acc) -> - start_io_thread(PartitionNumber, MiningAddress, - PackingDifficulty, StoreID, Acc) - end, - #state{}, - get_io_channels() - ), - {ok, State}. +init(Mode) -> + {ok, start_io_threads(#state{ mode = Mode })}. handle_call({set_largest_seen_upper_bound, PartitionUpperBound}, _From, State) -> #state{ partition_upper_bound = CurrentUpperBound } = State, @@ -89,18 +108,15 @@ handle_call({set_largest_seen_upper_bound, PartitionUpperBound}, _From, State) - handle_call(get_partitions, _From, #state{ partition_upper_bound = PartitionUpperBound } = State) -> {reply, get_partitions(PartitionUpperBound), State}; -handle_call({read_recall_range, WhichChunk, Worker, Candidate, RecallRangeStart}, _From, - #state{ io_threads = IOThreads } = State) -> - #mining_candidate{ mining_address = MiningAddress, - packing_difficulty = PackingDifficulty } = Candidate, - PartitionNumber = ar_node:get_partition_number(RecallRangeStart), +handle_call({read_recall_range, WhichChunk, Worker, Candidate, RecallRangeStart}, + _From, State) -> + #mining_candidate{ packing_difficulty = PackingDifficulty } = Candidate, RangeEnd = RecallRangeStart + ar_block:get_recall_range_size(PackingDifficulty), - ThreadFound = case find_thread(PartitionNumber, MiningAddress, PackingDifficulty, - RangeEnd, RecallRangeStart, IOThreads) of + ThreadFound = case find_thread(RecallRangeStart, RangeEnd, State) of not_found -> false; - Thread -> - Thread ! {WhichChunk, {Worker, Candidate, RecallRangeStart}}, + {Thread, StoreID} -> + Thread ! {WhichChunk, {Worker, Candidate, RecallRangeStart, StoreID}}, true end, {reply, ThreadFound, State}; @@ -159,103 +175,147 @@ terminate(_Reason, _State) -> %%% Private functions. %%%=================================================================== -%% @doc Returns tuples {PartitionNumber, MiningAddress, PackingDifficulty, StoreID} covering -%% all attached storage modules (excluding the "default" storage module). -%% The assumption is that each IO channel represents a distinct 200MiB/s read channel to -%% which we will (later) assign an IO thread. -get_io_channels() -> +get_system_device(StorageModule) -> {ok, Config} = application:get_env(arweave, config), - MiningAddress = Config#config.mining_addr, - - %% First get the start/end ranges for all storage modules configured for the mining address. 
- StorageModules = - lists:foldl( - fun ({BucketSize, Bucket, {spora_2_6, Addr}} = M, Acc) when Addr == MiningAddress -> - Start = Bucket * BucketSize, - End = (Bucket + 1) * BucketSize, - StoreID = ar_storage_module:id(M), - [{Start, End, MiningAddress, 0, StoreID} | Acc]; - ({BucketSize, Bucket, {composite, Addr, PackingDifficulty}} = M, Acc) - when Addr == MiningAddress -> - Start = Bucket * BucketSize, - End = (Bucket + 1) * BucketSize, - StoreID = ar_storage_module:id(M), - [{Start, End, MiningAddress, PackingDifficulty, StoreID} | Acc]; - (_Module, Acc) -> - Acc - end, - [], - Config#config.storage_modules - ), - - %% And then map those storage modules to partitions. - get_io_channels(StorageModules, []). - -get_io_channels([], Channels) -> - Channels; -get_io_channels([{Start, End, _MiningAddress, _PackingDifficulty, _StoreID} | StorageModules], - Channels) when Start >= End -> - get_io_channels(StorageModules, Channels); -get_io_channels([{Start, End, MiningAddress, PackingDifficulty, StoreID} | StorageModules], - Channels) -> - PartitionNumber = ar_node:get_partition_number(Start), - Channels2 = [{PartitionNumber, MiningAddress, PackingDifficulty, StoreID} | Channels], - StorageModules2 = [{Start + ?PARTITION_SIZE, - End, MiningAddress, PackingDifficulty, StoreID} | StorageModules], - get_io_channels(StorageModules2, Channels2). - -start_io_thread(PartitionNumber, MiningAddress, PackingDifficulty, StoreID, - #state{ io_threads = Threads } = State) - when is_map_key({PartitionNumber, MiningAddress, PackingDifficulty, StoreID}, Threads) -> - State; -start_io_thread(PartitionNumber, MiningAddress, PackingDifficulty, StoreID, - #state{ io_threads = Threads, io_thread_monitor_refs = Refs } = State) -> - Now = os:system_time(millisecond), - Thread = - spawn( - fun() -> - case StoreID of - "default" -> - ok; - _ -> - ar_chunk_storage:open_files(StoreID) + StoreID = ar_storage_module:id(StorageModule), + Path = ar_chunk_storage:get_chunk_storage_path(Config#config.data_dir, StoreID), + Command = "df -P " ++ Path ++ " | awk 'NR==2 {print $1}'", + Device = os:cmd(Command), + TrimmedDevice = string:trim(Device), + case TrimmedDevice of + "" -> StoreID; % If the command fails or returns an empty string, return StoreID + _ -> TrimmedDevice + end. + +start_io_threads(State) -> + #state{ mode = Mode } = State, + + % Step 1: Group StoreIDs by their system device + DeviceToStoreIDs = map_device_to_store_ids(), + + % Step 2: Start IO threads for each device and populate map indices + maps:fold( + fun(Device, StoreIDs, StateAcc) -> + #state{ io_threads = Threads, io_thread_monitor_refs = Refs, + store_id_to_device = StoreIDToDevice, + partition_to_store_ids = PartitionToStoreIDs } = StateAcc, + + Thread = start_io_thread(Mode, StoreIDs), + ThreadRef = monitor(process, Thread), + + StoreIDToDevice2 = lists:foldl( + fun(StoreID, Acc) -> + maps:put(StoreID, Device, Acc) end, - io_thread(PartitionNumber, MiningAddress, PackingDifficulty, StoreID, #{}, Now) + StoreIDToDevice, StoreIDs), + + PartitionToStoreIDs2 = map_partition_to_store_ids(StoreIDs, PartitionToStoreIDs), + StateAcc#state{ + io_threads = maps:put(Device, Thread, Threads), + io_thread_monitor_refs = maps:put(ThreadRef, Device, Refs), + store_id_to_device = StoreIDToDevice2, + partition_to_store_ids = PartitionToStoreIDs2 + } + end, + State, + DeviceToStoreIDs + ). + +start_io_thread(Mode, StoreIDs) -> + Now = os:system_time(millisecond), + spawn( + fun() -> + open_files(StoreIDs), + io_thread(Mode, #{}, Now) + end + ). 
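%% The device grouping above hinges on `df -P`: with POSIX output the second
%% line starts with the filesystem (device) name, which the awk filter picks
%% out. For example (path, device name and sizes are hypothetical):
%%   $ df -P /data/storage_modules/storage_module_0_addr/chunk_storage
%%   Filesystem     1024-blocks    Used Available Capacity Mounted on
%%   /dev/nvme0n1p1   976762584  123456 976639128       1% /data
%% get_system_device/1 then returns "/dev/nvme0n1p1", so every storage module
%% living on that disk is served by the same IO thread.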
+ +map_partition_to_store_ids([], PartitionToStoreIDs) -> + PartitionToStoreIDs; +map_partition_to_store_ids([StoreID | StoreIDs], PartitionToStoreIDs) -> + StorageModule = ar_storage_module:get_by_id(StoreID), + {Start, End} = ar_storage_module:module_range(StorageModule, 0), + Partitions = get_store_id_partitions({Start, End}, []), + PartitionToStoreIDs2 = lists:foldl( + fun(Partition, Acc) -> + maps:update_with(Partition, + fun(PartitionStoreIDs) -> [StoreID | PartitionStoreIDs] end, + [StoreID], Acc) + end, + PartitionToStoreIDs, Partitions), + map_partition_to_store_ids(StoreIDs, PartitionToStoreIDs2). + +map_device_to_store_ids() -> + {ok, Config} = application:get_env(arweave, config), + lists:foldl( + fun(Module, Acc) -> + StoreID = ar_storage_module:id(Module), + Device = get_system_device(Module), + maps:update_with(Device, fun(StoreIDs) -> [StoreID | StoreIDs] end, [StoreID], Acc) + end, + #{}, + Config#config.storage_modules + ). + +get_store_ids_for_device(Device, #state{store_id_to_device = StoreIDToDevice}) -> + maps:fold( + fun(StoreID, MappedDevice, Acc) -> + case MappedDevice == Device of + true -> [StoreID | Acc]; + false -> Acc + end + end, + [], + StoreIDToDevice + ). + +get_store_id_partitions({Start, End}, Partitions) when Start >= End -> + Partitions; +get_store_id_partitions({Start, End}, Partitions) -> + PartitionNumber = ar_node:get_partition_number(Start), + get_store_id_partitions({Start + ?PARTITION_SIZE, End}, [PartitionNumber | Partitions]). + +open_files(StoreIDs) -> + lists:foreach( + fun(StoreID) -> + case StoreID of + "default" -> + ok; + _ -> + ar_chunk_storage:open_files(StoreID) end - ), - Ref = monitor(process, Thread), - Key = {PartitionNumber, MiningAddress, PackingDifficulty, StoreID}, - Threads2 = maps:put(Key, Thread, Threads), - Refs2 = maps:put(Ref, Key, Refs), - ?LOG_DEBUG([{event, started_io_mining_thread}, - {partition_number, PartitionNumber}, - {mining_addr, ar_util:safe_encode(MiningAddress)}, - {packing_difficulty, PackingDifficulty}, - {store_id, StoreID}]), - State#state{ io_threads = Threads2, io_thread_monitor_refs = Refs2 }. + end, + StoreIDs). handle_io_thread_down(Ref, Reason, - #state{ io_threads = Threads, io_thread_monitor_refs = Refs } = State) -> + #state{ mode = Mode, io_threads = Threads, io_thread_monitor_refs = Refs } = State) -> ?LOG_WARNING([{event, mining_io_thread_down}, {reason, io_lib:format("~p", [Reason])}]), - ThreadID = {PartitionNumber, MiningAddress, PackingDifficulty, - StoreID} = maps:get(Ref, Refs), + Device = maps:get(Ref, Refs), Refs2 = maps:remove(Ref, Refs), - Threads2 = maps:remove(ThreadID, Threads), - start_io_thread(PartitionNumber, MiningAddress, PackingDifficulty, StoreID, - State#state{ io_threads = Threads2, io_thread_monitor_refs = Refs2 }). + Threads2 = maps:remove(Device, Threads), + + StoreIDs = get_store_ids_for_device(Device, State), + Thread = start_io_thread(Mode, StoreIDs), + ThreadRef = monitor(process, Thread), + State#state{ io_threads = maps:put(Device, Thread, Threads2), + io_thread_monitor_refs = maps:put(ThreadRef, Device, Refs2) }. 
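%% A worked trace of get_store_id_partitions/2 above, assuming
%% ar_node:get_partition_number(Offset) is Offset div ?PARTITION_SIZE (that
%% function is defined elsewhere, so this is an assumption): a module covering
%% {0, 2 * ?PARTITION_SIZE} is walked in ?PARTITION_SIZE steps and yields
%% [1, 0]. Its StoreID is therefore registered under partitions 0 and 1 in
%% partition_to_store_ids, which is how find_thread/3 later in this diff
%% routes a recall range to the thread of the device holding that module.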
-io_thread(PartitionNumber, MiningAddress, PackingDifficulty, StoreID, Cache, LastClearTime) -> +io_thread(Mode, Cache, LastClearTime) -> receive - {WhichChunk, {Worker, Candidate, RecallRangeStart}} -> + {WhichChunk, {Worker, Candidate, RecallRangeStart, StoreID}} -> {ChunkOffsets, Cache2} = - get_chunks(WhichChunk, Candidate, RecallRangeStart, StoreID, Cache), - ar_mining_worker:chunks_read( - Worker, WhichChunk, Candidate, RecallRangeStart, ChunkOffsets), + get_chunks(Mode, WhichChunk, Candidate, RecallRangeStart, StoreID, Cache), + chunks_read(Mode, Worker, WhichChunk, Candidate, RecallRangeStart, ChunkOffsets), {Cache3, LastClearTime2} = maybe_clear_cached_chunks(Cache2, LastClearTime), - io_thread(PartitionNumber, MiningAddress, PackingDifficulty, StoreID, - Cache3, LastClearTime2) + io_thread(Mode, Cache3, LastClearTime2) end. +chunks_read(miner, Worker, WhichChunk, Candidate, RecallRangeStart, ChunkOffsets) -> + ar_mining_worker:chunks_read( + Worker, WhichChunk, Candidate, RecallRangeStart, ChunkOffsets); +chunks_read(standalone, Worker, WhichChunk, Candidate, RecallRangeStart, ChunkOffsets) -> + Worker ! {chunks_read, WhichChunk, Candidate, RecallRangeStart, ChunkOffsets}. + get_packed_intervals(Start, End, MiningAddress, PackingDifficulty, "default", Intervals) -> Packing = ar_block:get_packing(PackingDifficulty, MiningAddress), case ar_sync_record:get_next_synced_interval(Start, End, Packing, ar_data_sync, "default") of @@ -297,20 +357,20 @@ maybe_clear_cached_chunks(Cache, LastClearTime) -> %% %% However if the request is from our local miner there's no need to cache since the H1 %% batch is always handled all at once. -get_chunks(WhichChunk, Candidate, RangeStart, StoreID, Cache) -> +get_chunks(Mode, WhichChunk, Candidate, RangeStart, StoreID, Cache) -> case Candidate#mining_candidate.cm_lead_peer of not_set -> - ChunkOffsets = read_range(WhichChunk, Candidate, RangeStart, StoreID), + ChunkOffsets = read_range(Mode, WhichChunk, Candidate, RangeStart, StoreID), {ChunkOffsets, Cache}; _ -> - cached_read_range(WhichChunk, Candidate, RangeStart, StoreID, Cache) + cached_read_range(Mode, WhichChunk, Candidate, RangeStart, StoreID, Cache) end. -cached_read_range(WhichChunk, Candidate, RangeStart, StoreID, Cache) -> +cached_read_range(Mode, WhichChunk, Candidate, RangeStart, StoreID, Cache) -> Now = os:system_time(millisecond), case maps:get(RangeStart, Cache, not_found) of not_found -> - ChunkOffsets = read_range(WhichChunk, Candidate, RangeStart, StoreID), + ChunkOffsets = read_range(Mode, WhichChunk, Candidate, RangeStart, StoreID), Cache2 = maps:put(RangeStart, {Now, ChunkOffsets}, Cache), {ChunkOffsets, Cache2}; {_CachedTime, ChunkOffsets} -> @@ -326,7 +386,7 @@ cached_read_range(WhichChunk, Candidate, RangeStart, StoreID, Cache) -> {ChunkOffsets, Cache} end. -read_range(WhichChunk, Candidate, RangeStart, StoreID) -> +read_range(Mode, WhichChunk, Candidate, RangeStart, StoreID) -> StartTime = erlang:monotonic_time(), #mining_candidate{ mining_address = MiningAddress, packing_difficulty = PackingDifficulty } = Candidate, @@ -335,7 +395,7 @@ read_range(WhichChunk, Candidate, RangeStart, StoreID) -> MiningAddress, PackingDifficulty, StoreID, ar_intervals:new()), ChunkOffsets = ar_chunk_storage:get_range(RangeStart, RecallRangeSize, StoreID), ChunkOffsets2 = filter_by_packing(ChunkOffsets, Intervals, StoreID), - log_read_range(Candidate, WhichChunk, length(ChunkOffsets), StartTime), + log_read_range(Mode, Candidate, WhichChunk, length(ChunkOffsets), StartTime), ChunkOffsets2. 
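%% In standalone mode chunks_read/6 above messages the caller directly rather
%% than going through ar_mining_worker, which is what lets ar_doctor_bench
%% (earlier in this diff) drive reads from a plain process. A minimal sketch
%% of that calling side; the function name is ours, it assumes
%% -include_lib("arweave/include/ar_mining.hrl") for #mining_candidate{}, and
%% it sets only the candidate fields ar_doctor_bench sets:
standalone_read_example(Offset, Addr, PackingDifficulty) ->
    Candidate = #mining_candidate{ mining_address = Addr,
            packing_difficulty = PackingDifficulty },
    true = ar_mining_io:read_recall_range(chunk1, self(), Candidate, Offset),
    receive
        {chunks_read, chunk1, _Candidate, _RangeStart, ChunkOffsets} ->
            length(ChunkOffsets)
    after 60000 ->
        timeout
    end.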
filter_by_packing([], _Intervals, _StoreID) -> @@ -350,7 +410,9 @@ filter_by_packing([{EndOffset, Chunk} | ChunkOffsets], Intervals, "default" = St filter_by_packing(ChunkOffsets, _Intervals, _StoreID) -> ChunkOffsets. -log_read_range(Candidate, WhichChunk, FoundChunks, StartTime) -> +log_read_range(standalone, _Candidate, _WhichChunk, _FoundChunks, _StartTime) -> + ok; +log_read_range(_Mode, Candidate, WhichChunk, FoundChunks, StartTime) -> EndTime = erlang:monotonic_time(), ElapsedTime = erlang:convert_time_unit(EndTime-StartTime, native, millisecond), ReadRate = case ElapsedTime > 0 of @@ -375,35 +437,28 @@ log_read_range(Candidate, WhichChunk, FoundChunks, StartTime) -> % {partition_number, PartitionNumber}]), ok. -find_thread(PartitionNumber, MiningAddress, PackingDifficulty, RangeEnd, RangeStart, Threads) -> - Keys = find_thread2(PartitionNumber, MiningAddress, PackingDifficulty, - maps:iterator(Threads)), - case find_thread3(Keys, RangeEnd, RangeStart, 0, not_found) of +find_thread(RangeStart, RangeEnd, State) -> + PartitionNumber = ar_node:get_partition_number(RangeStart), + StoreIDs = maps:get(PartitionNumber, State#state.partition_to_store_ids, not_found), + StoreID = find_largest_intersection(StoreIDs, RangeStart, RangeEnd, 0, not_found), + Device = maps:get(StoreID, State#state.store_id_to_device, not_found), + Thread = maps:get(Device, State#state.io_threads, not_found), + case Thread of not_found -> not_found; - Key -> - maps:get(Key, Threads) - end. - -find_thread2(PartitionNumber, MiningAddress, PackingDifficulty, Iterator) -> - case maps:next(Iterator) of - none -> - []; - {{PartitionNumber, MiningAddress, PackingDifficulty, _StoreID} = Key, - _Thread, Iterator2} -> - [Key | find_thread2(PartitionNumber, MiningAddress, PackingDifficulty, Iterator2)]; - {_Key, _Thread, Iterator2} -> - find_thread2(PartitionNumber, MiningAddress, PackingDifficulty, Iterator2) + _ -> + {Thread, StoreID} end. -find_thread3([Key | Keys], RangeEnd, RangeStart, Max, MaxKey) -> - {_PartitionNumber, _MiningAddress, _PackingDifficulty, StoreID} = Key, +find_largest_intersection(not_found, _RangeStart, _RangeEnd, _Max, _MaxKey) -> + not_found; +find_largest_intersection([StoreID | StoreIDs], RangeStart, RangeEnd, Max, MaxKey) -> I = ar_sync_record:get_intersection_size(RangeEnd, RangeStart, ar_chunk_storage, StoreID), case I > Max of true -> - find_thread3(Keys, RangeEnd, RangeStart, I, Key); + find_largest_intersection(StoreIDs, RangeStart, RangeEnd, I, StoreID); false -> - find_thread3(Keys, RangeEnd, RangeStart, Max, MaxKey) + find_largest_intersection(StoreIDs, RangeStart, RangeEnd, Max, MaxKey) end; -find_thread3([], _RangeEnd, _RangeStart, _Max, MaxKey) -> +find_largest_intersection([], _RangeStart, _RangeEnd, _Max, MaxKey) -> MaxKey. diff --git a/apps/arweave/src/ar_mining_server.erl b/apps/arweave/src/ar_mining_server.erl index c89006ecb..b892dd96c 100644 --- a/apps/arweave/src/ar_mining_server.erl +++ b/apps/arweave/src/ar_mining_server.erl @@ -7,7 +7,8 @@ start_mining/1, set_difficulty/1, set_merkle_rebase_threshold/1, set_height/1, compute_h2_for_peer/1, prepare_and_post_solution/1, prepare_poa/3, get_recall_bytes/5, active_sessions/0, encode_sessions/1, add_pool_job/6, - is_one_chunk_solution/1, fetch_poa_from_peers/2, log_prepare_solution_failure/3]). + is_one_chunk_solution/1, fetch_poa_from_peers/2, log_prepare_solution_failure/3, + get_packing_difficulty/1]). -export([pause/0]). -export([init/1, handle_cast/2, handle_call/3, handle_info/2, terminate/2]). 
@@ -115,6 +116,13 @@ log_prepare_solution_failure(Solution, FailureReason, AdditionalLogData) -> {solution_hash, ar_util:safe_encode(SolutionH)}, {packing_difficulty, PackingDifficulty} | AdditionalLogData]). +-spec get_packing_difficulty(Packing :: ar_storage_module:packing()) -> + PackingDifficulty :: non_neg_integer(). +get_packing_difficulty({composite, _, Difficulty}) -> + Difficulty; +get_packing_difficulty(_) -> + 0. + %%%=================================================================== %%% Generic server callbacks. %%%=================================================================== @@ -126,13 +134,9 @@ init([]) -> ar_chunk_storage:open_files("default"), Partitions = ar_mining_io:get_partitions(infinity), + Packing = ar_mining_io:get_packing(), + PackingDifficulty = get_packing_difficulty(Packing), - %% ar_config:validate_storage_modules/1 ensures that we only mine against a single - %% packing format. So we can grab the packing difficulty from any partition. - {MiningAddr, PackingDifficulty} = case Partitions of - [{_Partition, Addr, Difficulty} | _Rest] -> {Addr, Difficulty}; - [] -> {undefined, 0} - end, Workers = lists:foldl( fun({Partition, _Addr, Difficulty}, Acc) -> maps:put({Partition, Difficulty}, @@ -143,9 +147,8 @@ init([]) -> ), ?LOG_INFO([{event, mining_server_init}, - {mining_addr, ar_util:safe_encode(MiningAddr)}, - {packing_difficulty, PackingDifficulty}, - {partitions, length(Partitions)}]), + {packing, ar_serialize:encode_packing(Packing, false)}, + {partitions, [ Partition || {Partition, _, _} <- Partitions]}]), {ok, #state{ workers = Workers, @@ -493,13 +496,16 @@ distribute_output(Candidate, State) -> distribute_output(ar_mining_io:get_partitions(), Candidate, State). distribute_output([], _Candidate, _State) -> + ?LOG_DEBUG([{event, distribute_output_done}]), ok; distribute_output([{_Partition, _MiningAddress, PackingDifficulty} | _Partitions], _Candidate, #state{ allow_composite_packing = false }) when PackingDifficulty >= 1 -> %% Do not mine with the composite packing until some time after the fork 2.8. + ?LOG_DEBUG([{event, distribute_output_skipping_composite_packing}]), ok; distribute_output([{Partition, MiningAddress, PackingDifficulty} | Partitions], Candidate, State) -> + ?LOG_DEBUG([{event, distribute_output}, {partition, Partition}]), case get_worker({Partition, PackingDifficulty}, State) of not_found -> ?LOG_ERROR([{event, worker_not_found}, {partition, Partition}]), @@ -661,7 +667,10 @@ prepare_solution(steps, Candidate, Solution) -> {start_step_number, PrevStepNumber}, {next_step_number, StepNumber}, {next_seed, ar_util:safe_encode(PrevNextSeed)}, - {next_vdf_difficulty, PrevNextVDFDifficulty}]), + {next_vdf_difficulty, PrevNextVDFDifficulty}, + {h1, ar_util:safe_encode(Candidate#mining_candidate.h1)}, + {h2, ar_util:safe_encode(Candidate#mining_candidate.h2)} + ]), error end; diff --git a/apps/arweave/src/ar_mining_stats.erl b/apps/arweave/src/ar_mining_stats.erl index a698b62a5..748f7db5d 100644 --- a/apps/arweave/src/ar_mining_stats.erl +++ b/apps/arweave/src/ar_mining_stats.erl @@ -5,7 +5,8 @@ set_total_data_size/1, set_storage_module_data_size/6, vdf_computed/0, raw_read_rate/2, chunks_read/2, h1_computed/2, h2_computed/2, h1_solution/0, h2_solution/0, block_found/0, - h1_sent_to_peer/2, h1_received_from_peer/2, h2_sent_to_peer/1, h2_received_from_peer/1]). + h1_sent_to_peer/2, h1_received_from_peer/2, h2_sent_to_peer/1, h2_received_from_peer/1, + get_partition_data_size/2]). 
-export([init/1, handle_cast/2, handle_call/3, handle_info/2, terminate/2]). @@ -162,8 +163,11 @@ set_storage_module_data_size( StoreLabel = ar_storage_module:label_by_id(StoreID), PackingLabel = ar_storage_module:packing_label(Packing), try + PackingDifficulty = ar_mining_server:get_packing_difficulty(Packing), prometheus_gauge:set(v2_index_data_size_by_packing, - [StoreLabel, PackingLabel, PartitionNumber, StorageModuleSize, StorageModuleIndex], + [StoreLabel, PackingLabel, PartitionNumber, + StorageModuleSize, StorageModuleIndex, + PackingDifficulty], DataSize), ets:insert(?MODULE, { {partition, PartitionNumber, storage_module, StoreID, packing, Packing}, DataSize}) @@ -311,59 +315,6 @@ get_start(Key) -> Start end. -get_packing() -> - {ok, Config} = application:get_env(arweave, config), - MiningAddress = Config#config.mining_addr, - Pattern1 = { - partition, '_', storage_module, '_', packing, - {spora_2_6, MiningAddress} - }, - Pattern2 = { - partition, '_', storage_module, '_', packing, - {composite, MiningAddress, '_'} - }, - - Results = - ets:match_object(?MODULE, {Pattern1, '_'}) ++ - ets:match_object(?MODULE, {Pattern2, '_'}), - - % Extract Packings and create a set - Packings = [Packing || {{_, _, _, _, _, Packing}, _} <- Results], - PackingsSet = sets:from_list(Packings), - - case sets:to_list(PackingsSet) of - [SinglePacking] -> SinglePacking; - [] -> - % No results found - undefined; - MultiplePackings -> - % More than one unique packing found - ?LOG_WARNING([ - {event, get_packing_failed}, {reason, multiple_unique_packings}, - {unique_packings, - string:join( - [format_packing(Packing) || Packing <- MultiplePackings], ", ")} - ]), - undefined - end. - -format_packing({spora_2_6, Addr}) -> - "spora_2_6_" ++ binary_to_list(ar_util:encode(Addr)); -format_packing({composite, Addr, Difficulty}) -> - "composite_" ++ binary_to_list(ar_util:encode(Addr)) ++ "." ++ integer_to_list(Difficulty); -format_packing(spora_2_5) -> - "spora_2_5"; -format_packing(unpacked) -> - "unpacked"; -format_packing(Packing) -> - ?LOG_ERROR("Unexpected packing: ~p", [Packing]), - "unknown". - -get_packing_difficulty({composite, _, Difficulty}) -> - Difficulty; -get_packing_difficulty(_) -> - 0. - get_hashrate_divisor(PackingDifficulty) -> %% Raw hashrate varies based on packing difficulty. Assuming a spora_2_6 base hashrate %% of 404, the raw hashrate at different packing difficulties is: @@ -427,7 +378,7 @@ vdf_speed(Now) -> get_hash_hps(PoA1Multiplier, Packing, PartitionNumber, TotalCurrent, Now) -> H1 = get_average_count_by_time({partition, PartitionNumber, h1, TotalCurrent}, Now), H2 = get_average_count_by_time({partition, PartitionNumber, h2, TotalCurrent}, Now), - PackingDifficulty = get_packing_difficulty(Packing), + PackingDifficulty = ar_mining_server:get_packing_difficulty(Packing), ((H1 / PoA1Multiplier) + H2) / get_hashrate_divisor(PackingDifficulty). 
%% @doc calculate the maximum hash rate (in MiB per second read from disk) for the given VDF @@ -435,7 +386,7 @@ get_hash_hps(PoA1Multiplier, Packing, PartitionNumber, TotalCurrent, Now) -> optimal_partition_read_mibps(_Packing, undefined, _PartitionDataSize, _TotalDataSize, _WeaveSize) -> 0.0; optimal_partition_read_mibps(Packing, VDFSpeed, PartitionDataSize, TotalDataSize, WeaveSize) -> - PackingDifficulty = get_packing_difficulty(Packing), + PackingDifficulty = ar_mining_server:get_packing_difficulty(Packing), RecallRangeSize = ar_block:get_recall_range_size(PackingDifficulty) / ?MiB, (RecallRangeSize / VDFSpeed) * min(1.0, (PartitionDataSize / ?PARTITION_SIZE)) * @@ -454,23 +405,24 @@ optimal_partition_hash_hps(PoA1Multiplier, VDFSpeed, PartitionDataSize, TotalDat generate_report() -> {ok, Config} = application:get_env(arweave, config), Height = ar_node:get_height(), + Packing = ar_mining_io:get_packing(), + Partitions = ar_mining_io:get_partitions(), generate_report( Height, - ar_mining_io:get_partitions(), + Packing, + Partitions, Config#config.cm_peers, ar_node:get_weave_size(), erlang:monotonic_time(millisecond) ). -generate_report(_Height, [], _Peers, _WeaveSize, Now) -> +generate_report(_Height, _Packing, [], _Peers, _WeaveSize, Now) -> #report{ now = Now }; -generate_report(Height, Partitions, Peers, WeaveSize, Now) -> +generate_report(Height, Packing, Partitions, Peers, WeaveSize, Now) -> PoA1Multiplier = ar_difficulty:poa1_diff_multiplier(Height), VDFSpeed = vdf_speed(Now), - %% We currently only support mining against a single data packing format - Packing = get_packing(), TotalDataSize = get_total_minable_data_size(Packing), Report = #report{ now = Now, @@ -993,7 +945,6 @@ test_data_size_stats() -> do_test_data_size_stats(Mining, Packing) -> reset_all_stats(), - ?assertEqual(undefined, get_packing()), ?assertEqual(0, get_total_minable_data_size(Mining)), ?assertEqual(0, get_partition_data_size(1, Mining)), ?assertEqual(0, get_partition_data_size(2, Mining)), @@ -1028,7 +979,6 @@ do_test_data_size_stats(Mining, Packing) -> ar_storage_module:id({?PARTITION_SIZE, 2, Packing}), Packing, 2, ?PARTITION_SIZE, 2, 203), - ?assertEqual(Mining, get_packing()), ?assertEqual(214, get_partition_data_size(1, Mining)), ?assertEqual(202, get_partition_data_size(2, Mining)), ?assertEqual(214, get_total_minable_data_size(Mining)), @@ -1053,13 +1003,11 @@ do_test_data_size_stats(Mining, Packing) -> ar_storage_module:id({?PARTITION_SIZE, 2, Packing}), Packing, 2, ?PARTITION_SIZE, 2, 53), - ?assertEqual(Mining, get_packing()), ?assertEqual(336, get_partition_data_size(1, Mining)), ?assertEqual(52, get_partition_data_size(2, Mining)), ?assertEqual(336, get_total_minable_data_size(Mining)), reset_all_stats(), - ?assertEqual(undefined, get_packing()), ?assertEqual(0, get_total_minable_data_size(Mining)), ?assertEqual(0, get_partition_data_size(1, Mining)), ?assertEqual(0, get_partition_data_size(2, Mining)). @@ -1235,7 +1183,7 @@ test_optimal_stats_poa1_multiple_2() -> test_optimal_stats({composite, <<"MINING">>, 2}, 2). 
test_optimal_stats(Packing, PoA1Multiplier) -> - PackingDifficulty = get_packing_difficulty(Packing), + PackingDifficulty = ar_mining_server:get_packing_difficulty(Packing), RecallRangeSize = case PackingDifficulty of 0 -> 0.5; @@ -1309,7 +1257,7 @@ test_report(Mining, Packing, PoA1Multiplier) -> {composite, Addr, _} -> Addr end, - PackingDifficulty = get_packing_difficulty(Mining), + PackingDifficulty = ar_mining_server:get_packing_difficulty(Mining), DifficultyDivisor = case PackingDifficulty of 0 -> 1.0; @@ -1415,11 +1363,11 @@ test_report(Mining, Packing, PoA1Multiplier) -> ar_mining_stats:h2_received_from_peer(Peer2), ar_mining_stats:h2_received_from_peer(Peer2), - Report1 = generate_report(0, [], [], WeaveSize, Now+1000), + Report1 = generate_report(0, Mining, [], [], WeaveSize, Now+1000), ?assertEqual(#report{ now = Now+1000 }, Report1), log_report(format_report(Report1, WeaveSize)), - Report2 = generate_report(0, Partitions, Peers, WeaveSize, Now+1000), + Report2 = generate_report(0, Mining, Partitions, Peers, WeaveSize, Now+1000), ReportString = format_report(Report2, WeaveSize), log_report(ReportString), diff --git a/apps/arweave/src/ar_mining_worker.erl b/apps/arweave/src/ar_mining_worker.erl index 809b35ffb..e58231246 100644 --- a/apps/arweave/src/ar_mining_worker.erl +++ b/apps/arweave/src/ar_mining_worker.erl @@ -154,6 +154,7 @@ handle_cast({chunks_read, {WhichChunk, Candidate, RangeStart, ChunkOffsets}}, St end; handle_cast({add_task, {TaskType, Candidate, _ExtraArgs} = Task}, State) -> + ?LOG_DEBUG([{event, add_task}, {worker, State#state.name}, {task, TaskType}]), case is_session_valid(State, Candidate) of true -> {noreply, add_task(Task, State)}; @@ -457,8 +458,11 @@ handle_task({computed_h1, Candidate, _ExtraArgs}, State) -> State2 = hash_computed(h1, Candidate, State), case h1_passes_diff_checks(H1, Candidate, State2) of true -> - ?LOG_INFO([{event, found_h1_solution}, {worker, State2#state.name}, - {h1, ar_util:encode(H1)}, {difficulty, get_difficulty(State2, Candidate)}]), + ?LOG_INFO([{event, found_h1_solution}, + {step, Candidate#mining_candidate.step_number}, + {worker, State2#state.name}, + {h1, ar_util:encode(H1)}, + {difficulty, get_difficulty(State2, Candidate)}]), ar_mining_stats:h1_solution(), %% Decrement 1 for chunk1: %% Since we found a solution we won't need chunk2 (and it will be evicted if @@ -519,6 +523,7 @@ handle_task({computed_h2, Candidate, _ExtraArgs}, State) -> true -> ?LOG_INFO([{event, found_h2_solution}, {worker, State#state.name}, + {step, Candidate#mining_candidate.step_number}, {h2, ar_util:encode(H2)}, {difficulty, get_difficulty(State2, Candidate)}, {partial_difficulty, get_partial_difficulty(State2, Candidate)}]), @@ -526,6 +531,7 @@ handle_task({computed_h2, Candidate, _ExtraArgs}, State) -> partial -> ?LOG_INFO([{event, found_h2_partial_solution}, {worker, State2#state.name}, + {step, Candidate#mining_candidate.step_number}, {h2, ar_util:encode(H2)}, {partial_difficulty, get_partial_difficulty(State2, Candidate)}]) end, diff --git a/apps/arweave/src/ar_node_worker.erl b/apps/arweave/src/ar_node_worker.erl index ebedf43c0..dceb79333 100644 --- a/apps/arweave/src/ar_node_worker.erl +++ b/apps/arweave/src/ar_node_worker.erl @@ -383,7 +383,12 @@ handle_info({event, node_state, _Event}, State) -> handle_info({event, nonce_limiter, initialized}, State) -> [{_, {Height, Blocks, BI}}] = ets:lookup(node_state, join_state), ar_storage:store_block_index(BI), - B = hd(Blocks), + RecentBI = lists:sublist(BI, ?BLOCK_INDEX_HEAD_LEN), + Current = 
element(1, hd(RecentBI)), + RecentBlocks = lists:sublist(Blocks, ?STORE_BLOCKS_BEHIND_CURRENT), + RecentBlocks2 = set_poa_caches(RecentBlocks), + ar_block_cache:initialize_from_list(block_cache, RecentBlocks2), + B = hd(RecentBlocks2), RewardHistory = [{H, {Addr, HashRate, Reward, Denomination}} || {{Addr, HashRate, Reward, Denomination}, {H, _, _}} <- lists:zip(B#block.reward_history, @@ -394,15 +399,11 @@ handle_info({event, nonce_limiter, initialized}, State) -> <- lists:zip(B#block.block_time_history, lists:sublist(BI, length(B#block.block_time_history)))], ar_storage:store_block_time_history_part2(BlockTimeHistory), - RecentBI = lists:sublist(BI, ?BLOCK_INDEX_HEAD_LEN), Height = B#block.height, ar_disk_cache:write_block(B), ar_data_sync:join(RecentBI), ar_header_sync:join(Height, RecentBI, Blocks), ar_tx_blacklist:start_taking_down(), - Current = element(1, hd(RecentBI)), - ar_block_cache:initialize_from_list(block_cache, - lists:sublist(Blocks, ?STORE_BLOCKS_BEHIND_CURRENT)), BlockTXPairs = [block_txs_pair(Block) || Block <- Blocks], {BlockAnchors, RecentTXMap} = get_block_anchors_and_recent_txs_map(BlockTXPairs), {Rate, ScheduledRate} = {B#block.usd_to_ar_rate, B#block.scheduled_usd_to_ar_rate}, @@ -1680,14 +1681,34 @@ start_from_state(BI, Height) -> {Skipped, Blocks} -> BI2 = lists:nthtail(Skipped, BI), Height2 = Height - Skipped, - RewardHistoryBI = ar_rewards:trim_buffered_reward_history(Height, BI2), + + %% Until roughly two months after the 2.8 hardfork, the reward history accumulated + %% by any node will be shorter than the full expected length. Specifically, + %% it will be 21,600 blocks plus the number of blocks that have elapsed since + %% the 2.8 HF activation. + InterimRewardHistoryLength = (Height - ar_fork:height_2_8()) + 21600, + RewardHistoryBI = lists:sublist( + ar_rewards:trim_buffered_reward_history(Height, BI2), + InterimRewardHistoryLength + ), + + BlockTimeHistoryBI = lists:sublist(BI2, ar_block_time_history:history_length() + ?STORE_BLOCKS_BEHIND_CURRENT), case {ar_storage:read_reward_history(RewardHistoryBI), ar_storage:read_block_time_history(Height2, BlockTimeHistoryBI)} of {not_found, _} -> + ?LOG_ERROR([{event, start_from_state_error}, + {reason, reward_history_not_found}, + {height, Height2}, + {block_index, length(BI2)}, + {reward_history, length(RewardHistoryBI)}]), reward_history_not_found; {_, not_found} -> + ?LOG_ERROR([{event, start_from_state_error}, + {reason, block_time_history_not_found}, + {height, Height2}, + {block_index, length(BI2)}, + {block_time_history, length(BlockTimeHistoryBI)}]), block_time_history_not_found; {RewardHistory, BlockTimeHistory} -> Blocks2 = ar_rewards:set_reward_history(Blocks, RewardHistory), @@ -1755,6 +1776,39 @@ read_recent_blocks3([{BH, _, _} | BI], BlocksToRead, Blocks) -> not_found end. +set_poa_caches([]) -> + []; +set_poa_caches([B | Blocks]) -> + [set_poa_cache(B) | set_poa_caches(Blocks)]. + +set_poa_cache(B) -> + PoA1 = B#block.poa, + PoA2 = B#block.poa2, + MiningAddress = B#block.reward_addr, + PackingDifficulty = B#block.packing_difficulty, + Nonce = B#block.nonce, + RecallByte1 = B#block.recall_byte, + RecallByte2 = B#block.recall_byte2, + Packing = ar_block:get_packing(PackingDifficulty, MiningAddress), + PoACache = compute_poa_cache(B, PoA1, RecallByte1, Nonce, Packing), + B2 = B#block{ poa_cache = PoACache }, + %% Compute the PoA2 cache if PoA2 is present.
+ case RecallByte2 of + undefined -> + B2; + _ -> + PoA2Cache = compute_poa_cache(B, PoA2, RecallByte2, Nonce, Packing), + B2#block{ poa2_cache = PoA2Cache } + end. + +compute_poa_cache(B, PoA, RecallByte, Nonce, Packing) -> + PackingDifficulty = B#block.packing_difficulty, + SubChunkIndex = ar_block:get_sub_chunk_index(PackingDifficulty, Nonce), + {BlockStart, BlockEnd, TXRoot} = ar_block_index:get_block_bounds(RecallByte), + BlockSize = BlockEnd - BlockStart, + ChunkID = ar_tx:generate_chunk_id(PoA#poa.chunk), + {{BlockStart, RecallByte, TXRoot, BlockSize, Packing, SubChunkIndex}, ChunkID}. + dump_mempool(TXs, MempoolSize) -> SerializedTXs = maps:map(fun(_, {TX, St}) -> {ar_serialize:tx_to_binary(TX), St} end, TXs), case ar_storage:write_term(mempool, {SerializedTXs, MempoolSize}) of diff --git a/apps/arweave/src/ar_p3.erl b/apps/arweave/src/ar_p3.erl index 4762944d1..c84ec43eb 100644 --- a/apps/arweave/src/ar_p3.erl +++ b/apps/arweave/src/ar_p3.erl @@ -9,7 +9,7 @@ -export([start_link/0, allow_request/1, reverse_charge/1, get_balance/3, get_rates_json/0]). -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2]). --ifdef(TEST). +-ifdef(DEBUG). -define(MAX_BLOCK_SCAN, 4). -else. -define(MAX_BLOCK_SCAN, 200). diff --git a/apps/arweave/src/ar_packing_server.erl b/apps/arweave/src/ar_packing_server.erl index 8c849a3f8..9ae16fad9 100644 --- a/apps/arweave/src/ar_packing_server.erl +++ b/apps/arweave/src/ar_packing_server.erl @@ -141,7 +141,9 @@ unpad_chunk(spora_2_5, Unpacked, ChunkSize, _PackedSize) -> unpad_chunk({spora_2_6, _Addr}, Unpacked, ChunkSize, PackedSize) -> unpad_chunk(Unpacked, ChunkSize, PackedSize); unpad_chunk({composite, _Addr, _PackingDifficulty}, Unpacked, ChunkSize, PackedSize) -> - unpad_chunk(Unpacked, ChunkSize, PackedSize). + unpad_chunk(Unpacked, ChunkSize, PackedSize); +unpad_chunk(unpacked, Unpacked, ChunkSize, _PackedSize) -> + binary:part(Unpacked, 0, ChunkSize). unpad_chunk(Unpacked, ChunkSize, PackedSize) -> Padding = binary:part(Unpacked, ChunkSize, PackedSize - ChunkSize), diff --git a/apps/arweave/src/ar_peers.erl b/apps/arweave/src/ar_peers.erl index fa643d43e..92f06ef83 100644 --- a/apps/arweave/src/ar_peers.erl +++ b/apps/arweave/src/ar_peers.erl @@ -20,6 +20,7 @@ -export([set_tag/3, get_tag/2]). -export([connected_peer/1, disconnected_peer/1, is_connected_peer/1]). -export([get_connection_timestamp_peer/1]). +-export([filter_peers/2]). %% The frequency in seconds of re-resolving DNS of peers configured by domain names. -define(STORE_RESOLVED_DOMAIN_S, 60). diff --git a/apps/arweave/src/ar_pricing.erl b/apps/arweave/src/ar_pricing.erl index c33b0448e..f39c7fef4 100644 --- a/apps/arweave/src/ar_pricing.erl +++ b/apps/arweave/src/ar_pricing.erl @@ -768,8 +768,17 @@ network_data_size(Height, TargetTime = ar_testnet:target_block_time(Height), SolutionsPerPartitionPerBlock = (SolutionsPerPartitionPerVDFStep * VDFIntervalTotal * TargetTime) div IntervalTotal, - EstimatedPartitionCount = AverageHashRate div SolutionsPerPartitionPerBlock, - EstimatedPartitionCount * (?PARTITION_SIZE). 
+ ?LOG_DEBUG([{event, network_data_size}, + {solutions_per_partition_per_vdf_step, SolutionsPerPartitionPerVDFStep}, + {vdf_interval_total, VDFIntervalTotal}, {target_time, TargetTime}, + {interval_total, IntervalTotal}, {solutions_per_partition_per_block, + SolutionsPerPartitionPerBlock}]), + case SolutionsPerPartitionPerBlock of + 0 -> 0; + _ -> + EstimatedPartitionCount = AverageHashRate div SolutionsPerPartitionPerBlock, + EstimatedPartitionCount * (?PARTITION_SIZE) + end. %%%=================================================================== %%% Tests. diff --git a/apps/arweave/src/ar_serialize.erl b/apps/arweave/src/ar_serialize.erl index 75e9630f6..01241d9e9 100644 --- a/apps/arweave/src/ar_serialize.erl +++ b/apps/arweave/src/ar_serialize.erl @@ -2108,6 +2108,8 @@ partition_to_json_struct(Bucket, BucketSize, Addr, PackingDifficulty) -> end, {Fields2}. +encode_packing(undefined, false) -> + "undefined"; encode_packing(any, false) -> "any"; encode_packing({spora_2_6, Addr}, _Strict) -> diff --git a/apps/arweave/src/ar_storage.erl b/apps/arweave/src/ar_storage.erl index 81b5af50b..44ef920d1 100644 --- a/apps/arweave/src/ar_storage.erl +++ b/apps/arweave/src/ar_storage.erl @@ -72,6 +72,9 @@ read_reward_history([{H, _WeaveSize, _TXRoot} | BI]) -> History -> case ar_kv:get(reward_history_db, H) of not_found -> + ?LOG_DEBUG([{event, read_reward_history_not_found}, + {reason, missing_block}, + {block, ar_util:encode(H)}]), not_found; {ok, Bin} -> Element = binary_to_term(Bin), diff --git a/apps/arweave/src/ar_storage_module.erl b/apps/arweave/src/ar_storage_module.erl index 8ab5c856d..527b1be02 100644 --- a/apps/arweave/src/ar_storage_module.erl +++ b/apps/arweave/src/ar_storage_module.erl @@ -1,8 +1,9 @@ -module(ar_storage_module). --export([id/1, label/1, address_label/1, address_label/2, packing_label/1, label_by_id/1, - get_by_id/1, get_range/1, get_packing/1, get_size/1, get/2, get_all/1, get_all/2, - has_any/1, has_range/2, get_cover/3]). +-export([id/1, label/1, address_label/1, address_label/2, module_address/1, + module_packing_difficulty/1, packing_label/1, label_by_id/1, get_by_id/1, + get_range/1, module_range/1, module_range/2, get_packing/1, get_size/1, + get/2, get_all/1, get_all/2, has_any/1, has_range/2, get_cover/3]). -export([get_unique_sorted_intervals/1]). @@ -22,6 +23,9 @@ -define(OVERLAP, (?LEGACY_RECALL_RANGE_SIZE)). -endif. +-type storage_module() :: {integer(), integer(), {atom(), binary()}} + | {integer(), integer(), {atom(), binary(), integer()}}. + %%%=================================================================== %%% Public interface. %%%=================================================================== @@ -85,6 +89,20 @@ address_label(Addr, PackingDifficulty) -> integer_to_list(Label) end. +-spec module_address(ar_storage_module:storage_module()) -> binary() | undefined. +module_address({_, _, {spora_2_6, Addr}}) -> + Addr; +module_address({_, _, {composite, Addr, _PackingDifficulty}}) -> + Addr; +module_address(_StorageModule) -> + undefined. + +-spec module_packing_difficulty(ar_storage_module:storage_module()) -> integer(). +module_packing_difficulty({_, _, {composite, _Addr, PackingDifficulty}}) -> + PackingDifficulty; +module_packing_difficulty(_StorageModule) -> + 0. 
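Usage sketch for the new module_address/1 and module_packing_difficulty/1 accessors (the shell session is hypothetical; Size and Addr stand for any bucket size and mining address binary):

%% 1> ar_storage_module:module_address({Size, 3, {composite, Addr, 2}}).
%% Addr
%% 2> ar_storage_module:module_packing_difficulty({Size, 3, {composite, Addr, 2}}).
%% 2
%% 3> ar_storage_module:module_packing_difficulty({Size, 3, unpacked}).
%% 0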
+ packing_label({spora_2_6, Addr}) -> AddrLabel = ar_storage_module:address_label(Addr), list_to_atom("spora_2_6_" ++ AddrLabel); @@ -134,12 +152,19 @@ get_range(ID) -> get_range(ID, [Module | Modules]) -> case ar_storage_module:id(Module) == ID of true -> - {BucketSize, Bucket, _Packing} = Module, - {BucketSize * Bucket, (Bucket + 1) * BucketSize + ?OVERLAP}; + module_range(Module); false -> get_range(ID, Modules) end. +-spec module_range(ar_storage_module:storage_module()) -> + {non_neg_integer(), non_neg_integer()}. +module_range(Module) -> + module_range(Module, ?OVERLAP). +module_range(Module, Overlap) -> + {BucketSize, Bucket, _Packing} = Module, + {BucketSize * Bucket, (Bucket + 1) * BucketSize + Overlap}. + %% @doc Return the packing configured for the given module. get_packing(ID) -> {ok, Config} = application:get_env(arweave, config), diff --git a/apps/arweave/src/ar_tx_emitter.erl b/apps/arweave/src/ar_tx_emitter.erl index 6bdf7b762..3f164bc63 100644 --- a/apps/arweave/src/ar_tx_emitter.erl +++ b/apps/arweave/src/ar_tx_emitter.erl @@ -42,7 +42,10 @@ start_link(Name, Workers) -> init(Workers) -> gen_server:cast(?MODULE, process_chunk), - {ok, #state{ workers = queue:from_list(Workers), currently_emitting = sets:new() }}. + State = #state{ workers = queue:from_list(Workers) + , currently_emitting = sets:new() + }, + {ok, State}. handle_call(Request, _From, State) -> ?LOG_WARNING([{event, unhandled_call}, {module, ?MODULE}, {request, Request}]), @@ -50,18 +53,33 @@ handle_call(Request, _From, State) -> handle_cast(process_chunk, State) -> #state{ workers = Q, currently_emitting = Emitting } = State, + + % Only current (active) peers should be used; using lifetime + % peers would create unnecessary timeouts. The trusted peers + % are contacted first. TrustedPeers = ar_peers:get_trusted_peers(), - Peers = (ar_peers:get_peers(lifetime) -- TrustedPeers) ++ TrustedPeers, - {ok, Config} = application:get_env(arweave, config), - {Q2, Emitting2} = emit(ar_mempool:get_propagation_queue(), Q, Emitting, Peers, - Config#config.max_propagation_peers, ?CHUNK_SIZE), + CurrentPeers = ar_peers:get_peers(current), + FilteredPeers = ar_peers:filter_peers(CurrentPeers, {timestamp, 60*60*24}), + CleanedPeers = FilteredPeers -- TrustedPeers, + Peers = TrustedPeers ++ CleanedPeers, + + % prepare to emit chunk(s) + PropagationQueue = ar_mempool:get_propagation_queue(), + PropagationMax = max_propagation_peers(), + {Q2, Emitting2} = emit(PropagationQueue, Q, Emitting, Peers, PropagationMax, ?CHUNK_SIZE), + + % re-check the mempool later if emit/6 returned an empty set case sets:is_empty(Emitting2) of true -> ar_util:cast_after(?CHECK_MEMPOOL_FREQUENCY, ?MODULE, process_chunk); false -> ok end, - {noreply, State#state{ workers = Q2, currently_emitting = Emitting2 }}; + + NewState = State#state{ workers = Q2 + , currently_emitting = Emitting2 + }, + {noreply, NewState}; handle_cast(Msg, State) -> ?LOG_ERROR([{event, unhandled_cast}, {module, ?MODULE}, {message, Msg}]), @@ -118,6 +136,10 @@ terminate(_Reason, _State) -> %%% Private functions. %%%=================================================================== +max_propagation_peers() -> + {ok, Config} = application:get_env(arweave, config), + Config#config.max_propagation_peers.
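The peer ordering built in the handle_cast(process_chunk, ...) clause above can be summarized by this standalone sketch (the wrapper function name is hypothetical; the calls mirror the ones in the patch): trusted peers come first, followed by current peers seen within the last 24 hours, with the trusted ones de-duplicated out.

example_emission_peers() ->
	TrustedPeers = ar_peers:get_trusted_peers(),
	CurrentPeers = ar_peers:get_peers(current),
	%% Keep only peers seen within the last 24 hours.
	FilteredPeers = ar_peers:filter_peers(CurrentPeers, {timestamp, 60 * 60 * 24}),
	TrustedPeers ++ (FilteredPeers -- TrustedPeers).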
+ emit(_Set, Q, Emitting, _Peers, _MaxPeers, N) when N =< 0 -> {Q, Emitting}; emit(Set, Q, Emitting, Peers, MaxPeers, N) -> @@ -125,32 +147,42 @@ emit(Set, Q, Emitting, Peers, MaxPeers, N) -> true -> {Q, Emitting}; false -> - {{Utility, TXID}, Set2} = gb_sets:take_largest(Set), - case ets:member(ar_tx_emitter_recently_emitted, TXID) of - true -> - emit(Set2, Q, Emitting, Peers, MaxPeers, N); - false -> - {Emitting2, Q2} = - lists:foldl( - fun(Peer, {Acc, Workers}) -> - {{value, W}, Workers2} = queue:out(Workers), - gen_server:cast(W, {emit, TXID, Peer, self()}), - erlang:send_after(?WORKER_TIMEOUT, ?MODULE, - {timeout, TXID, Peer}), - {sets:add_element({TXID, Peer}, Acc), - queue:in(W, Workers2)} - end, - {Emitting, Q}, - lists:sublist(Peers, MaxPeers) - ), - %% The cache storing recently emitted transactions is used instead - %% of an explicit synchronization of the propagation queue updates - %% with ar_node_worker - we do not rely on ar_node_worker removing - %% emitted transactions from the queue on time. - ets:insert(ar_tx_emitter_recently_emitted, {TXID}), - erlang:send_after(?CLEANUP_RECENTLY_EMITTED_TIMEOUT, ?MODULE, - {remove_from_recently_emitted, TXID}), - ar_events:send(tx, {emitting_scheduled, Utility, TXID}), - emit(Set2, Q2, Emitting2, Peers, MaxPeers, N - 1) - end + emit_set_not_empty(Set, Q, Emitting, Peers, MaxPeers, N) + end. + +emit_set_not_empty(Set, Q, Emitting, Peers, MaxPeers, N) -> + {{Utility, TXID}, Set2} = gb_sets:take_largest(Set), + case ets:member(ar_tx_emitter_recently_emitted, TXID) of + true -> + emit(Set2, Q, Emitting, Peers, MaxPeers, N); + + false -> + % Only a subset of the whole peer list is + % taken, using the max_propagation_peers value: + % the first N peers are used instead of + % the whole list. Unfortunately, this list can + % also contain peers that are not connected. + PeersToSync = lists:sublist(Peers, MaxPeers), + + % For each peer in the sublist, a chunk is + % sent. Workers are taken one by one from + % a FIFO queue to distribute the + % messages across all available workers. + Foldl = fun(Peer, {Acc, Workers}) -> + {{value, W}, Workers2} = queue:out(Workers), + gen_server:cast(W, {emit, TXID, Peer, self()}), + erlang:send_after(?WORKER_TIMEOUT, ?MODULE, {timeout, TXID, Peer}), + {sets:add_element({TXID, Peer}, Acc), queue:in(W, Workers2)} + end, + {Emitting2, Q2} = lists:foldl(Foldl, {Emitting, Q}, PeersToSync), + + %% The cache storing recently emitted transactions is used instead + %% of an explicit synchronization of the propagation queue updates + %% with ar_node_worker - we do not rely on ar_node_worker removing + %% emitted transactions from the queue on time. + ets:insert(ar_tx_emitter_recently_emitted, {TXID}), + erlang:send_after(?CLEANUP_RECENTLY_EMITTED_TIMEOUT, ?MODULE, + {remove_from_recently_emitted, TXID}), + ar_events:send(tx, {emitting_scheduled, Utility, TXID}), + emit(Set2, Q2, Emitting2, Peers, MaxPeers, N - 1) end.
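A minimal sketch of the worker rotation used by emit_set_not_empty/6 (hypothetical standalone function; the queue discipline is the same): the worker at the head of the FIFO queue handles the next peer and is re-enqueued at the tail, so emissions are spread evenly across all workers.

example_round_robin(Peers, Workers0) ->
	lists:foldl(
		fun(Peer, Workers) ->
			{{value, W}, Workers2} = queue:out(Workers),
			%% A real caller would gen_server:cast(W, {emit, ...}) here.
			io:format("assigning peer ~p to worker ~p~n", [Peer, W]),
			queue:in(W, Workers2)
		end, Workers0, Peers).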
diff --git a/apps/arweave/src/ar_tx_emitter_sup.erl b/apps/arweave/src/ar_tx_emitter_sup.erl index 797ca78c2..1f7d326d1 100644 --- a/apps/arweave/src/ar_tx_emitter_sup.erl +++ b/apps/arweave/src/ar_tx_emitter_sup.erl @@ -22,17 +22,38 @@ start_link() -> init([]) -> {ok, Config} = application:get_env(arweave, config), - Workers = lists:map( - fun(Num) -> - Name = list_to_atom("ar_tx_emitter_worker_" ++ integer_to_list(Num)), - {Name, {ar_tx_emitter_worker, start_link, [Name]}, permanent, ?SHUTDOWN_TIMEOUT, - worker, [ar_tx_emitter_worker]} - end, - lists:seq(1, Config#config.max_emitters) - ), - WorkerNames = [element(1, El) || El <- Workers], - Children = [ - ?CHILD_WITH_ARGS(ar_tx_emitter, worker, ar_tx_emitter, [ar_tx_emitter, WorkerNames]) | - Workers - ], - {ok, {{one_for_one, 5, 10}, Children}}. + MaxEmitters = Config#config.max_emitters, + Workers = lists:map(fun tx_workers/1, lists:seq(1, MaxEmitters)), + WorkerNames = [ Name || #{ id := Name } <- Workers], + Emitter = tx_emitter([ar_tx_emitter, WorkerNames]), + ChildrenSpec = [Emitter|Workers], + {ok, {supervisor_spec(), ChildrenSpec}}. + +supervisor_spec() -> + #{ strategy => one_for_one + , intensity => 5 + , period => 10 + }. + +% Helper to create the ar_tx_emitter child spec; the emitter is in charge +% of dispatching the chunks to propagate to the ar_tx_emitter_worker processes. +tx_emitter(Args) -> + #{ id => ar_tx_emitter + , type => worker + , start => {ar_tx_emitter, start_link, Args} + , shutdown => ?SHUTDOWN_TIMEOUT + , modules => [ar_tx_emitter] + , restart => permanent + }. + +% Helper function to create the ar_tx_emitter_worker child specs. +tx_workers(Num) -> + Name = "ar_tx_emitter_worker_" ++ integer_to_list(Num), + Atom = list_to_atom(Name), + #{ id => Atom + , start => {ar_tx_emitter_worker, start_link, [Atom]} + , restart => permanent + , type => worker + , shutdown => ?SHUTDOWN_TIMEOUT + , modules => [ar_tx_emitter_worker] + }. diff --git a/apps/arweave/src/ar_tx_emitter_worker.erl b/apps/arweave/src/ar_tx_emitter_worker.erl index ee23596b7..1bdbd8a34 100644 --- a/apps/arweave/src/ar_tx_emitter_worker.erl +++ b/apps/arweave/src/ar_tx_emitter_worker.erl @@ -22,7 +22,7 @@ start_link(Name) -> %%% gen_server callbacks. %%%=================================================================== -init([]) -> +init(_) -> {ok, #state{}}. handle_call(Request, _From, State) -> @@ -35,27 +35,15 @@ handle_cast({emit, TXID, Peer, ReplyTo}, State) -> ok; TX -> StartedAt = erlang:timestamp(), - TrustedPeers = ar_peers:get_trusted_peers(), - PropagatedTX = tx_to_propagated_tx(TX, Peer, TrustedPeers), - Release = ar_peers:get_peer_release(Peer), - SendFun = - case Release >= 42 of - true -> - fun() -> - Bin = ar_serialize:tx_to_binary(PropagatedTX), - ar_http_iface_client:send_tx_binary(Peer, TXID, Bin) - end; - false -> - fun() -> - JSON = ar_serialize:jsonify(ar_serialize:tx_to_json_struct( - PropagatedTX)), - ar_http_iface_client:send_tx_json(Peer, TXID, JSON) - end - end, - Reply = SendFun(), - PropagationTimeUs = timer:now_diff(erlang:timestamp(), StartedAt), - record_propagation_status(Reply), - record_propagation_rate(tx_propagated_size(TX), PropagationTimeUs) + Opts = #{ connect_timeout => 1 + , timeout => 5 + }, + emit(#{ tx_id => TXID + , peer => Peer + , tx => TX + , started_at => StartedAt + , opts => Opts + }) end, ReplyTo ! {emitted, TXID, Peer}, {noreply, State}; @@ -112,8 +100,52 @@ tx_to_propagated_tx(#tx{ format = 2 } = TX, Peer, TrustedPeers) -> record_propagation_status(not_sent) -> ok; record_propagation_status(Data) -> - prometheus_counter:inc(propagated_transactions_total, [ar_metrics:get_status_class(Data)]). + StatusClass = ar_metrics:get_status_class(Data), + prometheus_counter:inc(propagated_transactions_total, [StatusClass]), + StatusClass. record_propagation_rate(PropagatedSize, PropagationTimeUs) -> BitsPerSecond = PropagatedSize * 1000000 / PropagationTimeUs * 8, - prometheus_histogram:observe(tx_propagation_bits_per_second, BitsPerSecond). + prometheus_histogram:observe(tx_propagation_bits_per_second, BitsPerSecond), + BitsPerSecond. + +% Retrieve propagation details about the peer. +emit(#{ tx := TX, peer := Peer } = Data) -> + TrustedPeers = ar_peers:get_trusted_peers(), + PropagatedTX = tx_to_propagated_tx(TX, Peer, TrustedPeers), + Release = ar_peers:get_peer_release(Peer), + NewData = Data#{ propagated_tx => PropagatedTX + , trusted_peers => TrustedPeers + , release => Release + }, + emit2(NewData). + +% Depending on the release of the peer, a different payload +% is used: binary for release 42 and above, JSON otherwise. +emit2(#{ release := Release, peer := Peer, propagated_tx := PropagatedTX, + tx_id := TXID, opts := Opts } = Data) + when Release >= 42 -> + Bin = ar_serialize:tx_to_binary(PropagatedTX), + Reply = ar_http_iface_client:send_tx_binary(Peer, TXID, Bin, Opts), + NewData = Data#{ reply => Reply }, + emit3(NewData); +emit2(#{ peer := Peer, propagated_tx := PropagatedTX, tx_id := TXID, + opts := Opts } = Data) -> + Serialize = ar_serialize:tx_to_json_struct(PropagatedTX), + JSON = ar_serialize:jsonify(Serialize), + Reply = ar_http_iface_client:send_tx_json(Peer, TXID, JSON, Opts), + NewData = Data#{ reply => Reply }, + emit3(NewData). + +% Deal with the reply and update the propagation statistics. +emit3(#{ started_at := StartedAt, reply := Reply, tx := TX } = Data) -> + Timestamp = erlang:timestamp(), + PropagationTimeUs = timer:now_diff(Timestamp, StartedAt), + PropagationStatus = record_propagation_status(Reply), + PropagatedSize = tx_propagated_size(TX), + PropagationRate = record_propagation_rate(PropagatedSize, PropagationTimeUs), + Data#{ propagation_time_us => PropagationTimeUs + , propagation_status => PropagationStatus + , propagated_size => PropagatedSize + , propagation_rate => PropagationRate + }. diff --git a/apps/arweave/src/ar_vdf.erl b/apps/arweave/src/ar_vdf.erl index 1b3a7f86a..791018ffa 100755 --- a/apps/arweave/src/ar_vdf.erl +++ b/apps/arweave/src/ar_vdf.erl @@ -20,6 +20,7 @@ compute(StartStepNumber, PrevOutput, IterationCount) -> IterationCount). -ifdef(DEBUG). +%% Slow down the VDF calculation in tests since it would complete too fast otherwise.
compute2(StartStepNumber, PrevOutput, IterationCount) -> {ok, Output, CheckpointBuffer} = compute(StartStepNumber, PrevOutput, IterationCount), Checkpoints = [Output | checkpoint_buffer_to_checkpoints(CheckpointBuffer)], diff --git a/apps/arweave/src/ar_verify_chunks.erl b/apps/arweave/src/ar_verify_chunks.erl index f9ca015a4..f4aafed21 100644 --- a/apps/arweave/src/ar_verify_chunks.erl +++ b/apps/arweave/src/ar_verify_chunks.erl @@ -114,7 +114,10 @@ verify_chunks({IntervalEnd, IntervalStart}, Intervals, State) -> verify_chunk({error, Reason}, _Intervals, State) -> #state{ cursor = Cursor } = State, - log_error(get_chunk_error, Cursor, ?DATA_CHUNK_SIZE, [{reason, Reason}], State); + NextCursor = ar_data_sync:advance_chunks_index_cursor(Cursor), + RangeSkipped = NextCursor - Cursor, + State2 = log_error(get_chunk_error, Cursor, RangeSkipped, [{reason, Reason}], State), + State2#state{ cursor = NextCursor }; verify_chunk({ok, _Key, MetaData}, Intervals, State) -> {AbsoluteOffset, _ChunkDataKey, _TXRoot, _DataRoot, _TXPath, _TXRelativeOffset, ChunkSize} = MetaData, @@ -535,11 +538,12 @@ test_verify_chunk() -> {Interval, not_found}, #state{})), ExpectedState = #state{ + cursor = 33554432, %% = 2 * 2^24. From ar_data_sync:advance_chunks_index_cursor/1 packing = unpacked, verify_report = #verify_report{ - total_error_bytes = ?DATA_CHUNK_SIZE, + total_error_bytes = 33554432, total_error_chunks = 1, - error_bytes = #{get_chunk_error => ?DATA_CHUNK_SIZE}, + error_bytes = #{get_chunk_error => 33554432}, error_chunks = #{get_chunk_error => 1} } }, @@ -548,5 +552,5 @@ test_verify_chunk() -> verify_chunk( {error, some_error}, {Interval, not_found}, - #state{ packing = unpacked })), + #state{ cursor = 0, packing = unpacked })), ok. diff --git a/apps/arweave/src/ar_weave.erl b/apps/arweave/src/ar_weave.erl index 388822075..9b39ec04d 100644 --- a/apps/arweave/src/ar_weave.erl +++ b/apps/arweave/src/ar_weave.erl @@ -29,6 +29,9 @@ init(WalletList, Diff) -> Size = 262144 * 3, % Matches ?STRICT_DATA_SPLIT_THRESHOLD in tests. init(WalletList, Diff, Size). +init(_WalletList, _Diff, GenesisDataSize) when GenesisDataSize > (4 * ?GiB) -> + erlang:error({size_exceeds_limit, "GenesisDataSize exceeds 4 GiB"}); + %% @doc Create a genesis block with the given accounts and difficulty. init(WalletList, Diff, GenesisDataSize) -> Key = ar_wallet:new_keyfile(), @@ -114,13 +117,15 @@ get_initial_block_time_history() -> [{120, 1, 1}]. -endif. +%% @doc: create a genesis transaction with the given key and data size. This is only used +%% in tests and when launching a localnet node. create_genesis_tx(Key, Size) -> {_, {_, Pk}} = Key, UnsignedTX = (ar_tx:new())#tx{ owner = Pk, reward = 0, - data = crypto:strong_rand_bytes(Size), + data = ar_test_node:generate_genesis_data(Size), data_size = Size, target = <<>>, quantity = 0, diff --git a/apps/arweave/test/ar_start_from_block_tests.erl b/apps/arweave/test/ar_start_from_block_tests.erl index 7b88a5664..bce66a363 100644 --- a/apps/arweave/test/ar_start_from_block_tests.erl +++ b/apps/arweave/test/ar_start_from_block_tests.erl @@ -114,7 +114,8 @@ restart_from_block(Peer, BH) -> ok = ar_test_node:set_config(Peer, Config#config{ start_from_latest_state = false, start_from_block = BH }), - ar_test_node:restart(Peer). + ar_test_node:restart(Peer), + ar_test_node:remote_call(Peer, ar_test_node, wait_until_syncs_genesis_data, []). 
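The new expected values in test_verify_chunk/0 follow directly from the cursor skip; a quick check of the arithmetic (illustrative shell lines):

%% 1> 2 * (1 bsl 24).  %% the skip described by the test comment above
%% 33554432
%% Starting from cursor 0, RangeSkipped = 33554432 - 0, which is exactly
%% what the report now charges as error bytes instead of one ?DATA_CHUNK_SIZE.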
assert_start_from(ExpectedPeer, Peer, Height) -> ?LOG_ERROR([{event, assert_start_from}, {expected_peer, ExpectedPeer}, {peer, Peer}, {height, Height}]), diff --git a/apps/arweave/test/ar_test_node.erl b/apps/arweave/test/ar_test_node.erl index 86c63e9d0..b9480e2c1 100644 --- a/apps/arweave/test/ar_test_node.erl +++ b/apps/arweave/test/ar_test_node.erl @@ -1,17 +1,19 @@ -module(ar_test_node). %% The new, more flexible, and more user-friendly interface. --export([get_config/1,set_config/2, wait_until_joined/0, restart/0, restart/1, - start_node/2, start_node/3, start_coordinated/1, base_cm_config/1, mine/1, +-export([boot_peers/1, wait_for_peers/1, get_config/1,set_config/2, + update_config/2, update_config/1, + wait_until_joined/0, wait_until_joined/1, restart/0, restart/1, + start_other_node/4, start_node/2, start_node/3, start_coordinated/1, base_cm_config/1, mine/1, wait_until_height/2, http_get_block/2, get_blocks/1, mock_to_force_invalid_h1/0, get_difficulty_for_invalid_hash/0, invalid_solution/0, valid_solution/0, remote_call/4, load_fixture/1, - get_default_storage_module_packing/2]). + get_default_storage_module_packing/2, generate_genesis_data/1, get_genesis_chunk/1]). %% The "legacy" interface. --export([boot_peers/0, boot_peer/1, start/0, start/1, start/2, start/3, start/4, +-export([start/0, start/1, start/2, start/3, start/4, stop/0, stop/1, start_peer/2, start_peer/3, start_peer/4, peer_name/1, peer_port/1, - stop_peers/0, stop_peer/1, connect_to_peer/1, disconnect_from/1, + stop_peers/1, stop_peer/1, connect_to_peer/1, disconnect_from/1, join/2, join_on/1, rejoin_on/1, peer_ip/1, get_node_namespace/0, get_unused_port/0, @@ -26,7 +28,7 @@ post_tx_to_peer/2, post_tx_to_peer/3, assert_post_tx_to_peer/2, assert_post_tx_to_peer/3, post_and_mine/2, post_block/2, post_block/3, send_new_block/2, await_post_block/2, await_post_block/3, sign_block/3, read_block_when_stored/1, - read_block_when_stored/2, get_chunk/2, get_chunk_proof/2, post_chunk/2, + read_block_when_stored/2, get_chunk/2, get_chunk/3, get_chunk_proof/2, post_chunk/2, random_v1_data/1, assert_get_tx_data/3, assert_data_not_found/2, post_tx_json/2, wait_until_syncs_genesis_data/0, @@ -53,38 +55,42 @@ %%%=================================================================== %%% Public interface. %%%=================================================================== -all_peers() -> - [peer1, peer2, peer3, peer4]. - -boot_peers() -> - boot_peers(all_peers()). +all_peers(test) -> + [{test, peer1}, {test, peer2}, {test, peer3}, {test, peer4}]; +all_peers(e2e) -> + [{e2e, peer1}, {e2e, peer2}]. boot_peers([]) -> ok; -boot_peers([Node | Peers]) -> - boot_peer(Node), - boot_peers(Peers). +boot_peers([{Build, Node} | Peers]) -> + boot_peer(Build, Node), + boot_peers(Peers); +boot_peers(TestType) -> + boot_peers(all_peers(TestType)). -boot_peer(Node) -> - try_boot_peer(Node, ?MAX_BOOT_RETRIES). +boot_peer(Build, Node) -> + try_boot_peer(Build, Node, ?MAX_BOOT_RETRIES). -try_boot_peer(_Node, 0) -> +try_boot_peer(_Build, _Node, 0) -> %% You might log an error or handle this case specifically as per your application logic. 
{error, max_retries_exceeded}; -try_boot_peer(Node, Retries) -> +try_boot_peer(Build, Node, Retries) -> NodeName = peer_name(Node), Port = get_unused_port(), Cookie = erlang:get_cookie(), - Paths = code:get_path(), + + Paths = code:get_path(), + filelib:ensure_dir("./.tmp"), Schedulers = erlang:system_info(schedulers_online), Cmd = io_lib:format( - "erl +S ~B:~B -noshell -name ~s -pa ~s -setcookie ~s -run ar main debug port ~p " ++ + "erl +S ~B:~B -pa ~s -config config/sys.config -noshell " ++ + "-name ~s -setcookie ~s -run ar main debug port ~p " ++ "data_dir .tmp/data_test_~s no_auto_join packing_rate 20 " ++ "> ~s-~s.out 2>&1 &", - [Schedulers, Schedulers, NodeName, string:join(Paths, " "), Cookie, Port, NodeName, + [Schedulers, Schedulers, string:join(Paths, " "), NodeName, Cookie, Port, NodeName, Node, get_node_namespace()]), - io:format("Launching peer: ~s~n", [Cmd]), + io:format("Launching peer ~p: ~s~n", [Node, Cmd]), os:cmd(Cmd), case wait_until_node_is_ready(NodeName) of {ok, _Node} -> @@ -92,9 +98,20 @@ try_boot_peer(Node, Retries) -> {node(), NodeName}; {error, Reason} -> io:format("Error starting ~s: ~p. Retries left: ~p~n", [NodeName, Reason, Retries]), - try_boot_peer(Node, Retries - 1) + try_boot_peer(Build, Node, Retries - 1) end. +wait_for_peers([]) -> + ok; +wait_for_peers([{_Build, Node} | Peers]) -> + wait_for_peer(Node), + wait_for_peers(Peers); +wait_for_peers(TestType) -> + wait_for_peers(all_peers(TestType)). + +wait_for_peer(Node) -> + remote_call(Node, application, ensure_all_started, [arweave, permanent], 60000). + self_node() -> list_to_atom(get_node()). @@ -107,14 +124,13 @@ peer_port(Node) -> {ok, Config} = ar_test_node:remote_call(Node, application, get_env, [arweave, config]), Config#config.port. -stop_peers() -> - stop_peers(all_peers()). - stop_peers([]) -> ok; -stop_peers([Node | Peers]) -> +stop_peers([{_Build, Node} | Peers]) -> stop_peer(Node), - stop_peers(Peers). + stop_peers(Peers); +stop_peers(TestType) -> + stop_peers(all_peers(TestType)). stop_peer(Node) -> try @@ -145,13 +161,11 @@ get_config(Node) -> set_config(Node, Config) -> remote_call(Node, application, set_env, [arweave, config, Config]). -%% @doc Start a node with the given genesis block and configuration. -start_node(B0, Config) -> - start_node(B0, Config, true). -start_node(B0, Config, WaitUntilSync) -> +update_config(Node, Config) -> + remote_call(Node, ar_test_node, update_config, [Config]). + +update_config(Config) -> {ok, BaseConfig} = application:get_env(arweave, config), - clean_up_and_stop(), - write_genesis_files(BaseConfig#config.data_dir, B0), Config2 = BaseConfig#config{ start_from_latest_state = Config#config.start_from_latest_state, auto_join = Config#config.auto_join, @@ -171,9 +185,23 @@ start_node(B0, Config, WaitUntilSync) -> cm_peers = Config#config.cm_peers, local_peers = Config#config.local_peers, mine = Config#config.mine, - storage_modules = Config#config.storage_modules + storage_modules = Config#config.storage_modules, + repack_in_place_storage_modules = Config#config.repack_in_place_storage_modules }, ok = application:set_env(arweave, config, Config2), + Config2. + +start_other_node(Node, B0, Config, WaitUntilSync) -> + remote_call(Node, ar_test_node, start_node, [B0, Config, WaitUntilSync], 90000). + +%% @doc Start a node with the given genesis block and configuration. +start_node(B0, Config) -> + start_node(B0, Config, true). 
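A hypothetical usage snippet for the new update_config helpers (illustrative only; peer1 and the mine flag are example values): a test tweaks one field and restarts the peer while the rest of the base configuration is preserved.

%% {ok, Config} = ar_test_node:get_config(peer1),
%% ar_test_node:update_config(peer1, Config#config{ mine = true }),
%% ar_test_node:restart(peer1).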
+start_node(B0, Config, WaitUntilSync) -> + clean_up_and_stop(), + {ok, BaseConfig} = application:get_env(arweave, config), + write_genesis_files(BaseConfig#config.data_dir, B0), + update_config(Config), ar:start_dependencies(), wait_until_joined(), case WaitUntilSync of @@ -306,6 +334,7 @@ load_fixture(Fixture) -> clean_up_and_stop() -> Config = stop(), + ok = filelib:ensure_dir(Config#config.data_dir), {ok, Entries} = file:list_dir_all(Config#config.data_dir), lists:foreach( fun ("wallets") -> @@ -431,7 +460,7 @@ remote_call(Node, Module, Function, Args, Timeout) -> ), case Result of {error, timeout} -> - ?debugFmt("Timed out (~pms) waiting for the rpc reply; module: ~p, function: ~p, " + ?debugFmt("Timed out (~pms) waiting for the rpc reply; module: ~p, function: ~p, " "args: ~p, node: ~p.~n", [Timeout, Module, Function, Args, Node]); _ -> ok @@ -500,11 +529,10 @@ start(B0, RewardAddr, Config, StorageModules) -> restart() -> stop(), ar:start_dependencies(), - wait_until_joined(), - wait_until_syncs_genesis_data(). + wait_until_joined(). restart(Node) -> - remote_call(Node, ?MODULE, restart, []). + remote_call(Node, ?MODULE, restart, [], 90000). start_peer(Node, Args) when is_list(Args) -> remote_call(Node, ?MODULE, start , Args, ?PEER_START_TIMEOUT), @@ -942,21 +970,21 @@ mock_functions(Functions) -> false -> meck:new(Module, [passthrough]), lists:foreach( - fun(Node) -> + fun({_Build, Node}) -> remote_call(Node, meck, new, [Module, [no_link, passthrough]]) end, - all_peers()), + all_peers(test)), maps:put(Module, true, Mocked); true -> Mocked end, lists:foreach( - fun(Node) -> + fun({_Build, Node}) -> meck:expect(Module, Fun, Mock), remote_call(Node, meck, expect, [Module, Fun, Mock]) end, - [main | all_peers()]), + [{test, main} | all_peers(test)]), NewMocked end, maps:new(), @@ -967,10 +995,10 @@ mock_functions(Functions) -> maps:fold( fun(Module, _, _) -> lists:foreach( - fun(Node) -> + fun({_Build, Node}) -> remote_call(Node, meck, unload, [Module]) end, - [main | all_peers()]) + [{test, main} | all_peers(test)]) end, noop, Mocked @@ -991,6 +1019,7 @@ test_with_mocked_functions(Functions, TestFun, Timeout) -> post_and_mine(#{ miner := Node, await_on := AwaitOnNode }, TXs) -> CurrentHeight = remote_call(Node, ar_node, get_height, []), + ?LOG_INFO("post_and_mine height (~p): ~p", [Node, CurrentHeight+1]), lists:foreach(fun(TX) -> assert_post_tx_to_peer(Node, TX) end, TXs), mine(Node), [{H, _, _} | _] = wait_until_height(AwaitOnNode, CurrentHeight + 1), @@ -1103,11 +1132,20 @@ read_block_when_stored(H, IncludeTXs) -> B. get_chunk(Node, Offset) -> + get_chunk(Node, Offset, undefined). + +get_chunk(Node, Offset, Packing) -> + Headers = case Packing of + undefined -> []; + _ -> + PackingBinary = iolist_to_binary(ar_serialize:encode_packing(Packing, false)), + [{<<"x-packing">>, PackingBinary}] + end, ar_http:req(#{ method => get, peer => peer_ip(Node), path => "/chunk/" ++ integer_to_list(Offset), - headers => [{<<"x-bucket-based-offset">>, <<"true">>}] + headers => [{<<"x-bucket-based-offset">>, <<"true">>} | Headers] }). get_chunk_proof(Node, Offset) -> @@ -1213,3 +1251,43 @@ p2p_headers(Node) -> {<<"x-p2p-port">>, integer_to_binary(peer_port(Node))}, {<<"x-release">>, integer_to_binary(?RELEASE_NUMBER)} ]. + +%% @doc: generate binary data to be used as genesis data in tests. That data is incrementing +%% integer data in 4 byte chunks. e.g. +%% <<0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 2, ...>> +%% This makes it easier to assert correct chunk data in tests. 
+-spec generate_genesis_data(integer()) -> binary(). +generate_genesis_data(DataSize) -> + FullChunks = DataSize div 4, + LeftoverBytes = DataSize rem 4, + IncrementingData = generate_data(0, FullChunks * 4, <<>>), + add_padding(IncrementingData, LeftoverBytes). + +%% @doc: get the genesis chunk between a given start and end offset. +-spec get_genesis_chunk(integer()) -> binary(). +-spec get_genesis_chunk(integer(), integer()) -> binary(). +get_genesis_chunk(EndOffset) -> + StartOffset = case EndOffset rem ?DATA_CHUNK_SIZE of + 0 -> + EndOffset - ?DATA_CHUNK_SIZE; + _ -> + (EndOffset div ?DATA_CHUNK_SIZE) * ?DATA_CHUNK_SIZE + end, + get_genesis_chunk(StartOffset, EndOffset). + +get_genesis_chunk(StartOffset, EndOffset) -> + Size = EndOffset - StartOffset, + StartValue = StartOffset div 4, + generate_data(StartValue, Size, <<>>). + +generate_data(CurrentValue, RemainingBytes, Acc) when RemainingBytes >= 4 -> + Chunk = <<CurrentValue:32>>, + generate_data(CurrentValue + 1, RemainingBytes - 4, <<Acc/binary, Chunk/binary>>); +generate_data(_, RemainingBytes, Acc) -> + add_padding(Acc, RemainingBytes). + +add_padding(Data, 0) -> + Data; +add_padding(Data, LeftoverBytes) -> + Padding = <<16#FF:8, 16#FF:8, 16#FF:8, 16#FF:8>>, + <<Data/binary, Padding:LeftoverBytes/binary>>. \ No newline at end of file diff --git a/bin/e2e b/bin/e2e new file mode 100755 index 000000000..d4feaad98 --- /dev/null +++ b/bin/e2e @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + + +SCRIPT_DIR="$(dirname "$0")" +cd "$SCRIPT_DIR/.." + +./ar-rebar3 e2e compile + +export ERL_EPMD_ADDRESS=127.0.0.1 + +ERL_TEST_OPTS="-pa `./rebar3 as e2e path` `./rebar3 as e2e path --base`/lib/arweave/test -config config/sys.config" +echo -e "\033[0;32m===> Running tests...\033[0m" + +set -x +set -o pipefail +stdbuf -oL -eL erl $ERL_TEST_OPTS -noshell -name main-e2e@127.0.0.1 -setcookie e2e -run ar e2e ${@:1} -s init stop 2>&1 | tee main-e2e.out +set +x diff --git a/rebar.config b/rebar.config index f0e8d9779..c4553abf5 100644 --- a/rebar.config +++ b/rebar.config @@ -425,6 +425,23 @@ {d, 'DEBUG', debug}, {d, 'FORKS_RESET', true}, {d, 'NETWORK_NAME', "arweave.localtest"}, + {d, 'TEST', true}, + %% lower multiplier to allow single-block solutions in tests + {d, 'POA1_DIFF_MULTIPLIER', 1}, + %% use sha256 instead of randomx to speed up tests + {d, 'STUB_RANDOMX', true}, + {d, 'VDF_DIFFICULTY', 2}, + {d, 'INITIAL_VDF_DIFFICULTY', 2}, + {d, 'NONCE_LIMITER_RESET_FREQUENCY', 5} + ]} + ]}, + {e2e, [ + {deps, [{meck, "0.8.13"}]}, + {erl_opts, [ + {src_dirs, ["src", "test", "e2e"]}, + {d, 'DEBUG', debug}, + {d, 'FORKS_RESET', true}, + {d, 'NETWORK_NAME', "arweave.e2e"}, {d, 'TEST', true} ]} ]}, @@ -467,7 +484,7 @@ no_inline ]}, {relx, [ - {release, {arweave, "2.7.4"}, [ + {release, {arweave, "2.8.2"}, [ {arweave, load}, {recon, load}, b64fast, diff --git a/rebar.lock b/rebar.lock index 68ce99bf9..c02dd5296 100644 --- a/rebar.lock +++ b/rebar.lock @@ -24,6 +24,10 @@ 0}, {<<"quantile_estimator">>,{pkg,<<"quantile_estimator">>,<<"0.2.1">>},1}, {<<"ranch">>,{pkg,<<"ranch">>,<<"1.8.0">>},1}, + {<<"recon">>, + {git,"https://github.com/ferd/recon.git", + {ref,"c2a76855be3a226a3148c0dfc21ce000b6186ef8"}}, + 0}, {<<"rocksdb">>, {git,"https://github.com/ArweaveTeam/erlang-rocksdb.git", {ref,"f580865c0bc18b0302a6190d7fa85e68ec0762e0"}},