Commit: fixup
Lev Berman committed Jun 23, 2023
1 parent 24ceea9 commit cd76f20
Showing 4 changed files with 252 additions and 222 deletions.
apps/arweave/src/ar_data_sync.erl — 78 changes: 33 additions & 45 deletions
@@ -926,20 +926,10 @@ handle_cast({store_fetched_chunk, Peer, Time, TransferSize, Byte, Proof} = Cast,
 	{BlockStartOffset, BlockEndOffset, TXRoot} = ar_block_index:get_block_bounds(SeekByte),
 	BlockSize = BlockEndOffset - BlockStartOffset,
 	Offset = SeekByte - BlockStartOffset,
-	ValidateDataPathFun =
-		case BlockStartOffset >= get_merkle_rebase_threshold() of
-			true ->
-				fun ar_merkle:validate_path2/4;
-			false ->
-				case BlockStartOffset >= ?STRICT_DATA_SPLIT_THRESHOLD of
-					true ->
-						fun ar_merkle:validate_path_strict_data_split/4;
-					false ->
-						fun ar_merkle:validate_path_strict_borders/4
-				end
-		end,
+	ValidateDataPathRuleset = ar_poa:get_data_path_validation_ruleset(BlockStartOffset,
+			get_merkle_rebase_threshold()),
 	case validate_proof(TXRoot, BlockStartOffset, Offset, BlockSize, Proof,
-			ValidateDataPathFun) of
+			ValidateDataPathRuleset) of
 		{need_unpacking, AbsoluteOffset, ChunkArgs, VArgs} ->
 			{Packing, DataRoot, TXStartOffset, ChunkEndOffset, TXSize, ChunkID} = VArgs,
 			AbsoluteTXStartOffset = BlockStartOffset + TXStartOffset,
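
Note: the removed case expression above is exactly what the new ar_poa:get_data_path_validation_ruleset helper centralizes. A minimal sketch of the selection logic, with both thresholds passed explicitly for illustration (the call sites in this diff pass only the rebase threshold, so the extra argument is an assumption, not the repo's signature):

	%% Sketch only: mirrors the removed inline case. The real helper likely
	%% reads ?STRICT_DATA_SPLIT_THRESHOLD itself.
	-module(ruleset_selection_sketch).
	-export([get_data_path_validation_ruleset/3]).

	get_data_path_validation_ruleset(BlockStartOffset, _StrictThreshold, RebaseThreshold)
			when BlockStartOffset >= RebaseThreshold ->
		%% Past the merkle rebase threshold (previously fun ar_merkle:validate_path2/4).
		offset_rebase_support_ruleset;
	get_data_path_validation_ruleset(BlockStartOffset, StrictThreshold, _RebaseThreshold)
			when BlockStartOffset >= StrictThreshold ->
		%% Past the strict data split threshold
		%% (previously fun ar_merkle:validate_path_strict_data_split/4).
		strict_data_split_ruleset;
	get_data_path_validation_ruleset(_BlockStartOffset, _StrictThreshold, _RebaseThreshold) ->
		%% Legacy offsets (previously fun ar_merkle:validate_path_strict_borders/4).
		strict_borders_ruleset.
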
@@ -1518,22 +1508,12 @@ validate_served_chunk(Args) ->
 		false ->
 			case ar_block_index:get_block_bounds(Offset - 1) of
 				{BlockStart, BlockEnd, TXRoot} ->
-					ValidateDataPathFun =
-						case BlockStart >= get_merkle_rebase_threshold() of
-							true ->
-								fun ar_merkle:validate_path2/4;
-							false ->
-								case BlockStart >= ?STRICT_DATA_SPLIT_THRESHOLD of
-									true ->
-										fun ar_merkle:validate_path_strict_data_split/4;
-									false ->
-										fun ar_merkle:validate_path_strict_borders/4
-								end
-						end,
+					ValidateDataPathRuleset = ar_poa:get_data_path_validation_ruleset(
+							BlockStart, get_merkle_rebase_threshold()),
 					BlockSize = BlockEnd - BlockStart,
 					ChunkOffset = Offset - BlockStart - 1,
 					case validate_proof2({TXRoot, ChunkOffset, BlockSize, DataPath, TXPath,
-							ChunkSize, ValidateDataPathFun}) of
+							ChunkSize, ValidateDataPathRuleset}) of
 						{true, ChunkID} ->
 							{true, ChunkID};
 						false ->
@@ -2383,15 +2363,16 @@ enqueue_peer_range(Peer, RangeStart, RangeEnd, ChunkOffsets, {Q, QIntervals}) ->
 	QIntervals2 = ar_intervals:add(QIntervals, RangeEnd, RangeStart),
 	{Q2, QIntervals2}.
 
-validate_proof(TXRoot, BlockStartOffset, Offset, BlockSize, Proof, ValidateDataPathFun) ->
+validate_proof(TXRoot, BlockStartOffset, Offset, BlockSize, Proof, ValidateDataPathRuleset) ->
 	#{ data_path := DataPath, tx_path := TXPath, chunk := Chunk, packing := Packing } = Proof,
 	case ar_merkle:validate_path(TXRoot, Offset, BlockSize, TXPath) of
 		false ->
 			false;
 		{DataRoot, TXStartOffset, TXEndOffset} ->
 			TXSize = TXEndOffset - TXStartOffset,
 			ChunkOffset = Offset - TXStartOffset,
-			case ValidateDataPathFun(DataRoot, ChunkOffset, TXSize, DataPath) of
+			case ar_merkle:validate_path(DataRoot, ChunkOffset, TXSize, DataPath,
+					ValidateDataPathRuleset) of
 				false ->
 					false;
 				{ChunkID, ChunkStartOffset, ChunkEndOffset} ->
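
Note: after this refactor every data-path check goes through ar_merkle:validate_path/5, which, as both call sites in this hunk show, returns false or a {ChunkID, ChunkStartOffset, ChunkEndOffset} triple whichever ruleset atom is passed. A hedged usage sketch (the wrapper name is hypothetical):

	%% Hypothetical wrapper over the consolidated API; the return contract
	%% is taken from the call sites in this diff.
	check_chunk(DataRoot, ChunkOffset, TXSize, DataPath, Ruleset) ->
		case ar_merkle:validate_path(DataRoot, ChunkOffset, TXSize, DataPath, Ruleset) of
			false ->
				invalid;
			{ChunkID, ChunkStartOffset, ChunkEndOffset} ->
				{valid, ChunkID, ChunkEndOffset - ChunkStartOffset}
		end.
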
@@ -2421,14 +2402,15 @@ validate_proof(TXRoot, BlockStartOffset, Offset, BlockSize, Proof, ValidateDataP
 	end.
 
 validate_proof2(Args) ->
-	{TXRoot, Offset, BlockSize, DataPath, TXPath, ChunkSize, ValidateDataPathFun} = Args,
+	{TXRoot, Offset, BlockSize, DataPath, TXPath, ChunkSize, ValidateDataPathRuleset} = Args,
 	case ar_merkle:validate_path(TXRoot, Offset, BlockSize, TXPath) of
 		false ->
 			false;
 		{DataRoot, TXStartOffset, TXEndOffset} ->
 			TXSize = TXEndOffset - TXStartOffset,
 			ChunkOffset = Offset - TXStartOffset,
-			case ValidateDataPathFun(DataRoot, ChunkOffset, TXSize, DataPath) of
+			case ar_merkle:validate_path(DataRoot, ChunkOffset, TXSize, DataPath,
+					ValidateDataPathRuleset) of
 				{ChunkID, ChunkStartOffset, ChunkEndOffset} ->
 					case ChunkEndOffset - ChunkStartOffset == ChunkSize of
 						false ->
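
Note: validate_proof2 additionally requires the proven chunk boundaries to match the size the serving peer claimed. A standalone restatement of that guard (the function name is an assumption for illustration):

	%% Takes the validate_path/5 result and the claimed chunk size; sketch
	%% of the check validate_proof2 applies after path validation succeeds.
	chunk_size_matches({ChunkID, ChunkStartOffset, ChunkEndOffset}, ChunkSize)
			when ChunkEndOffset - ChunkStartOffset == ChunkSize ->
		{true, ChunkID};
	chunk_size_matches(_PathResult, _ChunkSize) ->
		false.
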
@@ -2442,19 +2424,21 @@ validate_proof2(Args) ->
 	end.
 
 validate_data_path(DataRoot, Offset, TXSize, DataPath, Chunk) ->
-	Base = ar_merkle:validate_path_strict_borders(DataRoot, Offset, TXSize, DataPath),
-	Strict = ar_merkle:validate_path_strict_data_split(DataRoot, Offset, TXSize, DataPath),
-	Rebase = ar_merkle:validate_path2(DataRoot, Offset, TXSize, DataPath),
+	Base = ar_merkle:validate_path(DataRoot, Offset, TXSize, DataPath, strict_borders_ruleset),
+	Strict = ar_merkle:validate_path(DataRoot, Offset, TXSize, DataPath,
+			strict_data_split_ruleset),
+	Rebase = ar_merkle:validate_path(DataRoot, Offset, TXSize, DataPath,
+			offset_rebase_support_ruleset),
 	Result =
 		case {Base, Strict, Rebase} of
 			{false, false, false} ->
 				false;
-			{_, {_, _, _} = R, _} ->
-				R;
-			{_, _, {_, _, _} = R} ->
-				R;
-			{{_, _, _} = R, _, _} ->
-				R
+			{_, {_, _, _} = StrictResult, _} ->
+				StrictResult;
+			{_, _, {_, _, _} = RebaseResult} ->
+				RebaseResult;
+			{{_, _, _} = BaseResult, _, _} ->
+				BaseResult
 		end,
 	case Result of
 		false ->
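
Note: validate_data_path/5 runs all three rulesets because disk-pool chunks are validated before their final absolute offset (and thus the applicable threshold) is known; the three results become the PassedBase/PassedStrictValidation/PassedRebaseValidation flags matched in process_disk_pool_chunk_offset below. A sketch of deriving the flags (the helper name is an assumption):

	%% Sketch: reduce the three per-ruleset results to booleans in the order
	%% the disk-pool code consumes them: {PassedBase, PassedStrict, PassedRebase}.
	passed_validation_flags(DataRoot, Offset, TXSize, DataPath) ->
		[Base, Strict, Rebase] =
			[ar_merkle:validate_path(DataRoot, Offset, TXSize, DataPath, Ruleset) =/= false
					|| Ruleset <- [strict_borders_ruleset, strict_data_split_ruleset,
							offset_rebase_support_ruleset]],
		{Base, Strict, Rebase}.
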
@@ -2966,13 +2950,17 @@ process_disk_pool_chunk_offset(Iterator, TXRoot, TXPath, AbsoluteOffset, MayConc
 	case {AbsoluteOffset >= get_merkle_rebase_threshold(),
 			AbsoluteOffset >= ?STRICT_DATA_SPLIT_THRESHOLD,
 			PassedBase, PassedStrictValidation, PassedRebaseValidation} of
+		%% At the rebase threshold we relax some of the validation rules so the strict
+		%% validation may fail.
 		{true, true, _, _, true} ->
 			true;
-		{false, true, true, true, true} ->
-			true;
-		{false, false, true, false, false} ->
+		%% Between the "strict" and "rebase" thresholds the "base" and "strict split"
+		%% rules must be followed.
+		{false, true, true, true, _} ->
 			true;
-		{false, false, true, true, true} ->
+		%% Before the strict threshold only the base (most relaxed) validation must
+		%% pass.
+		{false, false, true, _, _} ->
 			true;
 		_ ->
 			false
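
Note: the accepted combinations in this hunk reduce to one rule per offset regime. A compact restatement (sketch; it assumes the rebase threshold lies at or above the strict split threshold, so the {true, false} combination cannot occur):

	%% Sketch of the acceptance rule per offset regime; the arguments are the
	%% two threshold comparisons followed by the three pass/fail flags.
	is_acceptable_split(true, _AboveStrict, _Base, _Strict, Rebase) ->
		Rebase;                %% rebase regime: only the relaxed rules must pass
	is_acceptable_split(false, true, Base, Strict, _Rebase) ->
		Base andalso Strict;   %% strict regime: base and strict split rules
	is_acceptable_split(false, false, Base, _Strict, _Rebase) ->
		Base.                  %% legacy regime: base rules only
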
@@ -3056,8 +3044,8 @@ process_disk_pool_matured_chunk_offset(Iterator, TXRoot, TXPath, AbsoluteOffset,
 	%% The other modules will either sync the chunk themselves or copy it over from the
 	%% other module the next time the node is restarted.
 	#sync_data_state{ chunk_data_db = ChunkDataDB, store_id = DefaultStoreID } = State,
-	{Offset, _, ChunkSize, DataRoot, DataPathHash, ChunkDataKey, Key, _BaseSplit, _StrictSplit,
-			_RebaseSupportSplit} = Args,
+	{Offset, _, ChunkSize, DataRoot, DataPathHash, ChunkDataKey, Key, _PassedBaseValidation,
+			_PassedStrictValidation, _PassedRebaseValidation} = Args,
 	FindStorageModule =
 		case find_storage_module_for_disk_pool_chunk(AbsoluteOffset) of
 			not_found ->