Commit fc95021

Author: Lucius (committed)

Changed default value of block-log-split

1 parent 306dbd6

File tree: 32 files changed (+229 / -98 lines)

.gitlab-ci.yaml (+5 -5)

@@ -338,6 +338,7 @@ mirrornet_replay_test:
  script:
  - echo "Generating artifacts file for block_log."
  - time ./hived
+ --block-log-split=-1
  -d "$MIRRORNET_SOURCE_5M_DATA_DIR"
  - echo "Compressing block log to $NUMBER_OF_BLOCKS blocks with $NUMBER_OF_PROCESSES processes"
  - time ./compress_block_log
@@ -357,6 +358,7 @@ mirrornet_replay_test:
  --jobs $NUMBER_OF_PROCESSES
  - echo "Starting hived replay"
  - ./hived
+ --block-log-split=-1
  -d "$MIRRORNET_BLOCKCHAIN_DATA_DIR"
  --chain-id $MIRRORNET_CHAIN_ID
  --skeleton-key "$MIRRORNET_SKELETON_KEY"
@@ -473,7 +475,7 @@ formatting_with_black_check:
  after_script:
  - |
  shopt -s globstar
- tar --exclude='**/generated_during_*/**/block_log' --exclude='**/generated_during_*/**/block_log.artifacts' --exclude='**/generated_during_*/**/shared_memory.bin' --exclude='**/generated_during_*/**/*.sst' -cf - **/generated_during_* | 7z a -si generated_during.tar.7z
+ tar --exclude='**/generated_during_*/**/block_log*' --exclude='**/generated_during_*/**/shared_memory.bin' --exclude='**/generated_during_*/**/*.sst' -cf - **/generated_during_* | 7z a -si generated_during.tar.7z
  tar -cf - **/generated_by_package_fixtures | 7z a -si generated_by_package_fixtures.tar.7z
  artifacts:
  reports:
@@ -823,8 +825,7 @@ python_pattern_mainnet_tests:
  - "**/generated_during_*"
  - "**/generated_by_package_fixtures"
  exclude:
- - "**/generated_during_*/**/block_log"
- - "**/generated_during_*/**/block_log.artifacts"
+ - "**/generated_during_*/**/block_log*"
  - "**/generated_during_*/**/shared_memory.bin"
  - "**/generated_during_*/**/*.sst"
  when: always
@@ -896,8 +897,7 @@ message_format_mainnet_5m_tests:
  - "**/generated_during_*"
  - "**/generated_by_package_fixtures"
  exclude:
- - "**/generated_during_*/**/block_log"
- - "**/generated_during_*/**/block_log.artifacts"
+ - "**/generated_during_*/**/block_log*"
  - "**/generated_during_*/**/shared_memory.bin"
  - "**/generated_during_*/**/*.sst"
  when: always

libraries/chain/block_log_wrapper.cpp (+59 -36)

@@ -258,12 +258,12 @@ std::shared_ptr<full_block_type> block_log_wrapper::get_block_by_number( uint32_
  return std::shared_ptr<full_block_type>();

  const block_log_ptr_t log = get_block_log_corresponding_to( block_num );
- if( _block_log_split < MAX_FILES_OF_SPLIT_BLOCK_LOG )
+ if( _block_log_split <= MAX_FILES_OF_SPLIT_BLOCK_LOG )
  {
  FC_ASSERT( log,
  "Block ${num} has been pruned (oldest stored block is ${old}). "
  "Consider disabling pruning or increasing block-log-split value (currently ${part_count}).",
- ("num", block_num)("old", get_tail_block_num())("part_count", _block_log_split) );
+ ("num", block_num)("old", get_actual_tail_block_num())("part_count", _block_log_split) );
  }
  else
  {
@@ -392,6 +392,7 @@ block_id_type block_log_wrapper::read_block_id_by_num( uint32_t block_num ) cons
  const block_log_wrapper::block_log_ptr_t block_log_wrapper::get_block_log_corresponding_to(
  uint32_t block_num ) const
  {
+ FC_ASSERT( get_head_log(), "Using unopened block log?" );
  uint32_t request_part_number = get_part_number_for_block( block_num, _max_blocks_in_log_file );
  if( request_part_number > _logs.size() )
  return block_log_ptr_t();
@@ -410,7 +411,12 @@ block_log_wrapper::full_block_range_t block_log_wrapper::read_block_range_by_num
  if( not current_log )
  return result;

- auto part_result = current_log->read_block_range_by_num( starting_block_num, count );
+ FC_ASSERT( current_log->head(), "Empty or unopened log! ${io}", ("io", current_log->is_open() ) );
+
+ // block_log class is not prepared for over-the-head count parameter.
+ uint32_t actual_count =
+ std::min<uint32_t>( count, current_log->head()->get_block_num() - starting_block_num +1);
+ auto part_result = current_log->read_block_range_by_num( starting_block_num, actual_count );
  size_t part_result_count = part_result.size();
  if( part_result_count == 0 )
  return result;
@@ -584,27 +590,13 @@ void block_log_wrapper::common_open_and_init( bool read_only, bool allow_splitti
  if( _block_log_split == LEGACY_SINGLE_FILE_BLOCK_LOG )
  {
  auto single_part_log = std::make_shared<block_log>( _app );
- internal_open_and_init( single_part_log,
- _open_args.data_dir / block_log_file_name_info::_legacy_file_name,
- read_only );
+ auto log_file_path = _open_args.data_dir / block_log_file_name_info::_legacy_file_name;
+ ilog("Opening monolithic block log file ${log_file_path}", (log_file_path));
+ internal_open_and_init( single_part_log, log_file_path, read_only );
  _logs.push_back( single_part_log );
  return;
  }

- uint32_t actual_tail_number_needed = 0;
- if( _open_args.load_snapshot || _open_args.force_replay )
- {
- // When hard replay is in cards, ignore current state head (will be overridden anyway),
- // and split as many parts as it's possible from the source log.
- actual_tail_number_needed = 1;
- }
- else if ( _open_args.replay && not _open_args.force_replay && state_head_block )
- {
- // For regular replay require all parts beginning from state head block to be present & opened.
- actual_tail_number_needed =
- get_part_number_for_block( state_head_block->get_block_num(), _max_blocks_in_log_file );
- }
-
  // Any log file created on previous run?
  part_file_names_t part_file_names;
  look_for_part_files( part_file_names );
@@ -656,26 +648,42 @@ void block_log_wrapper::common_open_and_init( bool read_only, bool allow_splitti

  // Make sure all parts required by configuration are there.
  uint32_t head_part_number = part_file_names.crbegin()->part_number;
- // Determine actual needed tail part number (if not set earlier).
- if( actual_tail_number_needed == 0 )
+ // Determine actual needed tail part number.
+ uint32_t actual_tail_number_needed = head_part_number;
+ if( _open_args.load_snapshot || _open_args.force_replay ||
+ head_part_number <= (unsigned int)_block_log_split )
+ {
+ // When hard replay is in cards, ignore current state head (will be overridden anyway),
+ // and split as many parts as it's possible from the source log.
+ // Do the same when block log is effectively not pruned (yet).
+ actual_tail_number_needed = 1;
+ }
+ else
  {
- // Is block log not pruned or effectively not pruned (yet)?
- if( _block_log_split == MAX_FILES_OF_SPLIT_BLOCK_LOG ||
- head_part_number <= (unsigned int)_block_log_split )
+ if ( _open_args.replay && not _open_args.force_replay && state_head_block )
  {
- // Expected tail part is obviously 1 - we need each part.
- actual_tail_number_needed = 1;
+ // For regular replay require all parts beginning from state head block to be present & opened.
+ actual_tail_number_needed =
+ get_part_number_for_block( state_head_block->get_block_num(), _max_blocks_in_log_file );
  }
- else // pruned log here
+
+ if( head_part_number > (uint32_t)_block_log_split )
  {
- // not all parts are needed here, i.e. head_part_number > _block_log_split
- actual_tail_number_needed = head_part_number - _block_log_split;
+ // Make sure that we require sufficient number of parts.
+ actual_tail_number_needed =
+ std::min<uint32_t>( actual_tail_number_needed, head_part_number - _block_log_split);
  }
  }
  force_parts_exist( head_part_number, actual_tail_number_needed, part_file_names,
  allow_splitting_monolithic_log, state_head_block );

  // Open all needed parts.
+ if( actual_tail_number_needed < head_part_number )
+ ilog( "Opening split block log files numbered ${from} to ${to}",
+ ("from", actual_tail_number_needed)("to", head_part_number) );
+ else
+ ilog( "Opening single split block log file numbered ${to}", ("to", head_part_number) );
+
  _logs.resize( head_part_number, block_log_ptr_t() );
  for( auto cit = part_file_names.cbegin(); cit != part_file_names.cend(); ++cit )
  {
@@ -766,16 +774,31 @@ void block_log_wrapper::internal_append( uint32_t block_num, append_t do_appendi
  }
  }

- uint32_t block_log_wrapper::get_tail_block_num() const
+ uint32_t block_log_wrapper::get_actual_tail_block_num() const
  {
- if( _block_log_split == LEGACY_SINGLE_FILE_BLOCK_LOG ||
- _block_log_split == MAX_FILES_OF_SPLIT_BLOCK_LOG )
+ //FC_ASSERT( get_head_log(), "Using unopened block log?" );
+ size_t log_number = _logs.size();
+ FC_ASSERT( log_number > 0, "Uninitialized block_log_wrapper object?" );
+ FC_ASSERT( std::atomic_load( &(_logs[log_number -1]) ), "Head log not initialized?" );
+
+ uint32_t block_num = 0;
+ do
  {
- return 1;
+ block_num = 1 + (log_number-1) * BLOCKS_IN_SPLIT_BLOCK_LOG_FILE;
+ --log_number;
+ } while ( log_number > 0 && std::atomic_load( &(_logs[log_number -1]) ) );
+
+ return block_num;
+
+ /*uint32_t block_num = 1;
+ block_log_ptr_t another_log;
+ for(; not another_log; block_num += BLOCKS_IN_SPLIT_BLOCK_LOG_FILE )
+ {
+ another_log = get_block_log_corresponding_to( block_num );
  }
+ while( not another_log ); // Will finally happen, see initial assertion.

- int oldest_available_part = std::max<int>( _logs.size() - _block_log_split, 1 );
- return (oldest_available_part -1) * BLOCKS_IN_SPLIT_BLOCK_LOG_FILE + 1;
+ return block_num;*/
  }

  } } //hive::chain
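
The reworked get_actual_tail_block_num() above walks the _logs vector backwards from the head part and returns the first block number of the oldest part that is still opened, instead of deriving the tail from _block_log_split alone. The sketch below replays that calculation in isolation; it is a minimal illustration, not the production code. BLOCKS_PER_PART stands in for BLOCKS_IN_SPLIT_BLOCK_LOG_FILE (assumed to be 1,000,000, as the block-log-split option text describes), and the parts_open vector of flags stands in for the real vector of per-part block_log pointers.

// Minimal sketch (not hived code): how the tail block number falls out of the
// set of opened split-log parts. Part numbers are 1-based; part N starts at
// block (N-1)*BLOCKS_PER_PART + 1.
#include <cassert>
#include <cstdint>
#include <iostream>
#include <vector>

constexpr uint32_t BLOCKS_PER_PART = 1'000'000; // assumed value of BLOCKS_IN_SPLIT_BLOCK_LOG_FILE

// parts_open[i] says whether split part i+1 is opened (the real code checks _logs entries).
uint32_t actual_tail_block_num( const std::vector<bool>& parts_open )
{
  size_t part = parts_open.size();
  assert( part > 0 && parts_open[ part - 1 ] ); // mirrors the "head log not initialized" assertions
  uint32_t block_num = 0;
  do
  {
    block_num = 1 + uint32_t( part - 1 ) * BLOCKS_PER_PART; // first block kept in this part
    --part;
  } while( part > 0 && parts_open[ part - 1 ] ); // keep walking while older parts are still opened
  return block_num;
}

int main()
{
  // Pruned log: 7 parts exist, only parts 5..7 are opened -> oldest stored block is 4000001.
  std::cout << actual_tail_block_num( { false, false, false, false, true, true, true } ) << "\n";
  // Not pruned: every part opened -> oldest stored block is 1.
  std::cout << actual_tail_block_num( { true, true, true } ) << "\n";
}

Under these assumptions, a node run with block-log-split=2 that has written seven parts keeps only parts 5 to 7 open and reports block 4,000,001 as its tail, which is exactly the value the pruning FC_ASSERT message above would print.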

libraries/chain/include/hive/chain/block_log_wrapper.hpp (+1 -1)

@@ -174,7 +174,7 @@ namespace hive { namespace chain {
  }

  /// Returns the number of oldest stored block.
- uint32_t get_tail_block_num() const;
+ uint32_t get_actual_tail_block_num() const;

  void dispose_garbage( bool closing_time );

libraries/plugins/chain/chain_plugin.cpp (+7 -1)

@@ -1398,7 +1398,7 @@ void chain_plugin::set_program_options(options_description& cli, options_descrip
  #endif
  ("rc-stats-report-type", bpo::value<string>()->default_value( "REGULAR" ), "Level of detail of daily RC stat reports: NONE, MINIMAL, REGULAR, FULL. Default REGULAR." )
  ("rc-stats-report-output", bpo::value<string>()->default_value( "ILOG" ), "Where to put daily RC stat reports: DLOG, ILOG, NOTIFY, LOG_NOTIFY. Default ILOG." )
- ("block-log-split", bpo::value<int>()->default_value( -1 ), "Whether the block log should be single file (-1), not used at all & keeping only head block in memory (0), or split into files each containing 1M blocks & keeping N full million latest blocks (N). Default -1." )
+ ("block-log-split", bpo::value<int>()->default_value( 9999 ), "Whether the block log should be single file (-1), not used at all & keeping only head block in memory (0), or split into files each containing 1M blocks & keeping N full million latest blocks (N). Default 9999." )
  ;
  cli.add_options()
  ("replay-blockchain", bpo::bool_switch()->default_value(false), "clear chain database and replay all blocks" )
@@ -1426,6 +1426,12 @@ void chain_plugin::plugin_initialize(const variables_map& options)
  my.reset( new detail::chain_plugin_impl( get_app() ) );

  my->block_log_split = options.at( "block-log-split" ).as< int >();
+ std::string block_storage_description( "single block in memory" );
+ if( my->block_log_split < 0 )
+ block_storage_description = "legacy monolithic file";
+ else if( my->block_log_split > 0 )
+ block_storage_description = "split into multiple files";
+ ilog("Block storage is configured to be ${bs}.", ("bs", block_storage_description));
  my->block_storage = block_storage_i::create_storage( my->block_log_split, get_app(), my->thread_pool );
  my->default_block_writer =
  std::make_unique< sync_block_writer >( *( my->block_storage.get() ), my->db, get_app() );
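
For reference, the option text above defines three regimes for block-log-split: -1 keeps the legacy monolithic block_log, 0 keeps no block log on disk (only the head block in memory), and N > 0 splits the log into 1M-block parts while retaining the newest N of them. With the default now 9999, a node effectively keeps the whole history, just split into parts, unless the operator lowers the value; jobs that still want a monolithic file (like the mirrornet CI jobs above) must pass --block-log-split=-1 explicitly. The snippet below is a small standalone sketch of that classification, mirroring the logging added in plugin_initialize(); describe_block_storage() is a hypothetical helper, not part of hived.

// Sketch only: classify a block-log-split value the way the new
// plugin_initialize() logging does. Not hived code.
#include <iostream>
#include <string>

std::string describe_block_storage( int block_log_split )
{
  if( block_log_split < 0 )
    return "legacy monolithic file";   // single block_log file, the old default (-1)
  if( block_log_split == 0 )
    return "single block in memory";   // no block log kept on disk
  return "split into multiple files";  // 1M-block parts, newest N kept
}

int main()
{
  std::cout << describe_block_storage( -1 ) << "\n";   // legacy monolithic file
  std::cout << describe_block_storage( 0 ) << "\n";    // single block in memory
  std::cout << describe_block_storage( 9999 ) << "\n"; // new default: split into multiple files
}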

tests/integration/artifacts_generation/test_artifacts_generation.sh (+21)

@@ -5,7 +5,10 @@ set -xeuo pipefail
  HIVED_BINARY_PATH=${1}
  BLOCK_LOG_UTIL_BINARY_PATH=${2}

+ # Note that 100k blocks fit within a single split block log file
+ # thus block_log.100k file can be used as both block_log & block_log_part.0001
  SOURCE_100K_BLOCK_LOG_PATTERN="blockchain/block_log.100k"
+ # Same story with artifacts file, block_log.artifacts can be used as block_log_part.0001.artifacts here
  SOURCE_ARTIFACTS_V_1_0_PATTERN="blockchain/block_log.artifacts.v_1_0"
  SOURCE_ARTIFACTS_V_1_1_INTERRUPTED_PATTERN="blockchain/block_log.artifacts.v_1_1.interrupted_at_19313"

@@ -15,19 +18,26 @@ test_generate_artifacts_from_scratch_by_hived() {
  local TEST_BLOCKCHAIN_DIR="$TEST_DATA_DIR/blockchain"
  mkdir -p "$TEST_BLOCKCHAIN_DIR"
  cp "$SOURCE_100K_BLOCK_LOG_PATTERN" "$TEST_BLOCKCHAIN_DIR/block_log"
+ # Note that both block_log.artifacts & block_log_part.0001.artifacts are created below
+ # due to a combination of hived's default split setting & auto-split feature
  "$HIVED_BINARY_PATH" -d "$TEST_DATA_DIR" --replay --exit-before-sync
  "$BLOCK_LOG_UTIL_BINARY_PATH" --get-block-artifacts --block-log "$TEST_BLOCKCHAIN_DIR/block_log" --do-full-artifacts-verification-match-check --header-only
+ "$BLOCK_LOG_UTIL_BINARY_PATH" --get-block-artifacts --block-log "$TEST_BLOCKCHAIN_DIR/block_log_part.0001" --do-full-artifacts-verification-match-check --header-only
  }

  test_generate_artifacts_override_old_file_by_hived() {
  echo "Test 2 - override old artifacts file and create a new one by hived."
  local TEST_DATA_DIR="override_old_file_and_generate_from_scratch/by_hived"
  local TEST_BLOCKCHAIN_DIR="$TEST_DATA_DIR/blockchain"
  mkdir -p "$TEST_BLOCKCHAIN_DIR"
+ # Note that both block_log.artifacts & block_log_part.0001.artifacts are overridden below
+ # due to a combination of hived's default split setting & auto-split feature
  cp "$SOURCE_100K_BLOCK_LOG_PATTERN" "$TEST_BLOCKCHAIN_DIR/block_log"
  cp "$SOURCE_ARTIFACTS_V_1_0_PATTERN" "$TEST_BLOCKCHAIN_DIR/block_log.artifacts"
+ cp "$SOURCE_ARTIFACTS_V_1_0_PATTERN" "$TEST_BLOCKCHAIN_DIR/block_log_part.0001.artifacts"
  "$HIVED_BINARY_PATH" -d "$TEST_DATA_DIR" --replay --exit-before-sync
  "$BLOCK_LOG_UTIL_BINARY_PATH" --get-block-artifacts --block-log "$TEST_BLOCKCHAIN_DIR/block_log" --do-full-artifacts-verification-match-check --header-only
+ "$BLOCK_LOG_UTIL_BINARY_PATH" --get-block-artifacts --block-log "$TEST_BLOCKCHAIN_DIR/block_log_part.0001" --do-full-artifacts-verification-match-check --header-only
  }

  test_resume_artifacts_generating_process_by_hived() {
@@ -48,6 +58,9 @@ test_generate_artifacts_from_scratch_by_block_log_util() {
  cp "$SOURCE_100K_BLOCK_LOG_PATTERN" "$TEST_BLOCKCHAIN_DIR/block_log"
  "$BLOCK_LOG_UTIL_BINARY_PATH" --generate-artifacts --block-log "$TEST_BLOCKCHAIN_DIR/block_log"
  "$BLOCK_LOG_UTIL_BINARY_PATH" --get-block-artifacts --block-log "$TEST_BLOCKCHAIN_DIR/block_log" --do-full-artifacts-verification-match-check --header-only
+ cp "$SOURCE_100K_BLOCK_LOG_PATTERN" "$TEST_BLOCKCHAIN_DIR/block_log_part.0001"
+ "$BLOCK_LOG_UTIL_BINARY_PATH" --generate-artifacts --block-log "$TEST_BLOCKCHAIN_DIR/block_log_part.0001"
+ "$BLOCK_LOG_UTIL_BINARY_PATH" --get-block-artifacts --block-log "$TEST_BLOCKCHAIN_DIR/block_log_part.0001" --do-full-artifacts-verification-match-check --header-only
  }

  test_generate_artifacts_override_old_file_by_block_log_util() {
@@ -58,6 +71,10 @@ test_generate_artifacts_override_old_file_by_block_log_util() {
  cp "$SOURCE_ARTIFACTS_V_1_0_PATTERN" "$TEST_BLOCKCHAIN_DIR/block_log.artifacts"
  "$BLOCK_LOG_UTIL_BINARY_PATH" --generate-artifacts --block-log "$TEST_BLOCKCHAIN_DIR/block_log"
  "$BLOCK_LOG_UTIL_BINARY_PATH" --get-block-artifacts --block-log "$TEST_BLOCKCHAIN_DIR/block_log" --do-full-artifacts-verification-match-check --header-only
+ cp "$SOURCE_100K_BLOCK_LOG_PATTERN" "$TEST_BLOCKCHAIN_DIR/block_log_part.0001"
+ cp "$SOURCE_ARTIFACTS_V_1_0_PATTERN" "$TEST_BLOCKCHAIN_DIR/block_log_part.0001.artifacts"
+ "$BLOCK_LOG_UTIL_BINARY_PATH" --generate-artifacts --block-log "$TEST_BLOCKCHAIN_DIR/block_log_part.0001"
+ "$BLOCK_LOG_UTIL_BINARY_PATH" --get-block-artifacts --block-log "$TEST_BLOCKCHAIN_DIR/block_log_part.0001" --do-full-artifacts-verification-match-check --header-only
  }

  test_resume_artifacts_generating_process_by_block_log_util() {
@@ -68,6 +85,10 @@ test_resume_artifacts_generating_process_by_block_log_util() {
  cp "$SOURCE_ARTIFACTS_V_1_1_INTERRUPTED_PATTERN" "$TEST_BLOCKCHAIN_DIR/block_log.artifacts"
  "$BLOCK_LOG_UTIL_BINARY_PATH" --generate-artifacts --block-log "$TEST_BLOCKCHAIN_DIR/block_log"
  "$BLOCK_LOG_UTIL_BINARY_PATH" --get-block-artifacts --block-log "$TEST_BLOCKCHAIN_DIR/block_log" --do-full-artifacts-verification-match-check --header-only
+ cp "$SOURCE_100K_BLOCK_LOG_PATTERN" "$TEST_BLOCKCHAIN_DIR/block_log_part.0001"
+ cp "$SOURCE_ARTIFACTS_V_1_1_INTERRUPTED_PATTERN" "$TEST_BLOCKCHAIN_DIR/block_log_part.0001.artifacts"
+ "$BLOCK_LOG_UTIL_BINARY_PATH" --generate-artifacts --block-log "$TEST_BLOCKCHAIN_DIR/block_log_part.0001"
+ "$BLOCK_LOG_UTIL_BINARY_PATH" --get-block-artifacts --block-log "$TEST_BLOCKCHAIN_DIR/block_log_part.0001" --do-full-artifacts-verification-match-check --header-only
  }

  test_generate_artifacts_from_scratch_by_hived
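
The added test steps rely on the split-part naming visible above: the first part of a split log is called block_log_part.0001 and its artifacts file block_log_part.0001.artifacts, so a 100k-block log can double as either the legacy block_log or part 0001. The helper below is purely illustrative (hived's real naming lives in block_log_file_name_info); it just assumes the 4-digit zero-padded part number that the test's file names suggest.

// Illustrative helper only: build the part file names the test copies around
// (block_log_part.0001, block_log_part.0001.artifacts). Assumes 4-digit padding.
#include <cstdio>
#include <iostream>
#include <string>

std::string part_file_name( unsigned part_number, bool artifacts = false )
{
  char buffer[ 32 ];
  std::snprintf( buffer, sizeof( buffer ), "block_log_part.%04u", part_number );
  std::string name = buffer;
  if( artifacts )
    name += ".artifacts";
  return name;
}

int main()
{
  std::cout << part_file_name( 1 ) << "\n";       // block_log_part.0001
  std::cout << part_file_name( 1, true ) << "\n"; // block_log_part.0001.artifacts
}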

tests/python/api_tests/python_patterns_tests/cli_wallet_tests/output_formatter_tests/conftest.py (+3 -1)

@@ -12,10 +12,12 @@ def node(request):
  if request.node.get_closest_marker("replayed_node") is None:
  node = tt.InitNode()
  node.config.plugin.append("market_history_api")
+ node.config.block_log_split = -1
  node.run()
  return node
  api_node = tt.FullApiNode()
- api_node.run(replay_from=Path(__file__).parent.joinpath("block_log/block_log"), wait_for_live=False)
+ api_node.config.block_log_split = -1
+ api_node.run(replay_from=Path(__file__).parent.joinpath("block_log"), wait_for_live=False)
  return api_node

tests/python/conftest.py (+1)

@@ -53,6 +53,7 @@ def __create_init_node() -> tt.InitNode:
  init_node.config.plugin.append(
  "condenser_api"
  ) # FIXME eliminate condenser_api usage from other tests than this API specific
+ init_node.config.block_log_split = -1
  init_node.run()
  return init_node

tests/python/functional/beem_tests/test_assets.py (+2 -1)

@@ -16,12 +16,13 @@
  @pytest.fixture()
  def node(chain_id, skeleton_key):
  block_log_directory = Path(__file__).parent / "block_log_mirrornet_1k"
- block_log = tt.BlockLog(block_log_directory / "block_log")
+ block_log = tt.BlockLog(block_log_directory, "auto")

  init_node = tt.InitNode()
  init_node.config.private_key = skeleton_key
  init_node.config.plugin.append("account_history_api")
  init_node.config.plugin.append("condenser_api")
+ init_node.config.block_log_split = -1

  init_node.run(
  time_control=tt.StartTimeControl(start_time="head_block_time"),

tests/python/functional/colony/conftest.py (+4 -4)

@@ -7,10 +7,10 @@


  @pytest.fixture()
- def alternate_chain_spec(block_log) -> tt.AlternateChainSpecs:
- return tt.AlternateChainSpecs.parse_file(block_log.path.parent / tt.AlternateChainSpecs.FILENAME)
+ def alternate_chain_spec(block_log_single_sign) -> tt.AlternateChainSpecs:
+ return tt.AlternateChainSpecs.parse_file(block_log_single_sign.path / tt.AlternateChainSpecs.FILENAME)


  @pytest.fixture()
- def block_log() -> tt.BlockLog:
- return tt.BlockLog(path=UNIVERSAL_BLOCK_LOGS_PATH / "block_log_single_sign" / "block_log")
+ def block_log_single_sign() -> tt.BlockLog:
+ return tt.BlockLog(path=UNIVERSAL_BLOCK_LOGS_PATH / "block_log_single_sign", mode="auto")
