diff --git a/src/functions/ducklake_add_data_files.cpp b/src/functions/ducklake_add_data_files.cpp index 13f8ace3b15..f23d65ee2f9 100644 --- a/src/functions/ducklake_add_data_files.cpp +++ b/src/functions/ducklake_add_data_files.cpp @@ -168,7 +168,8 @@ struct DuckLakeFileProcessor { }; void DuckLakeFileProcessor::ReadParquetFullMetadata(const string &glob, vector &written_files) { - auto result = transaction.Query(StringUtil::Format(R"( + auto result = result_or_throw( + transaction.Query(StringUtil::Format(R"( SELECT list_transform(parquet_file_metadata, lambda x: struct_pack( file_name := x.file_name, @@ -198,10 +199,8 @@ SELECT )) AS parquet_schema FROM parquet_full_metadata(%s) )", - SQLString(glob))); - if (result->HasError()) { - result->GetErrorObject().Throw("Failed to add data files to DuckLake: "); - } + SQLString(glob))), + "Failed to add data files to DuckLake: "); for (auto &row : *result) { auto &chunk = row.GetChunk(); diff --git a/src/functions/ducklake_flush_inlined_data.cpp b/src/functions/ducklake_flush_inlined_data.cpp index 436f511820c..14599df16cc 100644 --- a/src/functions/ducklake_flush_inlined_data.cpp +++ b/src/functions/ducklake_flush_inlined_data.cpp @@ -429,7 +429,8 @@ static void FlushInlinedFileDeletions(ClientContext &context, DuckLakeCatalog &c } // Query the inlined deletions with file paths and existing delete file info - auto deletions_result = transaction.Query(snapshot, StringUtil::Format(R"( + auto deletions_result = result_or_throw( + transaction.Query(snapshot, StringUtil::Format(R"( SELECT del.file_id, data.path, data.path_is_relative, del.row_id, del.begin_snapshot, existing_del.delete_file_id, existing_del.path as del_path, existing_del.path_is_relative as del_path_is_relative, existing_del.begin_snapshot as del_begin_snapshot, existing_del.encryption_key as del_encryption_key, @@ -442,10 +443,8 @@ LEFT JOIN ( AND ({SNAPSHOT_ID} < end_snapshot OR end_snapshot IS NULL) ) existing_del ON del.file_id = 
existing_del.data_file_id )", - inlined_table_name, table_id.index)); - if (deletions_result->HasError()) { - deletions_result->GetErrorObject().Throw("Failed to query inlined file deletions for flush: "); - } + inlined_table_name, table_id.index)), + "Failed to query inlined file deletions for flush: "); unordered_map files_to_flush; diff --git a/src/include/storage/ducklake_transaction.hpp b/src/include/storage/ducklake_transaction.hpp index 015130a411d..cfc2c96326e 100644 --- a/src/include/storage/ducklake_transaction.hpp +++ b/src/include/storage/ducklake_transaction.hpp @@ -12,9 +12,12 @@ #include "common/ducklake_data_file.hpp" #include "common/ducklake_snapshot.hpp" #include "duckdb/common/case_insensitive_map.hpp" +#include "duckdb/common/exception.hpp" #include "duckdb/common/types/value_map.hpp" #include "duckdb/main/connection.hpp" +#include "duckdb/main/query_result.hpp" #include "duckdb/transaction/transaction.hpp" +#include #include "storage/ducklake_catalog_set.hpp" #include "storage/ducklake_inlined_data.hpp" #include "storage/ducklake_metadata_manager.hpp" @@ -41,6 +44,36 @@ struct DuckLakeCommitState; class DuckLakeFieldId; class LocalTableChangeIterationHelper; +inline unique_ptr result_or_throw(unique_ptr result, const string &err) { + if (result->HasError()) { + result->GetErrorObject().Throw(err); + } + return result; +} + +inline void execute_or_throw(unique_ptr result, const string &err) { + (void)result_or_throw(std::move(result), err); +} + +template +T query_scalar_or_throw(unique_ptr result, const string &err) { + auto checked_result = result_or_throw(std::move(result), err); + auto chunk = checked_result->Fetch(); + if (!chunk || chunk->size() == 0) { + throw InvalidInputException("Expected scalar result"); + } + return chunk->GetValue(0, 0).template GetValue(); +} + +template +T query_scalar_or_zero(unique_ptr result, const string &err) { + auto checked_result = result_or_throw(std::move(result), err); + for (auto &row : 
*checked_result) { + return row.template GetValue(0); + } + return T {}; +} + struct FlushedInlinedTableInfo { DuckLakeInlinedTableInfo inlined_table; idx_t flush_snapshot_id; diff --git a/src/storage/ducklake_initializer.cpp b/src/storage/ducklake_initializer.cpp index 3b930baceb5..6b4eee69f00 100644 --- a/src/storage/ducklake_initializer.cpp +++ b/src/storage/ducklake_initializer.cpp @@ -61,15 +61,13 @@ string DuckLakeInitializer::GetAttachOptions() { void DuckLakeInitializer::Initialize() { auto &transaction = DuckLakeTransaction::Get(context, catalog); // attach the metadata database - auto result = transaction.Query("ATTACH OR REPLACE {METADATA_PATH} AS {METADATA_CATALOG_NAME_IDENTIFIER}" + - GetAttachOptions()); - if (result->HasError()) { - auto &error_obj = result->GetErrorObject(); - error_obj.Throw("Failed to attach DuckLake MetaData \"" + catalog.MetadataDatabaseName() + "\" at path + \"" + - catalog.MetadataPath() + "\""); - } + execute_or_throw( + transaction.Query("ATTACH OR REPLACE {METADATA_PATH} AS {METADATA_CATALOG_NAME_IDENTIFIER}" + + GetAttachOptions()), + "Failed to attach DuckLake MetaData \"" + catalog.MetadataDatabaseName() + "\" at path \"" + + catalog.MetadataPath() + "\""); // explicitly load all secrets - work-around to secret initialization bug - transaction.Query("FROM duckdb_secrets()"); + execute_or_throw(transaction.Query("FROM duckdb_secrets()"), "Failed to load DuckDB secrets"); bool has_explicit_schema = !options.metadata_schema.empty(); if (options.metadata_schema.empty()) { @@ -94,7 +92,7 @@ void DuckLakeInitializer::Initialize() { // directly query a known ducklake metadata table to avoid scanning all attached catalogs via duckdb_tables() // this prevents a corrupted ducklake catalog from blocking initialization of unrelated ducklake databases // FIXME: verify that all ducklake tables are in the correct format - result = transaction.Query("SELECT NULL FROM {METADATA_CATALOG}.ducklake_metadata LIMIT 1"); + auto result = 
transaction.Query("SELECT NULL FROM {METADATA_CATALOG}.ducklake_metadata LIMIT 1"); if (result->HasError()) { auto &error_obj = result->GetErrorObject(); if (error_obj.Type() == ExceptionType::CATALOG) { diff --git a/src/storage/ducklake_inlined_data_reader.cpp b/src/storage/ducklake_inlined_data_reader.cpp index e87f908c6c3..68eaa13022a 100644 --- a/src/storage/ducklake_inlined_data_reader.cpp +++ b/src/storage/ducklake_inlined_data_reader.cpp @@ -105,25 +105,26 @@ bool DuckLakeInlinedDataReader::TryInitializeScan(ClientContext &context, Global virtual_columns.push_back(InlinedVirtualColumn::NONE); } } - unique_ptr query_result; + unique_ptr scan_result; switch (read_info.scan_type) { case DuckLakeScanType::SCAN_TABLE: - query_result = metadata_manager.ReadInlinedData(read_info.snapshot, table_name, columns_to_read); + scan_result = metadata_manager.ReadInlinedData(read_info.snapshot, table_name, columns_to_read); break; case DuckLakeScanType::SCAN_INSERTIONS: - query_result = metadata_manager.ReadInlinedDataInsertions(*read_info.start_snapshot, read_info.snapshot, - table_name, columns_to_read); + scan_result = metadata_manager.ReadInlinedDataInsertions(*read_info.start_snapshot, read_info.snapshot, + table_name, columns_to_read); break; case DuckLakeScanType::SCAN_DELETIONS: - query_result = metadata_manager.ReadInlinedDataDeletions(*read_info.start_snapshot, read_info.snapshot, - table_name, columns_to_read); + scan_result = metadata_manager.ReadInlinedDataDeletions(*read_info.start_snapshot, read_info.snapshot, + table_name, columns_to_read); break; case DuckLakeScanType::SCAN_FOR_FLUSH: - query_result = metadata_manager.ReadAllInlinedDataForFlush(read_info.snapshot, table_name, columns_to_read); + scan_result = metadata_manager.ReadAllInlinedDataForFlush(read_info.snapshot, table_name, columns_to_read); break; default: throw InternalException("Unknown DuckLake scan type"); } + auto query_result = result_or_throw(std::move(scan_result), "Failed to read inlined 
data from DuckLake: "); data = metadata_manager.TransformInlinedData(*query_result, expected_types); if (!virtual_columns.empty()) { auto scan_types = data->data->Types(); diff --git a/src/storage/ducklake_metadata_manager.cpp b/src/storage/ducklake_metadata_manager.cpp index c752b71284e..ddcf6ad3ca1 100644 --- a/src/storage/ducklake_metadata_manager.cpp +++ b/src/storage/ducklake_metadata_manager.cpp @@ -458,7 +458,8 @@ SELECT begin_snapshot FROM {METADATA_CATALOG}.ducklake_table WHERE table_id = {TABLE_ID})"; query = StringUtil::Replace(query, "{TABLE_ID}", to_string(table_id.index)).c_str(); - auto result = transaction.Query(query); + auto result = result_or_throw(transaction.Query(query), + "Failed to query begin snapshot for DuckLake table: "); for (auto &row : *result) { return row.GetValue(0); } @@ -472,7 +473,8 @@ FROM {METADATA_CATALOG}.ducklake_schema_versions WHERE table_id = {TABLE_ID} AND schema_version = {SCHEMA_VERSION})"; query = StringUtil::Replace(query, "{TABLE_ID}", to_string(table_id.index)); query = StringUtil::Replace(query, "{SCHEMA_VERSION}", to_string(schema_version)); - auto result = transaction.Query(query); + auto result = result_or_throw(transaction.Query(query), + "Failed to query begin snapshot for DuckLake schema version: "); for (auto &row : *result) { return row.GetValue(0); } @@ -519,11 +521,9 @@ SELECT %s)", inlined_deletion_subquery); query = StringUtil::Replace(query, "{TABLE_ID}", to_string(table_id.index)); - auto result = transaction.Query(snapshot, query); - for (auto &row : *result) { - return row.GetValue(0); - } - return 0; + return query_scalar_or_zero( + transaction.Query(snapshot, query), + "Failed to query net data file row count for DuckLake table: "); } idx_t DuckLakeMetadataManager::GetNetInlinedRowCount(const string &inlined_table_name, DuckLakeSnapshot snapshot) { @@ -533,11 +533,9 @@ FROM {METADATA_CATALOG}.%s WHERE {SNAPSHOT_ID} >= begin_snapshot AND ({SNAPSHOT_ID} < end_snapshot OR end_snapshot IS NULL))", 
inlined_table_name); - auto result = transaction.Query(snapshot, query); - for (auto &row : *result) { - return row.GetValue(0); - } - return 0; + return query_scalar_or_zero( + transaction.Query(snapshot, query), + "Failed to query net inlined row count for DuckLake table: "); } DuckLakeCatalogInfo DuckLakeMetadataManager::GetCatalogForSnapshot(DuckLakeSnapshot snapshot) { @@ -545,14 +543,13 @@ DuckLakeCatalogInfo DuckLakeMetadataManager::GetCatalogForSnapshot(DuckLakeSnaps auto &base_data_path = ducklake_catalog.DataPath(); DuckLakeCatalogInfo catalog; // load the schema information - auto result = transaction.Query(snapshot, R"( + auto result = result_or_throw( + transaction.Query(snapshot, R"( SELECT schema_id, schema_uuid::VARCHAR, schema_name, path, path_is_relative FROM {METADATA_CATALOG}.ducklake_schema WHERE {SNAPSHOT_ID} >= begin_snapshot AND ({SNAPSHOT_ID} < end_snapshot OR end_snapshot IS NULL) -)"); - if (result->HasError()) { - result->GetErrorObject().Throw("Failed to get schema information from DuckLake: "); - } +)"), + "Failed to get schema information from DuckLake: "); map schema_map; for (auto &row : *result) { DuckLakeSchemaInfo schema; @@ -584,7 +581,8 @@ WHERE {SNAPSHOT_ID} >= begin_snapshot AND ({SNAPSHOT_ID} < end_snapshot OR end_s }; // load the table information - result = transaction.Query(snapshot, StringUtil::Format(R"( + result = result_or_throw( + transaction.Query(snapshot, StringUtil::Format(R"( SELECT schema_id, tbl.table_id, table_uuid::VARCHAR, table_name, ( SELECT %s @@ -613,10 +611,8 @@ ORDER BY table_id, parent_column NULLS FIRST, column_order )", ListAggregation(TAG_FIELDS), ListAggregation(INLINED_DATA_TABLES_FIELDS), - ListAggregation(TAG_FIELDS))); - if (result->HasError()) { - result->GetErrorObject().Throw("Failed to get table information from DuckLake: "); - } + ListAggregation(TAG_FIELDS))), + "Failed to get table information from DuckLake: "); const idx_t COLUMN_INDEX_START = 8; auto &tables = catalog.tables; for 
(auto &row : *result) { @@ -700,7 +696,8 @@ ORDER BY table_id, parent_column NULLS FIRST, column_order } } // load view information - result = transaction.Query(snapshot, StringUtil::Format(R"( + result = result_or_throw( + transaction.Query(snapshot, StringUtil::Format(R"( SELECT view_id, view_uuid, schema_id, view_name, dialect, sql, column_aliases, ( SELECT %s @@ -711,10 +708,8 @@ SELECT view_id, view_uuid, schema_id, view_name, dialect, sql, column_aliases, FROM {METADATA_CATALOG}.ducklake_view view WHERE {SNAPSHOT_ID} >= begin_snapshot AND ({SNAPSHOT_ID} < view.end_snapshot OR view.end_snapshot IS NULL) )", - ListAggregation(TAG_FIELDS))); - if (result->HasError()) { - result->GetErrorObject().Throw("Failed to get partition information from DuckLake: "); - } + ListAggregation(TAG_FIELDS))), + "Failed to get partition information from DuckLake: "); auto &views = catalog.views; for (auto &row : *result) { DuckLakeViewInfo view_info; @@ -749,7 +744,8 @@ WHERE {SNAPSHOT_ID} >= begin_snapshot AND ({SNAPSHOT_ID} < view.end_snapshot OR {"dialect", "dialect"}, {"sql", "sql"}, {"type", "type"}, {"params", macro_param_query}}; // load macro information - result = transaction.Query(snapshot, StringUtil::Format(R"( + result = result_or_throw( + transaction.Query(snapshot, StringUtil::Format(R"( SELECT schema_id, ducklake_macro.macro_id, macro_name, ( SELECT %s FROM {METADATA_CATALOG}.ducklake_macro_impl @@ -758,10 +754,8 @@ SELECT schema_id, ducklake_macro.macro_id, macro_name, ( FROM {METADATA_CATALOG}.ducklake_macro WHERE {SNAPSHOT_ID} >= ducklake_macro.begin_snapshot AND ({SNAPSHOT_ID} < ducklake_macro.end_snapshot OR ducklake_macro.end_snapshot IS NULL) )", - ListAggregation(MACRO_IMPL_FIELDS))); - if (result->HasError()) { - result->GetErrorObject().Throw("Failed to get macro information from DuckLake: "); - } + ListAggregation(MACRO_IMPL_FIELDS))), + "Failed to get macro information from DuckLake: "); auto ¯os = catalog.macros; for (auto &row : *result) { 
DuckLakeMacroInfo macro_info; @@ -774,16 +768,15 @@ WHERE {SNAPSHOT_ID} >= ducklake_macro.begin_snapshot AND ({SNAPSHOT_ID} < duckl } // load partition information - result = transaction.Query(snapshot, R"( + result = result_or_throw( + transaction.Query(snapshot, R"( SELECT partition_id, part.table_id, partition_key_index, column_id, transform FROM {METADATA_CATALOG}.ducklake_partition_info part JOIN {METADATA_CATALOG}.ducklake_partition_column part_col USING (partition_id) WHERE {SNAPSHOT_ID} >= part.begin_snapshot AND ({SNAPSHOT_ID} < part.end_snapshot OR part.end_snapshot IS NULL) ORDER BY part.table_id, partition_id, partition_key_index -)"); - if (result->HasError()) { - result->GetErrorObject().Throw("Failed to get partition information from DuckLake: "); - } +)"), + "Failed to get partition information from DuckLake: "); auto &partitions = catalog.partitions; for (auto &row : *result) { auto partition_id = row.GetValue(0); @@ -805,16 +798,15 @@ ORDER BY part.table_id, partition_id, partition_key_index } // load sort information - result = transaction.Query(snapshot, R"( + result = result_or_throw( + transaction.Query(snapshot, R"( SELECT sort.sort_id, sort.table_id, sort_expr.sort_key_index, sort_expr.expression, sort_expr.dialect, sort_expr.sort_direction, sort_expr.null_order FROM {METADATA_CATALOG}.ducklake_sort_info sort JOIN {METADATA_CATALOG}.ducklake_sort_expression sort_expr USING (sort_id) WHERE {SNAPSHOT_ID} >= sort.begin_snapshot AND ({SNAPSHOT_ID} < sort.end_snapshot OR sort.end_snapshot IS NULL) ORDER BY sort.table_id, sort.sort_id, sort_expr.sort_key_index -)"); - if (result->HasError()) { - result->GetErrorObject().Throw("Failed to get sort information from DuckLake: "); - } +)"), + "Failed to get sort information from DuckLake: "); auto &sorts = catalog.sorts; for (auto &row : *result) { auto sort_id = row.GetValue(0); @@ -2493,7 +2485,8 @@ WHERE table_id = %d AND schema_version=( WHERE table_id=%d );)", entry.table_id.index, 
entry.table_id.index); - auto result = transaction.Query(commit_snapshot, query); + auto result = result_or_throw(transaction.Query(commit_snapshot, query), + "Failed to query DuckLake inlined table name: "); for (auto &row : *result) { inlined_table_name = row.GetValue(0); insert_inlined_table_name_cache[entry.table_id.index] = inlined_table_name; @@ -2862,11 +2855,13 @@ string DuckLakeMetadataManager::GetPathForSchema(SchemaIndex schema_id, return FromRelativePath(path); } } - auto result = transaction.Query(StringUtil::Format(R"( + auto result = result_or_throw( + transaction.Query(StringUtil::Format(R"( SELECT path, path_is_relative FROM {METADATA_CATALOG}.ducklake_schema WHERE schema_id = %d;)", - schema_id.index)); + schema_id.index)), + "Failed to query DuckLake schema path: "); for (auto &row : *result) { DuckLakePath path; path.path = row.GetValue(0); @@ -2899,11 +2894,13 @@ string DuckLakeMetadataManager::GetPathForTable(TableIndex table_id, const vecto for (const auto &new_table : new_tables) { if (new_table.id == table_id) { // This is a table not yet in the catalog - auto result = transaction.Query(StringUtil::Format(R"( + auto result = result_or_throw( + transaction.Query(StringUtil::Format(R"( SELECT s.path, s.path_is_relative FROM {METADATA_CATALOG}.ducklake_schema s WHERE schema_id = %d;)", - new_table.schema_id.index)); + new_table.schema_id.index)), + "Failed to query DuckLake schema path for table: "); for (auto &row : *result) { DuckLakePath schema_path; schema_path.path = row.GetValue(0); @@ -2930,7 +2927,8 @@ WHERE schema_id = %d;)", } } } - auto result = transaction.Query(StringUtil::Format(R"( + auto result = result_or_throw( + transaction.Query(StringUtil::Format(R"( SELECT s.path AS s_path, s.path_is_relative AS s_path_is_relative, @@ -2940,7 +2938,8 @@ FROM {METADATA_CATALOG}.ducklake_schema s JOIN {METADATA_CATALOG}.ducklake_table t USING (schema_id) WHERE table_id = %d;)", - table_id.index)); + table_id.index)), + "Failed to query 
DuckLake table path: "); for (auto &row : *result) { DuckLakePath schema_path; schema_path.path = row.GetValue(0); @@ -3472,14 +3471,16 @@ vector DuckLakeMetadataManager::GetColumnMappings(opt if (start_from.IsValid()) { filter = "WHERE mapping_id >= " + to_string(start_from.GetIndex()); } - auto result = transaction.Query(StringUtil::Format(R"( + auto result = result_or_throw( + transaction.Query(StringUtil::Format(R"( SELECT mapping_id, table_id, type, column_id, source_name, target_field_id, parent_column, is_partition FROM {METADATA_CATALOG}.ducklake_column_mapping JOIN {METADATA_CATALOG}.ducklake_name_mapping USING (mapping_id) %s ORDER BY mapping_id, parent_column NULLS FIRST )", - filter)); + filter)), + "Failed to query DuckLake column mappings: "); vector column_maps; for (auto &row : *result) { MappingIndex mapping_id(row.GetValue(0)); @@ -3673,10 +3674,8 @@ string DuckLakeMetadataManager::GetLatestSnapshotQuery() const { } unique_ptr DuckLakeMetadataManager::GetSnapshot() { - auto result = transaction.Query(GetLatestSnapshotQuery()); - if (result->HasError()) { - result->GetErrorObject().Throw("Failed to query most recent snapshot for DuckLake: "); - } + auto result = result_or_throw(transaction.Query(GetLatestSnapshotQuery()), + "Failed to query most recent snapshot for DuckLake: "); auto snapshot = TryGetSnapshotInternal(*result); if (!snapshot) { throw InvalidInputException("No snapshot found in DuckLake"); @@ -3687,17 +3686,19 @@ unique_ptr DuckLakeMetadataManager::GetSnapshot() { unique_ptr DuckLakeMetadataManager::GetSnapshot(BoundAtClause &at_clause, SnapshotBound bound) { auto &unit = at_clause.Unit(); auto &val = at_clause.GetValue(); - unique_ptr result; const string timestamp_order = bound == SnapshotBound::LOWER_BOUND ? "ASC" : "DESC"; const string timestamp_condition = bound == SnapshotBound::LOWER_BOUND ? 
">" : "<"; + auto err_text = StringUtil::Format( + "Failed to query snapshot at %s %s for DuckLake: ", StringUtil::Lower(unit), val.ToString()); + unique_ptr snapshot_result; if (StringUtil::CIEquals(unit, "version")) { - result = transaction.Query(StringUtil::Format(R"( + snapshot_result = transaction.Query(StringUtil::Format(R"( SELECT snapshot_id, schema_version, next_catalog_id, next_file_id FROM {METADATA_CATALOG}.ducklake_snapshot WHERE snapshot_id = %llu;)", - val.DefaultCastAs(LogicalType::UBIGINT).GetValue())); + val.DefaultCastAs(LogicalType::UBIGINT).GetValue())); } else if (StringUtil::CIEquals(unit, "timestamp")) { - result = transaction.Query(StringUtil::Format( + snapshot_result = transaction.Query(StringUtil::Format( R"( SELECT snapshot_id, schema_version, next_catalog_id, next_file_id FROM {METADATA_CATALOG}.ducklake_snapshot @@ -3711,10 +3712,7 @@ WHERE snapshot_id = ( } else { throw InvalidInputException("Unsupported AT clause unit - %s", unit); } - if (result->HasError()) { - result->GetErrorObject().Throw(StringUtil::Format( - "Failed to query snapshot at %s %s for DuckLake: ", StringUtil::Lower(unit), val.ToString())); - } + auto result = result_or_throw(std::move(snapshot_result), err_text); auto snapshot = TryGetSnapshotInternal(*result); if (!snapshot) { throw InvalidInputException("No snapshot found at %s %s", StringUtil::Lower(unit), val.ToString()); @@ -4213,15 +4211,14 @@ WHERE data_file_id IN (%s); } idx_t DuckLakeMetadataManager::GetNextColumnId(TableIndex table_id) { - auto result = transaction.Query(StringUtil::Format(R"( + auto result = result_or_throw( + transaction.Query(StringUtil::Format(R"( SELECT MAX(column_id) FROM {METADATA_CATALOG}.ducklake_column WHERE table_id=%d )", - table_id.index)); - if (result->HasError()) { - result->GetErrorObject().Throw("Failed to get next column id in DuckLake: "); - } + table_id.index)), + "Failed to get next column id in DuckLake: "); for (auto &row : *result) { if (row.IsNull(0)) { break; 
@@ -4348,7 +4345,8 @@ WHERE snapshot_id IN (%s); } } // get a list of tables that are no longer required after these deletions - result = transaction.Query(R"( + result = result_or_throw( + transaction.Query(R"( SELECT table_id FROM {METADATA_CATALOG}.ducklake_table t WHERE end_snapshot IS NOT NULL AND NOT EXISTS ( @@ -4363,7 +4361,8 @@ AND NOT EXISTS ( AND (t2.end_snapshot IS NULL OR EXISTS (SELECT snapshot_id FROM {METADATA_CATALOG}.ducklake_snapshot WHERE snapshot_id >= begin_snapshot AND snapshot_id < t2.end_snapshot)) - );)"); + );)"), + "Failed to query DuckLake tables for snapshot cleanup: "); vector cleanup_tables; for (auto &row : *result) { @@ -4383,7 +4382,8 @@ AND NOT EXISTS ( table_id_filter = StringUtil::Format("table_id IN (%s) OR", deleted_table_ids); } - result = transaction.Query(StringUtil::Format(R"( + result = result_or_throw( + transaction.Query(StringUtil::Format(R"( SELECT data_file_id, table_id, path, path_is_relative FROM {METADATA_CATALOG}.ducklake_data_file WHERE %s (end_snapshot IS NOT NULL AND NOT EXISTS( @@ -4391,7 +4391,8 @@ WHERE %s (end_snapshot IS NOT NULL AND NOT EXISTS( FROM {METADATA_CATALOG}.ducklake_snapshot WHERE snapshot_id >= begin_snapshot AND snapshot_id < end_snapshot ));)", - table_id_filter)); + table_id_filter)), + "Failed to query DuckLake data files for snapshot cleanup: "); vector cleanup_files; for (auto &row : *result) { DuckLakeFileForCleanup info; @@ -4451,7 +4452,8 @@ VALUES %s; file_id_filter = StringUtil::Format("data_file_id IN (%s) OR", deleted_file_ids); } - result = transaction.Query(StringUtil::Format(R"( + result = result_or_throw( + transaction.Query(StringUtil::Format(R"( SELECT delete_file_id, table_id, path, path_is_relative FROM {METADATA_CATALOG}.ducklake_delete_file WHERE %s %s (end_snapshot IS NOT NULL AND NOT EXISTS( @@ -4459,7 +4461,8 @@ WHERE %s %s (end_snapshot IS NOT NULL AND NOT EXISTS( FROM {METADATA_CATALOG}.ducklake_snapshot WHERE snapshot_id >= begin_snapshot AND snapshot_id < 
end_snapshot ));)", - table_id_filter, file_id_filter)); + table_id_filter, file_id_filter)), + "Failed to query DuckLake delete files for snapshot cleanup: "); vector cleanup_deletes; for (auto &row : *result) { DuckLakeFileForCleanup info; @@ -4619,7 +4622,8 @@ string DuckLakeMetadataManager::InsertNewSchema(const DuckLakeSnapshot &snapshot vector DuckLakeMetadataManager::GetTableSizes(DuckLakeSnapshot snapshot) { vector table_sizes; - auto result = transaction.Query(snapshot, R"( + auto result = result_or_throw( + transaction.Query(snapshot, R"( SELECT schema_id, table_id, table_name, table_uuid, data_file_info.file_count AS data_file_count, @@ -4636,7 +4640,8 @@ FROM {METADATA_CATALOG}.ducklake_table tbl, LATERAL ( WHERE df.table_id = tbl.table_id AND {SNAPSHOT_ID} >= begin_snapshot AND ({SNAPSHOT_ID} < end_snapshot OR end_snapshot IS NULL) ) delete_file_info WHERE {SNAPSHOT_ID} >= begin_snapshot AND ({SNAPSHOT_ID} < end_snapshot OR end_snapshot IS NULL) -)"); +)"), + "Failed to query DuckLake table sizes: "); for (auto &row : *result) { DuckLakeTableSizeInfo table_size; table_size.schema_id = SchemaIndex(row.GetValue(0)); @@ -4680,29 +4685,31 @@ void DuckLakeMetadataManager::SetConfigOption(const DuckLakeConfigOption &option scope_id = "NULL"; scope_filter = "scope IS NULL"; } - auto result = transaction.Query(StringUtil::Format(R"( + auto count = query_scalar_or_throw( + transaction.Query(StringUtil::Format(R"( SELECT COUNT(*) FROM {METADATA_CATALOG}.ducklake_metadata WHERE key = %s AND %s )", - SQLString(option_key), scope_filter)); + SQLString(option_key), scope_filter)), + "Failed to query config option in DuckLake: "); - auto count = result->Fetch()->GetValue(0, 0).GetValue(); if (count == 0) { // option does not yet exist - insert the value - result = transaction.Query(StringUtil::Format(R"( + execute_or_throw( + transaction.Query(StringUtil::Format(R"( INSERT INTO {METADATA_CATALOG}.ducklake_metadata VALUES (%s, %s, %s, %s) )", - SQLString(option_key), 
SQLString(option_value), scope, scope_id)); + SQLString(option_key), SQLString(option_value), scope, scope_id)), + "Failed to insert config option in DuckLake: "); } else { // option already exists - update it - result = transaction.Query(StringUtil::Format(R"( + execute_or_throw( + transaction.Query(StringUtil::Format(R"( UPDATE {METADATA_CATALOG}.ducklake_metadata SET value=%s WHERE key=%s AND %s )", - SQLString(option_value), SQLString(option_key), scope_filter)); - } - if (result->HasError()) { - result->GetErrorObject().Throw("Failed to insert config option in DuckLake: "); + SQLString(option_value), SQLString(option_key), scope_filter)), + "Failed to update config option in DuckLake: "); } }