diff --git a/.github/workflows/MainDistributionPipeline.yml b/.github/workflows/MainDistributionPipeline.yml
index 9aa00bfa..a44f45c5 100644
--- a/.github/workflows/MainDistributionPipeline.yml
+++ b/.github/workflows/MainDistributionPipeline.yml
@@ -17,7 +17,7 @@ jobs:
     uses: duckdb/extension-ci-tools/.github/workflows/_extension_distribution.yml@main
     with:
       extension_name: iceberg
-      duckdb_version: v1.3.2
+      duckdb_version: main
       ci_tools_version: main
       exclude_archs: 'windows_amd64_mingw'
       extra_toolchains: 'python3'
@@ -29,7 +29,7 @@
     secrets: inherit
     with:
      extension_name: iceberg
-     duckdb_version: v1.3.2
+     duckdb_version: main
      ci_tools_version: main
      exclude_archs: 'windows_amd64_mingw'
      deploy_latest: ${{ startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' }}
diff --git a/duckdb b/duckdb
index df0a3de7..302b2705 160000
--- a/duckdb
+++ b/duckdb
@@ -1 +1 @@
-Subproject commit df0a3de74429887333ec4af047e7aac2737e52d8
+Subproject commit 302b270570917572b1cb09e1514891b8215fecd7
diff --git a/extension-ci-tools b/extension-ci-tools
index b808e513..006e1d39 160000
--- a/extension-ci-tools
+++ b/extension-ci-tools
@@ -1 +1 @@
-Subproject commit b808e5130cb6e1341ab968a2a9c0f5f236dd2ec8
+Subproject commit 006e1d3932d31274c1b0a86ec8aea526a6bf8416
diff --git a/extension_config.cmake b/extension_config.cmake
index 834bab0c..15fbd506 100644
--- a/extension_config.cmake
+++ b/extension_config.cmake
@@ -12,13 +12,15 @@ duckdb_extension_load(icu)
 duckdb_extension_load(ducklake
     LOAD_TESTS
     GIT_URL https://github.com/duckdb/ducklake
-    GIT_TAG 9cc2d903c51d360ff3fc6afb10cf38f8eac2e25b
+    GIT_TAG d2392c36f33151cf5cdd7d006375b0b669bd44ac
+    APPLY_PATCHES
 )
 duckdb_extension_load(avro
-    LOAD_TESTS
-    GIT_URL https://github.com/duckdb/duckdb-avro
-    GIT_TAG 180e41e8ad13b8712d207785a6bca0aa39341040
+    LOAD_TESTS
+    GIT_URL https://github.com/duckdb/duckdb-avro
+    GIT_TAG 0d7af391bd0aa201b2bdcfb994b7a575ad810155
+    APPLY_PATCHES
 )
 
@@ -27,14 +29,16 @@ if (NOT EMSCRIPTEN)
 if (NOT MINGW)
     duckdb_extension_load(aws
         LOAD_TESTS
         GIT_URL https://github.com/duckdb/duckdb-aws
-        GIT_TAG main
+        GIT_TAG 880da03202acc973d6ee7f3a0423dae5a6dea83b
+        APPLY_PATCHES
     )
 endif ()
 endif()
 
 duckdb_extension_load(httpfs
-    GIT_URL https://github.com/duckdb/duckdb-httpfs
-    GIT_TAG e9bb99189d93c8ce6e0755907c38d283c963ae61
+    GIT_URL https://github.com/duckdb/duckdb-httpfs
+    GIT_TAG cb5b2825eff68fc91f47e917ba88bf2ed84c2dd3
     INCLUDE_DIR extension/httpfs/include
+    APPLY_PATCHES
 )
diff --git a/src/avro_scan.cpp b/src/avro_scan.cpp
index 94e242a1..10715687 100644
--- a/src/avro_scan.cpp
+++ b/src/avro_scan.cpp
@@ -2,6 +2,7 @@
 #include "avro_scan.hpp"
 #include "iceberg_extension.hpp"
 #include "duckdb/main/extension_helper.hpp"
+#include "duckdb/main/extension/extension_loader.hpp"
 #include "duckdb/main/database.hpp"
 #include "iceberg_multi_file_reader.hpp"
@@ -13,7 +14,14 @@ AvroScan::AvroScan(const string &scan_name, ClientContext &context, const string
 	auto &instance = DatabaseInstance::GetDatabase(context);
 	ExtensionHelper::AutoLoadExtension(instance, "avro");
 
-	auto &avro_scan_entry = ExtensionUtil::GetTableFunction(instance, "read_avro");
+	auto &system_catalog = Catalog::GetSystemCatalog(instance);
+	auto data = CatalogTransaction::GetSystemTransaction(instance);
+	auto &schema = system_catalog.GetSchema(data, DEFAULT_SCHEMA);
+	auto catalog_entry = schema.GetEntry(data, CatalogType::TABLE_FUNCTION_ENTRY, "read_avro");
+	if (!catalog_entry) {
+		throw InvalidInputException("Function with name \"read_avro\" not found!");
+	}
+	auto &avro_scan_entry = catalog_entry->Cast<TableFunctionCatalogEntry>();
 	avro_scan = avro_scan_entry.functions.functions[0];
 
 	avro_scan->get_multi_file_reader = IcebergAvroMultiFileReader::CreateInstance;
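[Note: the system-catalog lookup above replaces the removed ExtensionUtil::GetTableFunction; the same pattern recurs below for "parquet_scan" (src/iceberg_functions/iceberg_multi_file_list.cpp) and "iceberg_scan" (src/storage/irc_table_entry.cpp). A minimal sketch of a shared helper that would deduplicate the three call sites; the helper itself is ours, not part of this patch:

static TableFunctionCatalogEntry &GetTableFunctionEntry(DatabaseInstance &instance, const string &name) {
	// Mirror the lookup ExtensionUtil::GetTableFunction used to perform:
	// resolve the entry in the system catalog's default schema.
	auto &system_catalog = Catalog::GetSystemCatalog(instance);
	auto transaction = CatalogTransaction::GetSystemTransaction(instance);
	auto &schema = system_catalog.GetSchema(transaction, DEFAULT_SCHEMA);
	auto entry = schema.GetEntry(transaction, CatalogType::TABLE_FUNCTION_ENTRY, name);
	if (!entry) {
		throw InvalidInputException("Function with name \"%s\" not found!", name);
	}
	return entry->Cast<TableFunctionCatalogEntry>();
}
]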
diff --git a/src/common/api_utils.cpp b/src/common/api_utils.cpp
index 71aeade9..535af527 100644
--- a/src/common/api_utils.cpp
+++ b/src/common/api_utils.cpp
@@ -3,6 +3,8 @@
 #include "duckdb/common/exception/http_exception.hpp"
 #include "duckdb/common/string_util.hpp"
 #include "duckdb/main/client_data.hpp"
+#include "duckdb/main/database.hpp"
+#include "duckdb/main/extension/extension_loader.hpp"
 
 #include
diff --git a/src/iceberg_extension.cpp b/src/iceberg_extension.cpp
index 16b49130..6ab03e16 100644
--- a/src/iceberg_extension.cpp
+++ b/src/iceberg_extension.cpp
@@ -1,5 +1,3 @@
-#define DUCKDB_EXTENSION_MAIN
-
 #include "iceberg_extension.hpp"
 #include "storage/irc_catalog.hpp"
 #include "storage/irc_transaction_manager.hpp"
@@ -9,7 +7,7 @@
 #include "duckdb/common/exception/http_exception.hpp"
 #include "duckdb/common/string_util.hpp"
 #include "duckdb/function/scalar_function.hpp"
-#include "duckdb/main/extension_util.hpp"
+#include "duckdb/main/extension/extension_loader.hpp"
 #include "duckdb/catalog/catalog_entry/macro_catalog_entry.hpp"
 #include "duckdb/catalog/default/default_functions.hpp"
 #include "duckdb/storage/storage_extension.hpp"
@@ -23,8 +21,8 @@
 
 namespace duckdb {
 
-static unique_ptr<TransactionManager> CreateTransactionManager(StorageExtensionInfo *storage_info, AttachedDatabase &db,
-                                                               Catalog &catalog) {
+static unique_ptr<TransactionManager> CreateTransactionManager(optional_ptr<StorageExtensionInfo> storage_info,
+                                                               AttachedDatabase &db, Catalog &catalog) {
 	auto &ic_catalog = catalog.Cast<IRCatalog>();
 	return make_uniq<ICTransactionManager>(db, ic_catalog);
 }
@@ -37,8 +35,10 @@ class IRCStorageExtension : public StorageExtension {
 	}
 };
 
-static void LoadInternal(DatabaseInstance &instance) {
+static void LoadInternal(ExtensionLoader &loader) {
+	auto &instance = loader.GetDatabaseInstance();
 	ExtensionHelper::AutoLoadExtension(instance, "parquet");
+
 	if (!instance.ExtensionIsLoaded("parquet")) {
 		throw MissingExtensionException("The iceberg extension requires the parquet extension to be loaded!");
 	}
@@ -51,13 +51,13 @@
 	                           LogicalType::BOOLEAN, Value::BOOLEAN(false));
 
 	// Iceberg Table Functions
-	for (auto &fun : IcebergFunctions::GetTableFunctions(instance)) {
-		ExtensionUtil::RegisterFunction(instance, std::move(fun));
+	for (auto &fun : IcebergFunctions::GetTableFunctions(loader)) {
+		loader.RegisterFunction(std::move(fun));
 	}
 
 	// Iceberg Scalar Functions
 	for (auto &fun : IcebergFunctions::GetScalarFunctions()) {
-		ExtensionUtil::RegisterFunction(instance, fun);
+		loader.RegisterFunction(fun);
 	}
 
 	SecretType secret_type;
@@ -65,10 +65,10 @@
 	secret_type.deserializer = KeyValueSecret::Deserialize;
 	secret_type.default_provider = "config";
 
-	ExtensionUtil::RegisterSecretType(instance, secret_type);
+	loader.RegisterSecretType(secret_type);
 
 	CreateSecretFunction secret_function = {"iceberg", "config", OAuth2Authorization::CreateCatalogSecretFunction};
 	OAuth2Authorization::SetCatalogSecretParameters(secret_function);
-	ExtensionUtil::RegisterFunction(instance, secret_function);
+	loader.RegisterFunction(secret_function);
 
 	auto &log_manager = instance.GetLogManager();
 	log_manager.RegisterLogType(make_uniq<IcebergLogType>());
@@ -76,8 +76,8 @@
 	config.storage_extensions["iceberg"] = make_uniq<IRCStorageExtension>();
 }
 
-void IcebergExtension::Load(DuckDB &db) {
-	LoadInternal(*db.instance);
+void IcebergExtension::Load(ExtensionLoader &loader) {
+	LoadInternal(loader);
 }
 string IcebergExtension::Name() {
 	return "iceberg";
@@ -86,16 +86,7 @@
 } // namespace duckdb
 
 extern "C" {
-
-DUCKDB_EXTENSION_API void iceberg_init(duckdb::DatabaseInstance &db) {
-	LoadInternal(db);
-}
-
-DUCKDB_EXTENSION_API const char *iceberg_version() {
-	return duckdb::DuckDB::LibraryVersion();
+DUCKDB_CPP_EXTENSION_ENTRY(iceberg, loader) {
+	LoadInternal(loader);
 }
 }
-
-#ifndef DUCKDB_EXTENSION_MAIN
-#error DUCKDB_EXTENSION_MAIN not defined
-#endif
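[Note: the hunk above is the core of the migration: DUCKDB_CPP_EXTENSION_ENTRY replaces the paired C entry points (iceberg_init and iceberg_version) plus the DUCKDB_EXTENSION_MAIN boilerplate, and all registration flows through the ExtensionLoader. A minimal sketch of the same pattern for a hypothetical extension named demo, illustrative only and not part of this patch:

#include "duckdb.hpp"
#include "duckdb/main/extension/extension_loader.hpp"

static void LoadInternal(duckdb::ExtensionLoader &loader) {
	// Register functions, secret types, etc. through `loader` here, as the
	// iceberg LoadInternal does above; the DatabaseInstance stays reachable
	// via loader.GetDatabaseInstance() when needed.
}

extern "C" {
DUCKDB_CPP_EXTENSION_ENTRY(demo, loader) {
	LoadInternal(loader);
}
}
]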
diff --git a/src/iceberg_functions.cpp b/src/iceberg_functions.cpp
index 51989bf9..5ab230ec 100644
--- a/src/iceberg_functions.cpp
+++ b/src/iceberg_functions.cpp
@@ -5,13 +5,14 @@
 #include "duckdb/function/cast/default_casts.hpp"
 #include "duckdb/parser/expression/function_expression.hpp"
 
-namespace duckdb {
+class ExtensionLoader;
 
-vector<TableFunctionSet> IcebergFunctions::GetTableFunctions(DatabaseInstance &instance) {
+namespace duckdb {
+vector<TableFunctionSet> IcebergFunctions::GetTableFunctions(ExtensionLoader &loader) {
 	vector<TableFunctionSet> functions;
 
 	functions.push_back(std::move(GetIcebergSnapshotsFunction()));
-	functions.push_back(std::move(GetIcebergScanFunction(instance)));
+	functions.push_back(std::move(GetIcebergScanFunction(loader)));
 	functions.push_back(std::move(GetIcebergMetadataFunction()));
 	functions.push_back(std::move(GetIcebergToDuckLakeFunction()));
diff --git a/src/iceberg_functions/iceberg_avro_multi_file_reader.cpp b/src/iceberg_functions/iceberg_avro_multi_file_reader.cpp
index 4495c875..6c3fab62 100644
--- a/src/iceberg_functions/iceberg_avro_multi_file_reader.cpp
+++ b/src/iceberg_functions/iceberg_avro_multi_file_reader.cpp
@@ -1,7 +1,6 @@
 #include "iceberg_avro_multi_file_reader.hpp"
 
 #include "duckdb/common/exception.hpp"
-#include "duckdb/main/extension_util.hpp"
 #include "duckdb/parallel/thread_context.hpp"
 
 namespace duckdb {
@@ -13,7 +12,7 @@ unique_ptr IcebergAvroMultiFileReader::CreateInstance(const Tab
 shared_ptr<MultiFileList> IcebergAvroMultiFileReader::CreateFileList(ClientContext &context,
                                                                      const vector &paths,
-                                                                     FileGlobOptions options) {
+                                                                     const FileGlobInput &glob_input) {
 	vector open_files;
 	for (auto &path : paths) {
diff --git a/src/iceberg_functions/iceberg_multi_file_list.cpp b/src/iceberg_functions/iceberg_multi_file_list.cpp
index a37919ef..94513ee9 100644
--- a/src/iceberg_functions/iceberg_multi_file_list.cpp
+++ b/src/iceberg_functions/iceberg_multi_file_list.cpp
@@ -8,7 +8,6 @@
 #include "duckdb/common/exception.hpp"
 #include "duckdb/execution/execution_context.hpp"
 #include "duckdb/parallel/thread_context.hpp"
-#include "duckdb/main/extension_util.hpp"
 #include "duckdb/parser/tableref/table_function_ref.hpp"
 #include "duckdb/parser/expression/constant_expression.hpp"
 #include "duckdb/planner/filter/struct_filter.hpp"
@@ -656,7 +655,14 @@ void IcebergMultiFileList::ScanDeleteFile(const IcebergManifestEntry &entry,
 	auto &instance = DatabaseInstance::GetDatabase(context);
 	//! FIXME: delete files could also be made without row_ids,
 	//! in which case we need to rely on the `'schema.column-mapping.default'` property just like data files do.
-	auto &parquet_scan_entry = ExtensionUtil::GetTableFunction(instance, "parquet_scan");
+	auto &system_catalog = Catalog::GetSystemCatalog(instance);
+	auto data = CatalogTransaction::GetSystemTransaction(instance);
+	auto &schema = system_catalog.GetSchema(data, DEFAULT_SCHEMA);
+	auto catalog_entry = schema.GetEntry(data, CatalogType::TABLE_FUNCTION_ENTRY, "parquet_scan");
+	if (!catalog_entry) {
+		throw InvalidInputException("Function with name \"parquet_scan\" not found!");
+	}
+	auto &parquet_scan_entry = catalog_entry->Cast<TableFunctionCatalogEntry>();
 	auto &parquet_scan = parquet_scan_entry.functions.functions[0];
 
 	// Prepare the inputs for the bind
diff --git a/src/iceberg_functions/iceberg_multi_file_reader.cpp b/src/iceberg_functions/iceberg_multi_file_reader.cpp
index 23679991..12493ffd 100644
--- a/src/iceberg_functions/iceberg_multi_file_reader.cpp
+++ b/src/iceberg_functions/iceberg_multi_file_reader.cpp
@@ -7,7 +7,6 @@
 #include "duckdb/catalog/catalog_entry/table_function_catalog_entry.hpp"
 #include "duckdb/common/exception.hpp"
 #include "duckdb/execution/execution_context.hpp"
-#include "duckdb/main/extension_util.hpp"
 #include "duckdb/parallel/thread_context.hpp"
 #include "duckdb/parser/tableref/table_function_ref.hpp"
 #include "duckdb/parser/expression/constant_expression.hpp"
@@ -31,7 +30,7 @@ unique_ptr IcebergMultiFileReader::CreateInstance(const TableFu
 }
 
 shared_ptr<MultiFileList> IcebergMultiFileReader::CreateFileList(ClientContext &context, const vector &paths,
-                                                                 FileGlobOptions) {
+                                                                 const FileGlobInput &glob_input) {
 	if (paths.size() != 1) {
 		throw BinderException("'iceberg_scan' only supports single path as input");
 	}
diff --git a/src/iceberg_functions/iceberg_scan.cpp b/src/iceberg_functions/iceberg_scan.cpp
index 8cd526c0..43140898 100644
--- a/src/iceberg_functions/iceberg_scan.cpp
+++ b/src/iceberg_functions/iceberg_scan.cpp
@@ -19,7 +19,7 @@
 #include "duckdb/planner/operator/logical_comparison_join.hpp"
 #include "duckdb/common/file_opener.hpp"
 #include "duckdb/common/file_system.hpp"
-#include "duckdb/main/extension_util.hpp"
+#include "duckdb/main/extension/extension_loader.hpp"
 #include "iceberg_metadata.hpp"
 #include "iceberg_utils.hpp"
 #include "iceberg_multi_file_reader.hpp"
@@ -41,11 +41,11 @@ static void AddNamedParameters(TableFunction &fun) {
 	fun.named_parameters["snapshot_from_id"] = LogicalType::UBIGINT;
 }
 
-TableFunctionSet IcebergFunctions::GetIcebergScanFunction(DatabaseInstance &instance) {
+TableFunctionSet IcebergFunctions::GetIcebergScanFunction(ExtensionLoader &loader) {
 	// The iceberg_scan function is constructed by grabbing the parquet scan from the Catalog, then injecting the
 	// IcebergMultiFileReader into it to create a Iceberg-based multi file read
 
-	auto &parquet_scan = ExtensionUtil::GetTableFunction(instance, "parquet_scan");
+	auto &parquet_scan = loader.GetTableFunction("parquet_scan");
 	auto parquet_scan_copy = parquet_scan.functions;
 
 	for (auto &function : parquet_scan_copy.functions) {
diff --git a/src/iceberg_functions/iceberg_snapshots.cpp b/src/iceberg_functions/iceberg_snapshots.cpp
index 3bb975a8..0be02c28 100644
--- a/src/iceberg_functions/iceberg_snapshots.cpp
+++ b/src/iceberg_functions/iceberg_snapshots.cpp
@@ -91,8 +91,8 @@ static void IcebergSnapshotsFunction(ClientContext &context, TableFunctionInput
 		}
 		auto &snapshot = it->second;
 
-		FlatVector::GetData(output.data[0])[i] = snapshot.sequence_number;
-		FlatVector::GetData(output.data[1])[i] = snapshot.snapshot_id;
+		FlatVector::GetData(output.data[0])[i] = snapshot.sequence_number;
+		FlatVector::GetData(output.data[1])[i] = snapshot.snapshot_id;
 		FlatVector::GetData(output.data[2])[i] = snapshot.timestamp_ms;
 		string_t manifest_string_t = StringVector::AddString(output.data[3], string_t(snapshot.manifest_list));
 		FlatVector::GetData(output.data[3])[i] = manifest_string_t;
diff --git a/src/include/avro_scan.hpp b/src/include/avro_scan.hpp
index 523fa0b7..f89861ae 100644
--- a/src/include/avro_scan.hpp
+++ b/src/include/avro_scan.hpp
@@ -10,7 +10,6 @@
 #include "duckdb/catalog/catalog_entry/table_function_catalog_entry.hpp"
 #include "duckdb/common/exception.hpp"
 #include "duckdb/execution/execution_context.hpp"
-#include "duckdb/main/extension_util.hpp"
 #include "duckdb/parallel/thread_context.hpp"
 #include "duckdb/parser/tableref/table_function_ref.hpp"
 #include "duckdb/parser/expression/constant_expression.hpp"
diff --git a/src/include/iceberg_avro_multi_file_reader.hpp b/src/include/iceberg_avro_multi_file_reader.hpp
index 70d5d360..d32c6ee4 100644
--- a/src/include/iceberg_avro_multi_file_reader.hpp
+++ b/src/include/iceberg_avro_multi_file_reader.hpp
@@ -14,7 +14,7 @@ namespace duckdb {
 
 struct IcebergAvroMultiFileReader : public MultiFileReader {
 	shared_ptr<MultiFileList> CreateFileList(ClientContext &context, const vector &paths,
-	                                         FileGlobOptions options) override;
+	                                         const FileGlobInput &glob_input) override;
 
 	static unique_ptr<MultiFileReader> CreateInstance(const TableFunction &table);
 };
diff --git a/src/include/iceberg_extension.hpp b/src/include/iceberg_extension.hpp
index 75da4f37..8ca2df6c 100644
--- a/src/include/iceberg_extension.hpp
+++ b/src/include/iceberg_extension.hpp
@@ -6,7 +6,7 @@ namespace duckdb {
 
 class IcebergExtension : public Extension {
 public:
-	void Load(DuckDB &db) override;
+	void Load(ExtensionLoader &db) override;
 	string Name() override;
 };
diff --git a/src/include/iceberg_functions.hpp b/src/include/iceberg_functions.hpp
index d5ffa81d..2170c602 100644
--- a/src/include/iceberg_functions.hpp
+++ b/src/include/iceberg_functions.hpp
@@ -13,15 +13,16 @@
 #include "duckdb/parser/parsed_data/create_table_function_info.hpp"
 
 namespace duckdb {
+class ExtensionLoader;
 
 class IcebergFunctions {
 public:
-	static vector<TableFunctionSet> GetTableFunctions(DatabaseInstance &instance);
+	static vector<TableFunctionSet> GetTableFunctions(ExtensionLoader &loader);
 	static vector<ScalarFunctionSet> GetScalarFunctions();
 
 private:
 	static TableFunctionSet GetIcebergSnapshotsFunction();
-	static TableFunctionSet GetIcebergScanFunction(DatabaseInstance &instance);
+	static TableFunctionSet GetIcebergScanFunction(ExtensionLoader &instance);
 	static TableFunctionSet GetIcebergMetadataFunction();
 	static TableFunctionSet GetIcebergToDuckLakeFunction();
 };
diff --git a/src/include/iceberg_multi_file_reader.hpp b/src/include/iceberg_multi_file_reader.hpp
index b767ecd6..00377e19 100644
--- a/src/include/iceberg_multi_file_reader.hpp
+++ b/src/include/iceberg_multi_file_reader.hpp
@@ -40,7 +40,7 @@ struct IcebergMultiFileReader : public MultiFileReader {
 public:
 	shared_ptr<MultiFileList> CreateFileList(ClientContext &context, const vector &paths,
-	                                         FileGlobOptions options) override;
+	                                         const FileGlobInput &glob_input) override;
 	bool Bind(MultiFileOptions &options, MultiFileList &files, vector<LogicalType> &return_types, vector<string> &names,
 	          MultiFileReaderBindData &bind_data) override;
 	void BindOptions(MultiFileOptions &options, MultiFileList &files, vector<LogicalType> &return_types,
diff --git a/src/include/metadata/iceberg_manifest.hpp b/src/include/metadata/iceberg_manifest.hpp
index e016cb1a..7bca10a3 100644
--- a/src/include/metadata/iceberg_manifest.hpp
+++ b/src/include/metadata/iceberg_manifest.hpp
@@ -7,6 +7,7 @@
 #include "duckdb/function/copy_function.hpp"
 #include "duckdb/execution/execution_context.hpp"
+#include "duckdb/main/extension/extension_loader.hpp"
 #include "duckdb/parallel/thread_context.hpp"
 #include "duckdb/common/insertion_order_preserving_map.hpp"
diff --git a/src/include/storage/iceberg_insert.hpp b/src/include/storage/iceberg_insert.hpp
index 265f8131..e2e1d1a2 100644
--- a/src/include/storage/iceberg_insert.hpp
+++ b/src/include/storage/iceberg_insert.hpp
@@ -8,6 +8,7 @@
 
 #pragma once
 
+#include "duckdb/execution/physical_plan_generator.hpp"
 #include "duckdb/execution/operator/persistent/physical_copy_to_file.hpp"
 #include "duckdb/execution/physical_operator.hpp"
 #include "duckdb/common/index_vector.hpp"
@@ -31,9 +32,11 @@ struct IcebergCopyInput {
 
 class IcebergInsert : public PhysicalOperator {
 public:
 	//! INSERT INTO
-	IcebergInsert(LogicalOperator &op, TableCatalogEntry &table, physical_index_vector_t<idx_t> column_index_map);
+	IcebergInsert(PhysicalPlan &physical_plan, LogicalOperator &op, TableCatalogEntry &table,
+	              physical_index_vector_t<idx_t> column_index_map);
 	//! CREATE TABLE AS
-	IcebergInsert(LogicalOperator &op, SchemaCatalogEntry &schema, unique_ptr<BoundCreateTableInfo> info);
+	IcebergInsert(PhysicalPlan &physical_plan, LogicalOperator &op, SchemaCatalogEntry &schema,
+	              unique_ptr<BoundCreateTableInfo> info);
 
 	//! The table to insert into
 	optional_ptr<TableCatalogEntry> table;
diff --git a/src/include/storage/irc_catalog.hpp b/src/include/storage/irc_catalog.hpp
index a30cbe0e..d6f8b8be 100644
--- a/src/include/storage/irc_catalog.hpp
+++ b/src/include/storage/irc_catalog.hpp
@@ -50,8 +50,9 @@ class IRCatalog : public Catalog {
 	                      case_insensitive_set_t &set_by_attach_options);
 
 public:
-	static unique_ptr<Catalog> Attach(StorageExtensionInfo *storage_info, ClientContext &context, AttachedDatabase &db,
-	                                  const string &name, AttachInfo &info, AccessMode access_mode);
+	static unique_ptr<Catalog> Attach(optional_ptr<StorageExtensionInfo> storage_info, ClientContext &context,
+	                                  AttachedDatabase &db, const string &name, AttachInfo &info,
+	                                  AttachOptions &options);
 
 public:
 	void Initialize(bool load_builtin) override;
diff --git a/src/storage/iceberg_insert.cpp b/src/storage/iceberg_insert.cpp
index 339c108d..cf7d87d7 100644
--- a/src/storage/iceberg_insert.cpp
+++ b/src/storage/iceberg_insert.cpp
@@ -9,8 +9,6 @@
 #include "duckdb/common/sort/partition_state.hpp"
 #include "duckdb/catalog/catalog_entry/copy_function_catalog_entry.hpp"
 #include "duckdb/main/client_data.hpp"
-
-#include "duckdb/main/extension_util.hpp"
 #include "duckdb/planner/operator/logical_copy_to_file.hpp"
 #include "duckdb/execution/physical_operator_states.hpp"
 #include "duckdb/planner/operator/logical_insert.hpp"
@@ -24,14 +22,15 @@
 
 namespace duckdb {
 
-IcebergInsert::IcebergInsert(LogicalOperator &op, TableCatalogEntry &table,
+IcebergInsert::IcebergInsert(PhysicalPlan &physical_plan, LogicalOperator &op, TableCatalogEntry &table,
                              physical_index_vector_t<idx_t> column_index_map_p)
-    : PhysicalOperator(PhysicalOperatorType::EXTENSION, op.types, 1), table(&table), schema(nullptr),
+    : PhysicalOperator(physical_plan, PhysicalOperatorType::EXTENSION, op.types, 1), table(&table), schema(nullptr),
       column_index_map(std::move(column_index_map_p)) {
 }
 
-IcebergInsert::IcebergInsert(LogicalOperator &op, SchemaCatalogEntry &schema, unique_ptr<BoundCreateTableInfo> info)
-    : PhysicalOperator(PhysicalOperatorType::EXTENSION, op.types, 1), table(nullptr), schema(&schema),
+IcebergInsert::IcebergInsert(PhysicalPlan &physical_plan, LogicalOperator &op, SchemaCatalogEntry &schema,
+                             unique_ptr<BoundCreateTableInfo> info)
+    : PhysicalOperator(physical_plan, PhysicalOperatorType::EXTENSION, op.types, 1), table(nullptr), schema(&schema),
       info(std::move(info)) {
 }
@@ -371,7 +370,8 @@ PhysicalOperator &IRCatalog::PlanInsert(ClientContext &context, PhysicalPlanGene
 	if (op.return_chunk) {
 		throw BinderException("RETURNING clause not yet supported for insertion into Iceberg table");
 	}
-	if (op.action_type != OnConflictAction::THROW) {
+
+	if (op.on_conflict_info.action_type != OnConflictAction::THROW) {
 		throw BinderException("ON CONFLICT clause not yet supported for insertion into Iceberg table");
 	}
diff --git a/src/storage/irc_catalog.cpp b/src/storage/irc_catalog.cpp
index df71d23c..09f19a89 100644
--- a/src/storage/irc_catalog.cpp
+++ b/src/storage/irc_catalog.cpp
@@ -459,8 +459,9 @@ void IRCatalog::SetAWSCatalogOptions(IcebergAttachOptions &attach_options,
 	}
 }
 
-unique_ptr<Catalog> IRCatalog::Attach(StorageExtensionInfo *storage_info, ClientContext &context, AttachedDatabase &db,
-                                      const string &name, AttachInfo &info, AccessMode access_mode) {
+unique_ptr<Catalog> IRCatalog::Attach(optional_ptr<StorageExtensionInfo> storage_info, ClientContext &context,
+                                      AttachedDatabase &db, const string &name, AttachInfo &info,
+                                      AttachOptions &options) {
 	IRCEndpointBuilder endpoint_builder;
 
 	string endpoint_type_string;
@@ -569,7 +570,7 @@
 	}
 	D_ASSERT(auth_handler);
 
-	auto catalog = make_uniq<IRCatalog>(db, access_mode, std::move(auth_handler), attach_options);
+	auto catalog = make_uniq<IRCatalog>(db, options.access_mode, std::move(auth_handler), attach_options);
 	catalog->GetConfig(context, endpoint_type);
 	return std::move(catalog);
 }
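[Note: AccessMode now travels inside AttachOptions, and storage_info arrives as optional_ptr. For context, a sketch of how these callbacks are typically wired into the storage extension that LoadInternal registers; the constructor body is our illustration, since only the class name and the two callbacks appear in this patch:

class IRCStorageExtension : public StorageExtension {
public:
	IRCStorageExtension() {
		// Both callbacks now take optional_ptr<StorageExtensionInfo>;
		// Attach receives AttachOptions instead of a bare AccessMode.
		attach = IRCatalog::Attach;
		create_transaction_manager = CreateTransactionManager;
	}
};
]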
diff --git a/src/storage/irc_table_entry.cpp b/src/storage/irc_table_entry.cpp
index b8536846..aa2360a3 100644
--- a/src/storage/irc_table_entry.cpp
+++ b/src/storage/irc_table_entry.cpp
@@ -3,7 +3,7 @@
 #include "storage/irc_table_entry.hpp"
 #include "duckdb/storage/statistics/base_statistics.hpp"
 #include "duckdb/storage/table_storage_info.hpp"
-#include "duckdb/main/extension_util.hpp"
+#include "duckdb/main/extension/extension_loader.hpp"
 #include "duckdb/main/database.hpp"
 #include "duckdb/main/secret/secret_manager.hpp"
 #include "duckdb/catalog/catalog_entry/table_function_catalog_entry.hpp"
@@ -94,7 +94,14 @@ string ICTableEntry::PrepareIcebergScanFromEntry(ClientContext &context) const {
 TableFunction ICTableEntry::GetScanFunction(ClientContext &context, unique_ptr<FunctionData> &bind_data,
                                             const EntryLookupInfo &lookup) {
 	auto &db = DatabaseInstance::GetDatabase(context);
-	auto &iceberg_scan_function_set = ExtensionUtil::GetTableFunction(db, "iceberg_scan");
+	auto &system_catalog = Catalog::GetSystemCatalog(db);
+	auto data = CatalogTransaction::GetSystemTransaction(db);
+	auto &catalog_schema = system_catalog.GetSchema(data, DEFAULT_SCHEMA);
+	auto catalog_entry = catalog_schema.GetEntry(data, CatalogType::TABLE_FUNCTION_ENTRY, "iceberg_scan");
+	if (!catalog_entry) {
+		throw InvalidInputException("Function with name \"iceberg_scan\" not found!");
+	}
+	auto &iceberg_scan_function_set = catalog_entry->Cast<TableFunctionCatalogEntry>();
 	auto iceberg_scan_function =
 	    iceberg_scan_function_set.functions.GetFunctionByArguments(context, {LogicalType::VARCHAR});
 	auto storage_location = PrepareIcebergScanFromEntry(context);
@@ -117,9 +124,9 @@ TableFunction ICTableEntry::GetScanFunction(ClientContext &context, unique_ptr
 		schema_id = table_info.load_table_result.metadata.current_schema_id;
 	}
-	auto schema = metadata.GetSchemaFromId(schema_id);
-	auto scan_info =
-	    make_shared_ptr<IcebergScanInfo>(table_info.load_table_result.metadata_location, metadata, snapshot, *schema);
+	auto iceberg_schema = metadata.GetSchemaFromId(schema_id);
+	auto scan_info = make_shared_ptr<IcebergScanInfo>(table_info.load_table_result.metadata_location, metadata,
+	                                                  snapshot, *iceberg_schema);
 	if (table_info.transaction_data) {
 		scan_info->transaction_data = table_info.transaction_data.get();
 	}
diff --git a/test/sql/cloud/r2_catalog/test_r2_attach_and_read.test b/test/sql/cloud/r2_catalog/test_r2_attach_and_read.test
index 4d83e20f..0e305fc6 100644
--- a/test/sql/cloud/r2_catalog/test_r2_attach_and_read.test
+++ b/test/sql/cloud/r2_catalog/test_r2_attach_and_read.test
@@ -32,7 +32,7 @@ attach '6b17833f308abc1e1cc343c552b51f51_r2-catalog' AS my_datalake (
 );
 
 statement ok
-pragma enable_logging('HTTP');
+CALL enable_logging('HTTP');
 
 statement ok
 select * from my_datalake.default.people;
diff --git a/test/sql/cloud/s3tables/test_logging_aws.test b/test/sql/cloud/s3tables/test_logging_aws.test
index 839a6f13..ef15163a 100644
--- a/test/sql/cloud/s3tables/test_logging_aws.test
+++ b/test/sql/cloud/s3tables/test_logging_aws.test
@@ -19,7 +19,7 @@ require httpfs
 require aws
 
 statement ok
-pragma enable_logging('HTTP');
+CALL enable_logging('HTTP');
 
 statement ok
 CREATE SECRET s3table_secret (
diff --git a/test/sql/local/iceberg_scans/filtering_on_bounds.test b/test/sql/local/iceberg_scans/filtering_on_bounds.test
index ff36952e..f2f4b962 100644
--- a/test/sql/local/iceberg_scans/filtering_on_bounds.test
+++ b/test/sql/local/iceberg_scans/filtering_on_bounds.test
@@ -19,7 +19,7 @@
 statement ok
 create view my_datalake.default.filtering_on_bounds as select * from ICEBERG_SCAN('__WORKING_DIRECTORY__/data/generated/iceberg/spark-local/default/filtering_on_bounds');
 
 statement ok
-pragma enable_logging('Iceberg');
+CALL enable_logging('Iceberg');
 
 # 5 snapshots that each add 1000 rows (incremental)
 query I
@@ -28,7 +28,7 @@ select count(*) from my_datalake.default.filtering_on_bounds;
 5000
 
 statement ok
-pragma truncate_duckdb_logs;
+CALL truncate_duckdb_logs();
 
 query I
 select count(*) from my_datalake.default.filtering_on_bounds where col1 > 500;
@@ -48,7 +48,7 @@ ON logs.msg = meta.file_path;
 500
 
 statement ok
-pragma truncate_duckdb_logs;
+CALL truncate_duckdb_logs();
 
 query I
 select count(*) from my_datalake.default.filtering_on_bounds where col1 > 1500;
@@ -67,7 +67,7 @@ ON logs.msg = meta.file_path;
 1500
 
 statement ok
-pragma truncate_duckdb_logs;
+CALL truncate_duckdb_logs();
 
 query I
 select count(*) from my_datalake.default.filtering_on_bounds where col1 >= 2300 and col1 < 3500;
diff --git a/test/sql/local/iceberg_scans/filtering_on_partition_bounds.test b/test/sql/local/iceberg_scans/filtering_on_partition_bounds.test
index fd95e4eb..4ff2409f 100644
--- a/test/sql/local/iceberg_scans/filtering_on_partition_bounds.test
+++ b/test/sql/local/iceberg_scans/filtering_on_partition_bounds.test
@@ -19,7 +19,7 @@
 statement ok
 create view my_datalake.default.filtering_on_partition_bounds as select * from ICEBERG_SCAN('__WORKING_DIRECTORY__/data/generated/iceberg/spark-local/default/filtering_on_partition_bounds');
 
 statement ok
-pragma enable_logging('Iceberg');
+CALL enable_logging('Iceberg');
 
 # 5 snapshots that each add 1000 rows (incremental)
 query I
@@ -28,7 +28,7 @@ select count(*) from my_datalake.default.filtering_on_partition_bounds;
 5000
 
 statement ok
-pragma truncate_duckdb_logs;
+CALL truncate_duckdb_logs();
 
 query I
 select count(*) from my_datalake.default.filtering_on_partition_bounds where seq = 1
@@ -51,7 +51,7 @@ ON logs.msg = meta.manifest_path;
 
 
 statement ok
-pragma truncate_duckdb_logs;
+CALL truncate_duckdb_logs();
 
 query I
 select count(*) from my_datalake.default.filtering_on_partition_bounds where seq > 1 and seq < 4;
@@ -70,7 +70,7 @@ ON logs.msg = meta.manifest_path;
 3000
 
 statement ok
-pragma truncate_duckdb_logs;
+CALL truncate_duckdb_logs();
 
 query I
 select count(*) from my_datalake.default.filtering_on_partition_bounds where seq >= 2 and seq <= 3;
diff --git a/test/sql/local/iceberg_scans/iceberg_cardinality_estimates.test b/test/sql/local/iceberg_scans/iceberg_cardinality_estimates.test
index fbb42711..4985ce5b 100644
--- a/test/sql/local/iceberg_scans/iceberg_cardinality_estimates.test
+++ b/test/sql/local/iceberg_scans/iceberg_cardinality_estimates.test
@@ -35,7 +35,7 @@ select count(*) from ICEBERG_METADATA('__WORKING_DIRECTORY__/data/generated/iceb
 query II
 explain select * from my_datalake.default.lineitem_001_deletes;
 ----
-physical_plan :.*~60175.*
+physical_plan :.*~60,175.*
 
 
 # check we estimated it correctly
@@ -59,7 +59,7 @@ select count(*) from ICEBERG_METADATA('__WORKING_DIRECTORY__/data/generated/iceb
 query II
 explain select count(*) from ICEBERG_SCAN('__WORKING_DIRECTORY__/data/generated/iceberg/spark-local/default/many_adds_deletes');
 ----
-physical_plan :.*~40000.*
+physical_plan :.*~40,000.*
 
 
 query I
diff --git a/test/sql/local/iceberg_scans/iceberg_scan_generated_data_0_001.test b/test/sql/local/iceberg_scans/iceberg_scan_generated_data_0_001.test
index fcff3ded..a693ad25 100644
--- a/test/sql/local/iceberg_scans/iceberg_scan_generated_data_0_001.test
+++ b/test/sql/local/iceberg_scans/iceberg_scan_generated_data_0_001.test
@@ -145,4 +145,4 @@ Binder Error
 query II
 EXPLAIN SELECT count(*) FROM my_datalake.default.pyspark_iceberg_table_v2;
 ----
-physical_plan :.*ICEBERG_SCAN.*Rows.*
+physical_plan :.*ICEBERG_SCAN.*rows.*
diff --git a/test/sql/local/iceberg_scans/is_not_null.test b/test/sql/local/iceberg_scans/is_not_null.test
index a6830324..fb0426bd 100644
--- a/test/sql/local/iceberg_scans/is_not_null.test
+++ b/test/sql/local/iceberg_scans/is_not_null.test
@@ -19,7 +19,7 @@
 statement ok
 create view my_datalake.default.is_not_null as select * from ICEBERG_SCAN('__WORKING_DIRECTORY__/data/persistent/is_null_is_not_null');
 
 statement ok
-pragma enable_logging('Iceberg');
+CALL enable_logging('Iceberg');
 
 query II
 select * from my_datalake.default.is_not_null ORDER BY id ASC;
@@ -34,7 +34,7 @@ select * from my_datalake.default.is_not_null ORDER BY id ASC;
 8	blah
 
 statement ok
-pragma truncate_duckdb_logs;
+CALL truncate_duckdb_logs();
 
 query II
 select * from my_datalake.default.is_not_null WHERE value IS NULL ORDER BY id ASC;
@@ -57,7 +57,7 @@ ON logs.msg = meta.file_path;
 3
 
 statement ok
-pragma truncate_duckdb_logs;
+CALL truncate_duckdb_logs();
 
 query II
 select * from my_datalake.default.is_not_null WHERE value IS NOT NULL ORDER BY id ASC;
diff --git a/test/sql/local/iceberg_scans/uuid_type.test b/test/sql/local/iceberg_scans/uuid_type.test
index f00e5f08..7115ee5d 100644
--- a/test/sql/local/iceberg_scans/uuid_type.test
+++ b/test/sql/local/iceberg_scans/uuid_type.test
@@ -12,7 +12,7 @@ require httpfs
 require no_extension_autoloading "EXPECTED: enable logging is not aware of where 'Iceberg' option comes from"
 
 statement ok
-pragma enable_logging('Iceberg');
+call enable_logging('Iceberg');
 
 query I
 select * from ICEBERG_SCAN('__WORKING_DIRECTORY__/data/persistent/uuid');
@@ -29,7 +29,7 @@
 f9f28465-51cf-45f1-8985-e01d9a82253c
 7fae299c-cf05-4777-9b42-57a52e1415ed
 
 statement ok
-pragma truncate_duckdb_logs;
+CALL truncate_duckdb_logs();
 
 query I
 select * from ICEBERG_SCAN('__WORKING_DIRECTORY__/data/persistent/uuid') WHERE "uuid" = '1571effb-facd-42a3-90e9-0af522e9b6c2';
diff --git a/test/sql/local/irc/create/test_create_and_delete_schema_table.test b/test/sql/local/irc/create/test_create_and_delete_schema_table.test
index d4b8b009..d6bd95d2 100644
--- a/test/sql/local/irc/create/test_create_and_delete_schema_table.test
+++ b/test/sql/local/irc/create/test_create_and_delete_schema_table.test
@@ -16,7 +16,7 @@ require httpfs
 set ignore_error_messages
 
 statement ok
-pragma enable_logging('HTTP');
+CALL enable_logging('HTTP');
 
 statement ok
 set logging_level='debug'
diff --git a/test/sql/local/irc/create/test_create_schema.test b/test/sql/local/irc/create/test_create_schema.test
index b1d795b8..1cdd49ff 100644
--- a/test/sql/local/irc/create/test_create_schema.test
+++ b/test/sql/local/irc/create/test_create_schema.test
@@ -16,7 +16,7 @@ require httpfs
 set ignore_error_messages
 
 statement ok
-pragma enable_logging('HTTP');
+CALL enable_logging('HTTP');
 
 statement ok
 set logging_level='debug'
diff --git a/test/sql/local/irc/create/test_create_table.test b/test/sql/local/irc/create/test_create_table.test
index 228cb378..cc4a830a 100644
--- a/test/sql/local/irc/create/test_create_table.test
+++ b/test/sql/local/irc/create/test_create_table.test
@@ -16,7 +16,7 @@ require httpfs
 set ignore_error_messages
 
 statement ok
-pragma enable_logging('HTTP');
+CALL enable_logging('HTTP');
 
 statement ok
 set logging_level='debug'
diff --git a/test/sql/local/irc/create/test_create_table_as.test b/test/sql/local/irc/create/test_create_table_as.test
index c32261fe..7ba4196c 100644
--- a/test/sql/local/irc/create/test_create_table_as.test
+++ b/test/sql/local/irc/create/test_create_table_as.test
@@ -16,7 +16,7 @@ require httpfs
 set ignore_error_messages
 
 statement ok
-pragma enable_logging('HTTP');
+CALL enable_logging('HTTP');
 
 statement ok
 set logging_level='debug'
diff --git a/test/sql/local/irc/delete/delete_table.test b/test/sql/local/irc/delete/delete_table.test
index db5d5c08..94ac568b 100644
--- a/test/sql/local/irc/delete/delete_table.test
+++ b/test/sql/local/irc/delete/delete_table.test
@@ -16,7 +16,7 @@ require httpfs
 set ignore_error_messages
 
 statement ok
-pragma enable_logging('HTTP');
+CALL enable_logging('HTTP');
 
 statement ok
 set logging_level='debug'
@@ -96,7 +96,7 @@
 statement ok
 create table my_datalake.default.test_purge_table as select range a from range(100);
 
 statement ok
-pragma truncate_duckdb_logs
+CALL truncate_duckdb_logs()
 
 # purgeRequested = true from attach option.
 statement ok
@@ -124,7 +124,7 @@
 statement ok
 create table my_datalake.default.test_purge_table as select range a from range(100);
 
 statement ok
-pragma truncate_duckdb_logs
+CALL truncate_duckdb_logs()
 
 # purgeRequested = false from attach option.
 statement ok
diff --git a/test/sql/local/irc/iceberg_catalog_eager_refresh.test b/test/sql/local/irc/iceberg_catalog_eager_refresh.test
index 1389b9f7..43a9ad0c 100644
--- a/test/sql/local/irc/iceberg_catalog_eager_refresh.test
+++ b/test/sql/local/irc/iceberg_catalog_eager_refresh.test
@@ -16,7 +16,7 @@ require httpfs
 set ignore_error_messages
 
 statement ok
-pragma enable_logging('HTTP');
+CALL enable_logging('HTTP');
 
 statement ok
 set logging_level='debug'
@@ -41,7 +41,7 @@ ATTACH '' AS my_datalake (
 );
 
 statement ok
-pragma truncate_duckdb_logs;
+CALL truncate_duckdb_logs();
 
 
 statement ok
@@ -64,7 +64,7 @@
 http://127.0.0.1:8181/v1/namespaces/default/tables/table_unpartitioned	OK
 http://127.0.0.1:8181/v1/namespaces/default/tables/table_more_deletes	OK
 
 statement ok
-pragma truncate_duckdb_logs;
+CALL truncate_duckdb_logs();
 
 statement ok
 select * from my_datalake.default.table_more_deletes order by all;
diff --git a/test/sql/local/irc/iceberg_catalog_read.test b/test/sql/local/irc/iceberg_catalog_read.test
index 22fc8a95..929d70de 100644
--- a/test/sql/local/irc/iceberg_catalog_read.test
+++ b/test/sql/local/irc/iceberg_catalog_read.test
@@ -16,7 +16,7 @@ require httpfs
 set ignore_error_messages
 
 statement ok
-pragma enable_logging('HTTP');
+CALL enable_logging('HTTP');
 
 statement ok
 set logging_level='debug'
@@ -56,7 +56,7 @@ SELECT request.url, response.reason FROM duckdb_logs_parsed('HTTP') WHERE reques
 http://127.0.0.1:8181/v1/oauth/tokens	OK
 
 statement ok
-pragma truncate_duckdb_logs;
+CALL truncate_duckdb_logs();
 
 query III
 select * from my_datalake.default.table_unpartitioned order by all;
diff --git a/test/sql/local/irc/inferred_endpoint_from_secret.test b/test/sql/local/irc/inferred_endpoint_from_secret.test
index 8eaacbfc..b33da179 100644
--- a/test/sql/local/irc/inferred_endpoint_from_secret.test
+++ b/test/sql/local/irc/inferred_endpoint_from_secret.test
@@ -17,7 +17,7 @@ set ignore_error_messages
 
 # Test catalog secret and storage secret
 statement ok
-pragma enable_logging('Iceberg')
+CALL enable_logging('Iceberg')
 
 statement ok
 CREATE SECRET storage_secret(
diff --git a/test/sql/local/irc/insert/iceberg_to_ducklake.test b/test/sql/local/irc/insert/iceberg_to_ducklake.test
index 3e18ed7a..e381ef2f 100644
--- a/test/sql/local/irc/insert/iceberg_to_ducklake.test
+++ b/test/sql/local/irc/insert/iceberg_to_ducklake.test
@@ -16,6 +16,8 @@ require ducklake
 
 # Do not ignore 'HTTP' error messages!
 set ignore_error_messages
 
+mode skip
+
 statement ok
 set enable_logging=true
diff --git a/test/sql/local/irc/test_basic_deletion_vectors.test b/test/sql/local/irc/test_basic_deletion_vectors.test
index 35705ea5..ce4d6ecb 100644
--- a/test/sql/local/irc/test_basic_deletion_vectors.test
+++ b/test/sql/local/irc/test_basic_deletion_vectors.test
@@ -19,7 +19,7 @@
 statement ok
 create view my_datalake.default.deletion_vectors as select * from ICEBERG_SCAN('data/generated/iceberg/spark-local/default/deletion_vectors');
 
 statement ok
-pragma enable_logging('Iceberg');
+CALL enable_logging('Iceberg');
 
 query I
 select count(*) from my_datalake.default.deletion_vectors
diff --git a/test/sql/local/irc/test_create_secret_attach_combinations.test b/test/sql/local/irc/test_create_secret_attach_combinations.test
index c4cba217..5c4db9ad 100644
--- a/test/sql/local/irc/test_create_secret_attach_combinations.test
+++ b/test/sql/local/irc/test_create_secret_attach_combinations.test
@@ -17,7 +17,7 @@ set ignore_error_messages
 
 # Test catalog secret and storage secret
 statement ok
-pragma enable_logging('HTTP');
+CALL enable_logging('HTTP');
 
 statement ok
 set logging_level='debug'
diff --git a/test/sql/local/irc/test_minimal_head_requests.test b/test/sql/local/irc/test_minimal_head_requests.test
index 00215f1c..eff4496a 100644
--- a/test/sql/local/irc/test_minimal_head_requests.test
+++ b/test/sql/local/irc/test_minimal_head_requests.test
@@ -16,7 +16,7 @@ require httpfs
 set ignore_error_messages
 
 statement ok
-pragma enable_logging('HTTP');
+CALL enable_logging('HTTP');
 
 
 statement ok
diff --git a/test/sql/local/partitioning/day/day_timestamp.test b/test/sql/local/partitioning/day/day_timestamp.test
index 0f3b5f2e..2a023ad3 100644
--- a/test/sql/local/partitioning/day/day_timestamp.test
+++ b/test/sql/local/partitioning/day/day_timestamp.test
@@ -12,7 +12,7 @@ require httpfs
 require-env DUCKDB_ICEBERG_HAVE_GENERATED_DATA
 
 statement ok
-pragma enable_logging('Iceberg');
+CALL enable_logging('Iceberg');
 
 statement ok
 set enable_logging=false;
@@ -31,7 +31,7 @@ select * from ICEBERG_SCAN('data/generated/iceberg/spark-local/default/day_times
 NULL	99999	null_event
 
 statement ok
-pragma truncate_duckdb_logs;
+CALL truncate_duckdb_logs();
 
 query I
 select user_id from ICEBERG_SCAN('data/generated/iceberg/spark-local/default/day_timestamp')
@@ -63,7 +63,7 @@ ON logs.msg = meta.manifest_path;
 5
 
 statement ok
-pragma truncate_duckdb_logs;
+CALL truncate_duckdb_logs();
 
 query I
 select user_id from ICEBERG_SCAN('data/generated/iceberg/spark-local/default/day_timestamp')
@@ -88,7 +88,7 @@ ON logs.msg = meta.file_path;
 1
 
 statement ok
-pragma truncate_duckdb_logs;
+CALL truncate_duckdb_logs();
 
 # Test filtering by day
 query I
@@ -109,7 +109,7 @@ ON logs.msg = meta.file_path;
 2
 
 statement ok
-pragma truncate_duckdb_logs;
+CALL truncate_duckdb_logs();
 
 query I
 select user_id from ICEBERG_SCAN('data/generated/iceberg/spark-local/default/day_timestamp')
@@ -129,7 +129,7 @@ ON logs.msg = meta.file_path;
 2
 
 statement ok
-pragma truncate_duckdb_logs;
+CALL truncate_duckdb_logs();
 
 query I
 select user_id from ICEBERG_SCAN('data/generated/iceberg/spark-local/default/day_timestamp')
@@ -149,7 +149,7 @@ ON logs.msg = meta.file_path;
 2
 
 statement ok
-pragma truncate_duckdb_logs;
+CALL truncate_duckdb_logs();
 
 # Test filtering by timestamp range
 query I
@@ -170,4 +170,4 @@ ON logs.msg = meta.file_path;
 2
 
 statement ok
-pragma truncate_duckdb_logs;
+CALL truncate_duckdb_logs();
diff --git a/test/sql/local/partitioning/year/year_timestamp.test b/test/sql/local/partitioning/year/year_timestamp.test
index 589f534d..9b61ee08 100644
--- a/test/sql/local/partitioning/year/year_timestamp.test
+++ b/test/sql/local/partitioning/year/year_timestamp.test
@@ -12,7 +12,7 @@ require httpfs
 require-env DUCKDB_ICEBERG_HAVE_GENERATED_DATA
 
 statement ok
-pragma enable_logging('Iceberg');
+CALL enable_logging('Iceberg');
 
 statement ok
 set enable_logging=false;
@@ -31,7 +31,7 @@ select * from ICEBERG_SCAN('__WORKING_DIRECTORY__/data/generated/iceberg/spark-l
 NULL	99999	null_event
 
 statement ok
-pragma truncate_duckdb_logs;
+CALL truncate_duckdb_logs();
 
 query I
 select user_id from ICEBERG_SCAN('__WORKING_DIRECTORY__/data/generated/iceberg/spark-local/default/year_timestamp')
@@ -63,7 +63,7 @@ ON logs.msg = meta.manifest_path;
 5
 
 statement ok
-pragma truncate_duckdb_logs;
+CALL truncate_duckdb_logs();
 
 query I
 select user_id from ICEBERG_SCAN('__WORKING_DIRECTORY__/data/generated/iceberg/spark-local/default/year_timestamp')
@@ -88,7 +88,7 @@ ON logs.msg = meta.file_path;
 1
 
 statement ok
-pragma truncate_duckdb_logs;
+CALL truncate_duckdb_logs();
 
 # Test filtering by year
 query I
@@ -109,7 +109,7 @@ ON logs.msg = meta.manifest_path;
 2
 
 statement ok
-pragma truncate_duckdb_logs;
+CALL truncate_duckdb_logs();
 
 query I
 select user_id from ICEBERG_SCAN('__WORKING_DIRECTORY__/data/generated/iceberg/spark-local/default/year_timestamp')
@@ -129,7 +129,7 @@ ON logs.msg = meta.manifest_path;
 2
 
 statement ok
-pragma truncate_duckdb_logs;
+CALL truncate_duckdb_logs();
 
 query I
 select user_id from ICEBERG_SCAN('__WORKING_DIRECTORY__/data/generated/iceberg/spark-local/default/year_timestamp')
@@ -149,7 +149,7 @@ ON logs.msg = meta.manifest_path;
 2
 
 statement ok
-pragma truncate_duckdb_logs;
+CALL truncate_duckdb_logs();
 
 # Test filtering by timestamp range
 query I
@@ -170,4 +170,4 @@ ON logs.msg = meta.manifest_path;
 2
 
 statement ok
-pragma truncate_duckdb_logs;
+CALL truncate_duckdb_logs();
diff --git a/test/sql/local/struct_filter_issue.test b/test/sql/local/struct_filter_issue.test
index 56a276de..850c5155 100644
--- a/test/sql/local/struct_filter_issue.test
+++ b/test/sql/local/struct_filter_issue.test
@@ -21,7 +21,7 @@
 statement ok
 set logging_level='debug'
 
 statement ok
-pragma enable_logging('Iceberg');
+CALL enable_logging('Iceberg');
 
 # FIXME: this becomes a ConjunctionAndFilter on 'partition', with the 2 children being StructExtract
 # Which we don't enter yet
@@ -42,7 +42,7 @@ ON logs.msg = meta.file_path;
 NULL
 
 statement ok
-pragma truncate_duckdb_logs;
+CALL truncate_duckdb_logs();
 
 # The issue here was that we expected to find redpanda.timestamp (that's where the partition is on),
 # now we correctly return nullptr instead of error
@@ -63,7 +63,7 @@ ON logs.msg = meta.file_path;
 NULL
 
 statement ok
-pragma truncate_duckdb_logs;
+CALL truncate_duckdb_logs();
 
 query II
 SELECT * FROM iceberg_scan('__WORKING_DIRECTORY__/data/persistent/struct_filter_issue') WHERE redpanda.timestamp = '2026-06-26 19:45:32.478';
@@ -81,7 +81,7 @@ ON logs.msg = meta.manifest_path;
 1
 
 statement ok
-pragma truncate_duckdb_logs;
+CALL truncate_duckdb_logs();
 
 query II
 SELECT * FROM iceberg_scan('__WORKING_DIRECTORY__/data/persistent/struct_filter_issue') WHERE redpanda.timestamp = '2025-06-26 19:45:32.478';