From 439fe941e1c840b7b7e8be73e3ba96fef768cc25 Mon Sep 17 00:00:00 2001 From: Yves Date: Fri, 15 Nov 2024 10:28:58 +0100 Subject: [PATCH] Fail compilation on warnings --- Makefile | 4 +- include/pgduckdb/scan/postgres_scan.hpp | 2 +- include/pgduckdb/types/decimal.hpp | 4 +- include/pgduckdb/utility/allocator.hpp | 2 +- src/catalog/pgduckdb_catalog.cpp | 47 ++++----- src/catalog/pgduckdb_schema.cpp | 35 +++---- src/catalog/pgduckdb_storage.cpp | 3 +- src/catalog/pgduckdb_table.cpp | 7 +- src/catalog/pgduckdb_transaction_manager.cpp | 3 +- src/pgduckdb_background_worker.cpp | 4 +- src/pgduckdb_ddl.cpp | 6 +- src/pgduckdb_detoast.cpp | 3 + src/pgduckdb_metadata_cache.cpp | 4 +- src/pgduckdb_node.cpp | 6 +- src/pgduckdb_options.cpp | 1 + src/pgduckdb_planner.cpp | 2 +- src/pgduckdb_table_am.cpp | 104 ++++++++----------- src/pgduckdb_types.cpp | 20 ++-- src/pgduckdb_xact.cpp | 2 +- src/scan/postgres_scan.cpp | 4 +- src/scan/postgres_seq_scan.cpp | 10 +- 21 files changed, 125 insertions(+), 148 deletions(-) diff --git a/Makefile b/Makefile index afef3475..283c320a 100644 --- a/Makefile +++ b/Makefile @@ -35,8 +35,8 @@ endif DUCKDB_LIB = libduckdb$(DLSUFFIX) FULL_DUCKDB_LIB = third_party/duckdb/build/$(DUCKDB_BUILD_TYPE)/src/$(DUCKDB_LIB) -override PG_CPPFLAGS += -Iinclude -Ithird_party/duckdb/src/include -Ithird_party/duckdb/third_party/re2 -override PG_CXXFLAGS += -std=c++17 -Wno-sign-compare -Wno-register ${DUCKDB_BUILD_CXX_FLAGS} +override PG_CPPFLAGS += -Iinclude -isystem third_party/duckdb/src/include -isystem third_party/duckdb/third_party/re2 +override PG_CXXFLAGS += -std=c++17 -Wno-sign-compare -Wno-register -Wshadow -Wswitch -Wunused-parameter -Wunreachable-code -Wno-unknown-pragmas ${DUCKDB_BUILD_CXX_FLAGS} -Wall -Wextra -Werror SHLIB_LINK += -Wl,-rpath,$(PG_LIB)/ -lpq -Lthird_party/duckdb/build/$(DUCKDB_BUILD_TYPE)/src -L$(PG_LIB) -lduckdb -lstdc++ -llz4 diff --git a/include/pgduckdb/scan/postgres_scan.hpp b/include/pgduckdb/scan/postgres_scan.hpp index defbdfb9..85fdf86e 100644 --- a/include/pgduckdb/scan/postgres_scan.hpp +++ b/include/pgduckdb/scan/postgres_scan.hpp @@ -43,7 +43,7 @@ class PostgresScanLocalState { } } - int m_output_vector_size; + uint32_t m_output_vector_size; bool m_exhausted_scan; std::vector> values; std::vector> nulls; diff --git a/include/pgduckdb/types/decimal.hpp b/include/pgduckdb/types/decimal.hpp index fa1af84f..6cd69516 100644 --- a/include/pgduckdb/types/decimal.hpp +++ b/include/pgduckdb/types/decimal.hpp @@ -294,7 +294,7 @@ struct DecimalConversionInteger { template static T - Finalize(const NumericVar &numeric, T result) { + Finalize(const NumericVar &, T result) { return result; } }; @@ -349,7 +349,7 @@ struct DecimalConversionHugeint { } static hugeint_t - Finalize(const NumericVar &numeric, hugeint_t result) { + Finalize(const NumericVar &, hugeint_t result) { return result; } }; diff --git a/include/pgduckdb/utility/allocator.hpp b/include/pgduckdb/utility/allocator.hpp index 35a4bd8f..f4f75f17 100644 --- a/include/pgduckdb/utility/allocator.hpp +++ b/include/pgduckdb/utility/allocator.hpp @@ -26,7 +26,7 @@ struct DuckDBMallocator { } void - deallocate(T *p, std::size_t n) noexcept { + deallocate(T *p, std::size_t) noexcept { duckdb_free(p); } }; diff --git a/src/catalog/pgduckdb_catalog.cpp b/src/catalog/pgduckdb_catalog.cpp index 6d62af1d..8c7665b2 100644 --- a/src/catalog/pgduckdb_catalog.cpp +++ b/src/catalog/pgduckdb_catalog.cpp @@ -15,18 +15,15 @@ PostgresCatalog::PostgresCatalog(duckdb::AttachedDatabase &db, const duckdb::str } 
duckdb::unique_ptr -PostgresCatalog::Attach(duckdb::StorageExtensionInfo *storage_info_p, duckdb::ClientContext &context, - duckdb::AttachedDatabase &db, const duckdb::string &name, duckdb::AttachInfo &info, - duckdb::AccessMode access_mode) { - auto connection_string = info.path; - return duckdb::make_uniq(db, connection_string, access_mode); +PostgresCatalog::Attach(duckdb::StorageExtensionInfo *, duckdb::ClientContext &, duckdb::AttachedDatabase &db, + const duckdb::string &, duckdb::AttachInfo &info, duckdb::AccessMode access_mode) { + return duckdb::make_uniq(db, info.path, access_mode); } // ------------------ Catalog API --------------------- void -PostgresCatalog::Initialize(bool load_builtin) { - return; +PostgresCatalog::Initialize(bool) { } duckdb::string @@ -35,14 +32,14 @@ PostgresCatalog::GetCatalogType() { } duckdb::optional_ptr -PostgresCatalog::CreateSchema(duckdb::CatalogTransaction transaction, duckdb::CreateSchemaInfo &info) { +PostgresCatalog::CreateSchema(duckdb::CatalogTransaction, duckdb::CreateSchemaInfo &) { throw duckdb::NotImplementedException("CreateSchema not supported yet"); } duckdb::optional_ptr -PostgresCatalog::GetSchema(duckdb::CatalogTransaction transaction, const duckdb::string &schema_name, - duckdb::OnEntryNotFound if_not_found, duckdb::QueryErrorContext error_context) { - auto &pg_transaction = transaction.transaction->Cast(); +PostgresCatalog::GetSchema(duckdb::CatalogTransaction catalog_transaction, const duckdb::string &schema_name, + duckdb::OnEntryNotFound, duckdb::QueryErrorContext) { + auto &pg_transaction = catalog_transaction.transaction->Cast(); auto res = pg_transaction.GetCatalogEntry(duckdb::CatalogType::SCHEMA_ENTRY, schema_name, ""); D_ASSERT(res); D_ASSERT(res->type == duckdb::CatalogType::SCHEMA_ENTRY); @@ -50,43 +47,41 @@ PostgresCatalog::GetSchema(duckdb::CatalogTransaction transaction, const duckdb: } void -PostgresCatalog::ScanSchemas(duckdb::ClientContext &context, - std::function callback) { - return; +PostgresCatalog::ScanSchemas(duckdb::ClientContext &, std::function) { } duckdb::unique_ptr -PostgresCatalog::PlanCreateTableAs(duckdb::ClientContext &context, duckdb::LogicalCreateTable &op, - duckdb::unique_ptr plan) { +PostgresCatalog::PlanCreateTableAs(duckdb::ClientContext &, duckdb::LogicalCreateTable &, + duckdb::unique_ptr) { throw duckdb::NotImplementedException("PlanCreateTableAs not supported yet"); } duckdb::unique_ptr -PostgresCatalog::PlanInsert(duckdb::ClientContext &context, duckdb::LogicalInsert &op, - duckdb::unique_ptr plan) { +PostgresCatalog::PlanInsert(duckdb::ClientContext &, duckdb::LogicalInsert &, + duckdb::unique_ptr) { throw duckdb::NotImplementedException("PlanInsert not supported yet"); } duckdb::unique_ptr -PostgresCatalog::PlanDelete(duckdb::ClientContext &context, duckdb::LogicalDelete &op, - duckdb::unique_ptr plan) { +PostgresCatalog::PlanDelete(duckdb::ClientContext &, duckdb::LogicalDelete &, + duckdb::unique_ptr) { throw duckdb::NotImplementedException("PlanDelete not supported yet"); } duckdb::unique_ptr -PostgresCatalog::PlanUpdate(duckdb::ClientContext &context, duckdb::LogicalUpdate &op, - duckdb::unique_ptr plan) { +PostgresCatalog::PlanUpdate(duckdb::ClientContext &, duckdb::LogicalUpdate &, + duckdb::unique_ptr) { throw duckdb::NotImplementedException("PlanUpdate not supported yet"); } duckdb::unique_ptr -PostgresCatalog::BindCreateIndex(duckdb::Binder &binder, duckdb::CreateStatement &stmt, - duckdb::TableCatalogEntry &table, duckdb::unique_ptr plan) { 
+PostgresCatalog::BindCreateIndex(duckdb::Binder &, duckdb::CreateStatement &, duckdb::TableCatalogEntry &, + duckdb::unique_ptr) { throw duckdb::NotImplementedException("BindCreateIndex not supported yet"); } duckdb::DatabaseSize -PostgresCatalog::GetDatabaseSize(duckdb::ClientContext &context) { +PostgresCatalog::GetDatabaseSize(duckdb::ClientContext &) { throw duckdb::NotImplementedException("GetDatabaseSize not supported yet"); } @@ -101,7 +96,7 @@ PostgresCatalog::GetDBPath() { } void -PostgresCatalog::DropSchema(duckdb::ClientContext &context, duckdb::DropInfo &info) { +PostgresCatalog::DropSchema(duckdb::ClientContext &, duckdb::DropInfo &) { throw duckdb::NotImplementedException("DropSchema not supported yet"); } diff --git a/src/catalog/pgduckdb_schema.cpp b/src/catalog/pgduckdb_schema.cpp index 126cb708..d032c1aa 100644 --- a/src/catalog/pgduckdb_schema.cpp +++ b/src/catalog/pgduckdb_schema.cpp @@ -12,81 +12,78 @@ PostgresSchema::PostgresSchema(duckdb::Catalog &catalog, duckdb::CreateSchemaInf } void -PostgresSchema::Scan(duckdb::ClientContext &context, duckdb::CatalogType type, - const std::function &callback) { - return; +PostgresSchema::Scan(duckdb::ClientContext &, duckdb::CatalogType, const std::function &) { } void -PostgresSchema::Scan(duckdb::CatalogType type, const std::function &callback) { +PostgresSchema::Scan(duckdb::CatalogType, const std::function &) { throw duckdb::NotImplementedException("Scan(no context) not supported yet"); } duckdb::optional_ptr -PostgresSchema::CreateIndex(duckdb::CatalogTransaction transaction, duckdb::CreateIndexInfo &info, - duckdb::TableCatalogEntry &table) { +PostgresSchema::CreateIndex(duckdb::CatalogTransaction, duckdb::CreateIndexInfo &, duckdb::TableCatalogEntry &) { throw duckdb::NotImplementedException("CreateIndex not supported yet"); } duckdb::optional_ptr -PostgresSchema::CreateFunction(duckdb::CatalogTransaction transaction, duckdb::CreateFunctionInfo &info) { +PostgresSchema::CreateFunction(duckdb::CatalogTransaction, duckdb::CreateFunctionInfo &) { throw duckdb::NotImplementedException("CreateFunction not supported yet"); } duckdb::optional_ptr -PostgresSchema::CreateTable(duckdb::CatalogTransaction transaction, duckdb::BoundCreateTableInfo &info) { +PostgresSchema::CreateTable(duckdb::CatalogTransaction, duckdb::BoundCreateTableInfo &) { throw duckdb::NotImplementedException("CreateTable not supported yet"); } duckdb::optional_ptr -PostgresSchema::CreateView(duckdb::CatalogTransaction transaction, duckdb::CreateViewInfo &info) { +PostgresSchema::CreateView(duckdb::CatalogTransaction, duckdb::CreateViewInfo &) { throw duckdb::NotImplementedException("CreateView not supported yet"); } duckdb::optional_ptr -PostgresSchema::CreateSequence(duckdb::CatalogTransaction transaction, duckdb::CreateSequenceInfo &info) { +PostgresSchema::CreateSequence(duckdb::CatalogTransaction, duckdb::CreateSequenceInfo &) { throw duckdb::NotImplementedException("CreateSequence not supported yet"); } duckdb::optional_ptr -PostgresSchema::CreateTableFunction(duckdb::CatalogTransaction transaction, duckdb::CreateTableFunctionInfo &info) { +PostgresSchema::CreateTableFunction(duckdb::CatalogTransaction, duckdb::CreateTableFunctionInfo &) { throw duckdb::NotImplementedException("CreateTableFunction not supported yet"); } duckdb::optional_ptr -PostgresSchema::CreateCopyFunction(duckdb::CatalogTransaction transaction, duckdb::CreateCopyFunctionInfo &info) { +PostgresSchema::CreateCopyFunction(duckdb::CatalogTransaction, duckdb::CreateCopyFunctionInfo &) { 
throw duckdb::NotImplementedException("CreateCopyFunction not supported yet"); } duckdb::optional_ptr -PostgresSchema::CreatePragmaFunction(duckdb::CatalogTransaction transaction, duckdb::CreatePragmaFunctionInfo &info) { +PostgresSchema::CreatePragmaFunction(duckdb::CatalogTransaction, duckdb::CreatePragmaFunctionInfo &) { throw duckdb::NotImplementedException("CreatePragmaFunction not supported yet"); } duckdb::optional_ptr -PostgresSchema::CreateCollation(duckdb::CatalogTransaction transaction, duckdb::CreateCollationInfo &info) { +PostgresSchema::CreateCollation(duckdb::CatalogTransaction, duckdb::CreateCollationInfo &) { throw duckdb::NotImplementedException("CreateCollation not supported yet"); } duckdb::optional_ptr -PostgresSchema::CreateType(duckdb::CatalogTransaction transaction, duckdb::CreateTypeInfo &info) { +PostgresSchema::CreateType(duckdb::CatalogTransaction, duckdb::CreateTypeInfo &) { throw duckdb::NotImplementedException("CreateType not supported yet"); } duckdb::optional_ptr -PostgresSchema::GetEntry(duckdb::CatalogTransaction transaction, duckdb::CatalogType type, +PostgresSchema::GetEntry(duckdb::CatalogTransaction catalog_transaction, duckdb::CatalogType type, const duckdb::string &entry_name) { - auto &pg_transaction = transaction.transaction->Cast(); + auto &pg_transaction = catalog_transaction.transaction->Cast(); return pg_transaction.GetCatalogEntry(type, name, entry_name); } void -PostgresSchema::DropEntry(duckdb::ClientContext &context, duckdb::DropInfo &info) { +PostgresSchema::DropEntry(duckdb::ClientContext &, duckdb::DropInfo &) { throw duckdb::NotImplementedException("DropEntry not supported yet"); } void -PostgresSchema::Alter(duckdb::CatalogTransaction transaction, duckdb::AlterInfo &info) { +PostgresSchema::Alter(duckdb::CatalogTransaction, duckdb::AlterInfo &) { throw duckdb::NotImplementedException("Alter not supported yet"); } diff --git a/src/catalog/pgduckdb_storage.cpp b/src/catalog/pgduckdb_storage.cpp index d3e1d982..2362f178 100644 --- a/src/catalog/pgduckdb_storage.cpp +++ b/src/catalog/pgduckdb_storage.cpp @@ -7,8 +7,7 @@ namespace pgduckdb { static duckdb::unique_ptr -CreateTransactionManager(duckdb::StorageExtensionInfo *storage_info, duckdb::AttachedDatabase &db, - duckdb::Catalog &catalog) { +CreateTransactionManager(duckdb::StorageExtensionInfo *, duckdb::AttachedDatabase &db, duckdb::Catalog &catalog) { return duckdb::make_uniq(db, catalog.Cast()); } diff --git a/src/catalog/pgduckdb_table.cpp b/src/catalog/pgduckdb_table.cpp index 1a3af005..05ecffa0 100644 --- a/src/catalog/pgduckdb_table.cpp +++ b/src/catalog/pgduckdb_table.cpp @@ -65,19 +65,18 @@ PostgresHeapTable::PostgresHeapTable(duckdb::Catalog &catalog, duckdb::SchemaCat } duckdb::unique_ptr -PostgresHeapTable::GetStatistics(duckdb::ClientContext &context, duckdb::column_t column_id) { +PostgresHeapTable::GetStatistics(duckdb::ClientContext &, duckdb::column_t) { throw duckdb::NotImplementedException("GetStatistics not supported yet"); } duckdb::TableFunction -PostgresHeapTable::GetScanFunction(duckdb::ClientContext &context, - duckdb::unique_ptr &bind_data) { +PostgresHeapTable::GetScanFunction(duckdb::ClientContext &, duckdb::unique_ptr &bind_data) { bind_data = duckdb::make_uniq(rel, cardinality, snapshot); return PostgresSeqScanFunction(); } duckdb::TableStorageInfo -PostgresHeapTable::GetStorageInfo(duckdb::ClientContext &context) { +PostgresHeapTable::GetStorageInfo(duckdb::ClientContext &) { throw duckdb::NotImplementedException("GetStorageInfo not supported yet"); } 
diff --git a/src/catalog/pgduckdb_transaction_manager.cpp b/src/catalog/pgduckdb_transaction_manager.cpp index 9317f991..7e6e4551 100644 --- a/src/catalog/pgduckdb_transaction_manager.cpp +++ b/src/catalog/pgduckdb_transaction_manager.cpp @@ -42,8 +42,7 @@ PostgresTransactionManager::RollbackTransaction(duckdb::Transaction &transaction } void -PostgresTransactionManager::Checkpoint(duckdb::ClientContext &context, bool force) { - return; +PostgresTransactionManager::Checkpoint(duckdb::ClientContext &, bool) { } } // namespace pgduckdb diff --git a/src/pgduckdb_background_worker.cpp b/src/pgduckdb_background_worker.cpp index 6f9a6631..41e6c077 100644 --- a/src/pgduckdb_background_worker.cpp +++ b/src/pgduckdb_background_worker.cpp @@ -58,7 +58,7 @@ static uint64 initial_cache_version = 0; extern "C" { PGDLLEXPORT void -pgduckdb_background_worker_main(Datum main_arg) { +pgduckdb_background_worker_main(Datum) { elog(LOG, "started pg_duckdb background worker"); // Set up a signal handler for SIGTERM pqsignal(SIGTERM, die); @@ -98,7 +98,7 @@ pgduckdb_background_worker_main(Datum main_arg) { ResetLatch(MyLatch); } - proc_exit(0); + // Unreachable } PG_FUNCTION_INFO_V1(force_motherduck_sync); diff --git a/src/pgduckdb_ddl.cpp b/src/pgduckdb_ddl.cpp index 610a2896..14682001 100644 --- a/src/pgduckdb_ddl.cpp +++ b/src/pgduckdb_ddl.cpp @@ -51,7 +51,7 @@ DuckdbTruncateTable(Oid relation_oid) { } void -DuckdbHandleDDL(Node *parsetree, const char *queryString) { +DuckdbHandleDDL(Node *parsetree, const char *) { if (!pgduckdb::IsExtensionRegistered()) { /* We're not installed, so don't mess with the query */ return; @@ -418,7 +418,7 @@ DECLARE_PG_FUNCTION(duckdb_drop_trigger) { * a new version. */ if (pgduckdb::IsMotherDuckEnabled() && !pgduckdb::doing_motherduck_sync) { - for (auto proc = 0; proc < SPI_processed; ++proc) { + for (uint64_t proc = 0; proc < SPI_processed; ++proc) { if (!connection) { /* * For now, we don't support DuckDB queries in transactions. To support @@ -453,7 +453,7 @@ DECLARE_PG_FUNCTION(duckdb_drop_trigger) { if (ret != SPI_OK_SELECT) elog(ERROR, "SPI_exec failed: error code %s", SPI_result_code_string(ret)); - for (auto proc = 0; proc < SPI_processed; ++proc) { + for (uint64_t proc = 0; proc < SPI_processed; ++proc) { HeapTuple tuple = SPI_tuptable->vals[proc]; bool isnull; diff --git a/src/pgduckdb_detoast.cpp b/src/pgduckdb_detoast.cpp index 1fb98414..eb8622c8 100644 --- a/src/pgduckdb_detoast.cpp +++ b/src/pgduckdb_detoast.cpp @@ -110,7 +110,10 @@ ToastFetchDatum(struct varlena *attr) { struct varlena *result = (struct varlena *)duckdb_malloc(attrsize + VARHDRSZ); +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wsign-compare" if (VARATT_EXTERNAL_IS_COMPRESSED(toast_pointer)) { +#pragma GCC diagnostic pop SET_VARSIZE_COMPRESSED(result, attrsize + VARHDRSZ); } else { SET_VARSIZE(result, attrsize + VARHDRSZ); diff --git a/src/pgduckdb_metadata_cache.cpp b/src/pgduckdb_metadata_cache.cpp index 2b8af1e4..199721c5 100644 --- a/src/pgduckdb_metadata_cache.cpp +++ b/src/pgduckdb_metadata_cache.cpp @@ -75,7 +75,7 @@ uint32 schema_hash_value; * IsExtensionRegistered for details). 
*/ static void -InvalidateCaches(Datum arg, int cache_id, uint32 hash_value) { +InvalidateCaches(Datum, int, uint32 hash_value) { if (hash_value != schema_hash_value) { return; } @@ -112,7 +112,7 @@ BuildDuckdbOnlyFunctions() { const char *function_names[] = {"read_parquet", "read_csv", "iceberg_scan", "iceberg_metadata", "iceberg_snapshots", "delta_scan", "read_json"}; - for (int i = 0; i < lengthof(function_names); i++) { + for (uint32_t i = 0; i < lengthof(function_names); i++) { CatCList *catlist = SearchSysCacheList1(PROCNAMEARGSNSP, CStringGetDatum(function_names[i])); for (int j = 0; j < catlist->n_members; j++) { diff --git a/src/pgduckdb_node.cpp b/src/pgduckdb_node.cpp index 2ebad7e7..2f1c12b9 100644 --- a/src/pgduckdb_node.cpp +++ b/src/pgduckdb_node.cpp @@ -71,7 +71,7 @@ Duckdb_CreateCustomScanState(CustomScan *cscan) { } void -Duckdb_BeginCustomScan_Cpp(CustomScanState *cscanstate, EState *estate, int eflags) { +Duckdb_BeginCustomScan_Cpp(CustomScanState *cscanstate, EState *estate, int) { DuckdbScanState *duckdb_scan_state = (DuckdbScanState *)cscanstate; duckdb::unique_ptr prepared_query = DuckdbPrepare(duckdb_scan_state->query); @@ -226,11 +226,11 @@ Duckdb_EndCustomScan(CustomScanState *node) { } void -Duckdb_ReScanCustomScan(CustomScanState *node) { +Duckdb_ReScanCustomScan(CustomScanState *) { } void -Duckdb_ExplainCustomScan_Cpp(CustomScanState *node, List *ancestors, ExplainState *es) { +Duckdb_ExplainCustomScan_Cpp(CustomScanState *node, List *, ExplainState *es) { DuckdbScanState *duckdb_scan_state = (DuckdbScanState *)node; ExecuteQuery(duckdb_scan_state); diff --git a/src/pgduckdb_options.cpp b/src/pgduckdb_options.cpp index ab983b11..efda16d7 100644 --- a/src/pgduckdb_options.cpp +++ b/src/pgduckdb_options.cpp @@ -200,6 +200,7 @@ DECLARE_PG_FUNCTION(cache) { } DECLARE_PG_FUNCTION(pgduckdb_recycle_ddb) { + (void)fcinfo; pgduckdb::DuckDBManager::Get().Reset(); PG_RETURN_BOOL(true); } diff --git a/src/pgduckdb_planner.cpp b/src/pgduckdb_planner.cpp index 0f463b39..d5e6a6c8 100644 --- a/src/pgduckdb_planner.cpp +++ b/src/pgduckdb_planner.cpp @@ -73,7 +73,7 @@ CreatePlan(Query *query, bool throw_error) { auto &prepared_result_types = prepared_query->GetTypes(); - for (auto i = 0; i < prepared_result_types.size(); i++) { + for (size_t i = 0; i < prepared_result_types.size(); i++) { auto &column = prepared_result_types[i]; Oid postgresColumnOid = pgduckdb::GetPostgresDuckDBType(column); diff --git a/src/pgduckdb_table_am.cpp b/src/pgduckdb_table_am.cpp index 37549efd..c00e971a 100644 --- a/src/pgduckdb_table_am.cpp +++ b/src/pgduckdb_table_am.cpp @@ -39,7 +39,7 @@ PG_FUNCTION_INFO_V1(duckdb_am_handler); */ static const TupleTableSlotOps * -duckdb_slot_callbacks(Relation relation) { +duckdb_slot_callbacks(Relation) { /* * Here we would most likely want to invent your own set of slot * callbacks for our AM. 
For now we just use the minimal tuple slot, we @@ -61,7 +61,7 @@ typedef struct DuckdbScanDescData { typedef struct DuckdbScanDescData *DuckdbScanDesc; static TableScanDesc -duckdb_scan_begin(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelTableScanDesc parallel_scan, +duckdb_scan_begin(Relation relation, Snapshot snapshot, int nkeys, ScanKey, ParallelTableScanDesc parallel_scan, uint32 flags) { DuckdbScanDesc scan = (DuckdbScanDesc)palloc(sizeof(DuckdbScanDescData)); @@ -82,13 +82,12 @@ duckdb_scan_end(TableScanDesc sscan) { } static void -duckdb_scan_rescan(TableScanDesc sscan, ScanKey key, bool set_params, bool allow_strat, bool allow_sync, - bool allow_pagemode) { +duckdb_scan_rescan(TableScanDesc, ScanKey, bool, bool, bool, bool) { NOT_IMPLEMENTED(); } static bool -duckdb_scan_getnextslot(TableScanDesc sscan, ScanDirection direction, TupleTableSlot *slot) { +duckdb_scan_getnextslot(TableScanDesc, ScanDirection, TupleTableSlot *) { NOT_IMPLEMENTED(); } @@ -98,23 +97,22 @@ duckdb_scan_getnextslot(TableScanDesc sscan, ScanDirection direction, TupleTable */ static IndexFetchTableData * -duckdb_index_fetch_begin(Relation rel) { +duckdb_index_fetch_begin(Relation) { NOT_IMPLEMENTED(); } static void -duckdb_index_fetch_reset(IndexFetchTableData *scan) { +duckdb_index_fetch_reset(IndexFetchTableData *) { NOT_IMPLEMENTED(); } static void -duckdb_index_fetch_end(IndexFetchTableData *scan) { +duckdb_index_fetch_end(IndexFetchTableData *) { NOT_IMPLEMENTED(); } static bool -duckdb_index_fetch_tuple(struct IndexFetchTableData *scan, ItemPointer tid, Snapshot snapshot, TupleTableSlot *slot, - bool *call_again, bool *all_dead) { +duckdb_index_fetch_tuple(struct IndexFetchTableData *, ItemPointer, Snapshot, TupleTableSlot *, bool *, bool *) { NOT_IMPLEMENTED(); } @@ -125,27 +123,27 @@ duckdb_index_fetch_tuple(struct IndexFetchTableData *scan, ItemPointer tid, Snap */ static bool -duckdb_fetch_row_version(Relation relation, ItemPointer tid, Snapshot snapshot, TupleTableSlot *slot) { +duckdb_fetch_row_version(Relation, ItemPointer, Snapshot, TupleTableSlot *) { NOT_IMPLEMENTED(); } static void -duckdb_get_latest_tid(TableScanDesc sscan, ItemPointer tid) { +duckdb_get_latest_tid(TableScanDesc, ItemPointer) { NOT_IMPLEMENTED(); } static bool -duckdb_tuple_tid_valid(TableScanDesc scan, ItemPointer tid) { +duckdb_tuple_tid_valid(TableScanDesc, ItemPointer) { NOT_IMPLEMENTED(); } static bool -duckdb_tuple_satisfies_snapshot(Relation rel, TupleTableSlot *slot, Snapshot snapshot) { +duckdb_tuple_satisfies_snapshot(Relation, TupleTableSlot *, Snapshot) { NOT_IMPLEMENTED(); } static TransactionId -duckdb_index_delete_tuples(Relation rel, TM_IndexDeleteOp *delstate) { +duckdb_index_delete_tuples(Relation, TM_IndexDeleteOp *) { NOT_IMPLEMENTED(); } @@ -155,61 +153,56 @@ duckdb_index_delete_tuples(Relation rel, TM_IndexDeleteOp *delstate) { */ static void -duckdb_tuple_insert(Relation relation, TupleTableSlot *slot, CommandId cid, int options, BulkInsertState bistate) { +duckdb_tuple_insert(Relation, TupleTableSlot *, CommandId, int, BulkInsertState) { NOT_IMPLEMENTED(); } static void -duckdb_tuple_insert_speculative(Relation relation, TupleTableSlot *slot, CommandId cid, int options, - BulkInsertState bistate, uint32 specToken) { +duckdb_tuple_insert_speculative(Relation, TupleTableSlot *, CommandId, int, BulkInsertState, uint32) { NOT_IMPLEMENTED(); } static void -duckdb_tuple_complete_speculative(Relation relation, TupleTableSlot *slot, uint32 spekToken, bool succeeded) { 
+duckdb_tuple_complete_speculative(Relation, TupleTableSlot *, uint32, bool) { NOT_IMPLEMENTED(); } static void -duckdb_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples, CommandId cid, int options, - BulkInsertState bistate) { +duckdb_multi_insert(Relation, TupleTableSlot **, int, CommandId, int, BulkInsertState) { NOT_IMPLEMENTED(); } static TM_Result -duckdb_tuple_delete(Relation relation, ItemPointer tid, CommandId cid, Snapshot snapshot, Snapshot crosscheck, - bool wait, TM_FailureData *tmfd, bool changingPart) { +duckdb_tuple_delete(Relation, ItemPointer, CommandId, Snapshot, Snapshot, bool, TM_FailureData *, bool) { NOT_IMPLEMENTED(); } #if PG_VERSION_NUM >= 160000 static TM_Result -duckdb_tuple_update(Relation relation, ItemPointer otid, TupleTableSlot *slot, CommandId cid, Snapshot snapshot, - Snapshot crosscheck, bool wait, TM_FailureData *tmfd, LockTupleMode *lockmode, - TU_UpdateIndexes *update_indexes) { +duckdb_tuple_update(Relation, ItemPointer, TupleTableSlot *, CommandId, Snapshot, Snapshot, bool, TM_FailureData *, + LockTupleMode *, TU_UpdateIndexes *) { NOT_IMPLEMENTED(); } #else static TM_Result -duckdb_tuple_update(Relation rel, ItemPointer otid, TupleTableSlot *slot, CommandId cid, Snapshot snapshot, - Snapshot crosscheck, bool wait, TM_FailureData *tmfd, LockTupleMode *lockmode, - bool *update_indexes) { +duckdb_tuple_update(Relation, ItemPointer, TupleTableSlot *, CommandId, Snapshot, Snapshot, bool, TM_FailureData *, + LockTupleMode *, bool *) { NOT_IMPLEMENTED(); } #endif static TM_Result -duckdb_tuple_lock(Relation relation, ItemPointer tid, Snapshot snapshot, TupleTableSlot *slot, CommandId cid, - LockTupleMode mode, LockWaitPolicy wait_policy, uint8 flags, TM_FailureData *tmfd) { +duckdb_tuple_lock(Relation, ItemPointer, Snapshot, TupleTableSlot *, CommandId, LockTupleMode, LockWaitPolicy, uint8, + TM_FailureData *) { NOT_IMPLEMENTED(); } static void -duckdb_finish_bulk_insert(Relation relation, int options) { +duckdb_finish_bulk_insert(Relation, int) { NOT_IMPLEMENTED(); } @@ -221,8 +214,7 @@ duckdb_finish_bulk_insert(Relation relation, int options) { #if PG_VERSION_NUM >= 160000 static void -duckdb_relation_set_new_filelocator(Relation rel, const RelFileLocator *newrnode, char persistence, - TransactionId *freezeXid, MultiXactId *minmulti) { +duckdb_relation_set_new_filelocator(Relation rel, const RelFileLocator *, char, TransactionId *, MultiXactId *) { HeapTuple tp = SearchSysCache1(RELOID, ObjectIdGetDatum(rel->rd_id)); if (!HeapTupleIsValid(tp)) { /* nothing to do, the table will be created in DuckDB later by the @@ -236,8 +228,7 @@ duckdb_relation_set_new_filelocator(Relation rel, const RelFileLocator *newrnode #else static void -duckdb_relation_set_new_filenode(Relation rel, const RelFileNode *newrnode, char persistence, TransactionId *freezeXid, - MultiXactId *minmulti) { +duckdb_relation_set_new_filenode(Relation rel, const RelFileNode *, char, TransactionId *, MultiXactId *) { HeapTuple tp = SearchSysCache1(RELOID, ObjectIdGetDatum(rel->rd_id)); if (!HeapTupleIsValid(tp)) { /* nothing to do, the table will be created in DuckDB later by the @@ -258,35 +249,34 @@ duckdb_relation_nontransactional_truncate(Relation rel) { #if PG_VERSION_NUM >= 160000 static void -duckdb_copy_data(Relation rel, const RelFileLocator *newrnode) { +duckdb_copy_data(Relation, const RelFileLocator *) { NOT_IMPLEMENTED(); } #else static void -duckdb_copy_data(Relation rel, const RelFileNode *newrnode) { +duckdb_copy_data(Relation, const RelFileNode *) { 
NOT_IMPLEMENTED(); } #endif static void -duckdb_copy_for_cluster(Relation OldTable, Relation NewTable, Relation OldIndex, bool use_sort, - TransactionId OldestXmin, TransactionId *xid_cutoff, MultiXactId *multi_cutoff, - double *num_tuples, double *tups_vacuumed, double *tups_recently_dead) { +duckdb_copy_for_cluster(Relation, Relation, Relation, bool, TransactionId, TransactionId *, MultiXactId *, double *, + double *, double *) { NOT_IMPLEMENTED(); } static void -duckdb_vacuum(Relation onerel, VacuumParams *params, BufferAccessStrategy bstrategy) { +duckdb_vacuum(Relation, VacuumParams *, BufferAccessStrategy) { NOT_IMPLEMENTED(); } #if PG_VERSION_NUM >= 170000 static bool -duckdb_scan_analyze_next_block(TableScanDesc scan, ReadStream *stream) { +duckdb_scan_analyze_next_block(TableScanDesc, ReadStream *) { /* no data in postgres, so no point to analyze next block */ return false; } @@ -294,28 +284,25 @@ duckdb_scan_analyze_next_block(TableScanDesc scan, ReadStream *stream) { #else static bool -duckdb_scan_analyze_next_block(TableScanDesc scan, BlockNumber blockno, BufferAccessStrategy bstrategy) { +duckdb_scan_analyze_next_block(TableScanDesc, BlockNumber, BufferAccessStrategy) { /* no data in postgres, so no point to analyze next block */ return false; } #endif static bool -duckdb_scan_analyze_next_tuple(TableScanDesc scan, TransactionId OldestXmin, double *liverows, double *deadrows, - TupleTableSlot *slot) { +duckdb_scan_analyze_next_tuple(TableScanDesc, TransactionId, double *, double *, TupleTableSlot *) { NOT_IMPLEMENTED(); } static double -duckdb_index_build_range_scan(Relation tableRelation, Relation indexRelation, IndexInfo *indexInfo, bool allow_sync, - bool anyvisible, bool progress, BlockNumber start_blockno, BlockNumber numblocks, - IndexBuildCallback callback, void *callback_state, TableScanDesc scan) { +duckdb_index_build_range_scan(Relation, Relation, IndexInfo *, bool, bool, bool, BlockNumber, BlockNumber, + IndexBuildCallback, void *, TableScanDesc) { NOT_IMPLEMENTED(); } static void -duckdb_index_validate_scan(Relation tableRelation, Relation indexRelation, IndexInfo *indexInfo, Snapshot snapshot, - ValidateIndexState *state) { +duckdb_index_validate_scan(Relation, Relation, IndexInfo *, Snapshot, ValidateIndexState *) { NOT_IMPLEMENTED(); } @@ -325,7 +312,7 @@ duckdb_index_validate_scan(Relation tableRelation, Relation indexRelation, Index */ static uint64 -duckdb_relation_size(Relation rel, ForkNumber forkNumber) { +duckdb_relation_size(Relation, ForkNumber) { /* * For now we just return 0. We should probably want return something more * useful in the future though. @@ -337,7 +324,7 @@ duckdb_relation_size(Relation rel, ForkNumber forkNumber) { * Check to see whether the table needs a TOAST table. 
*/ static bool -duckdb_relation_needs_toast_table(Relation rel) { +duckdb_relation_needs_toast_table(Relation) { /* we don't need toast, because everything is stored in duckdb */ return false; @@ -349,7 +336,7 @@ duckdb_relation_needs_toast_table(Relation rel) { */ static void -duckdb_estimate_rel_size(Relation rel, int32 *attr_widths, BlockNumber *pages, double *tuples, double *allvisfrac) { +duckdb_estimate_rel_size(Relation, int32 *attr_widths, BlockNumber *pages, double *tuples, double *allvisfrac) { /* no data available */ if (attr_widths) *attr_widths = 0; @@ -367,22 +354,22 @@ duckdb_estimate_rel_size(Relation rel, int32 *attr_widths, BlockNumber *pages, d */ static bool -duckdb_scan_bitmap_next_block(TableScanDesc scan, TBMIterateResult *tbmres) { +duckdb_scan_bitmap_next_block(TableScanDesc, TBMIterateResult *) { NOT_IMPLEMENTED(); } static bool -duckdb_scan_bitmap_next_tuple(TableScanDesc scan, TBMIterateResult *tbmres, TupleTableSlot *slot) { +duckdb_scan_bitmap_next_tuple(TableScanDesc, TBMIterateResult *, TupleTableSlot *) { NOT_IMPLEMENTED(); } static bool -duckdb_scan_sample_next_block(TableScanDesc scan, SampleScanState *scanstate) { +duckdb_scan_sample_next_block(TableScanDesc, SampleScanState *) { NOT_IMPLEMENTED(); } static bool -duckdb_scan_sample_next_tuple(TableScanDesc scan, SampleScanState *scanstate, TupleTableSlot *slot) { +duckdb_scan_sample_next_tuple(TableScanDesc, SampleScanState *, TupleTableSlot *) { NOT_IMPLEMENTED(); } @@ -458,6 +445,7 @@ static const TableAmRoutine duckdb_methods = {.type = T_TableAmRoutine, Datum duckdb_am_handler(PG_FUNCTION_ARGS) { + (void)fcinfo; PG_RETURN_POINTER(&duckdb_methods); } } diff --git a/src/pgduckdb_types.cpp b/src/pgduckdb_types.cpp index c36857d4..c22dadea 100644 --- a/src/pgduckdb_types.cpp +++ b/src/pgduckdb_types.cpp @@ -140,7 +140,7 @@ ConvertNumeric(T value, idx_t scale) { // count the amount of digits required for the fractional part // note that while it is technically possible to leave out zeros here this adds even more complications // so we just always write digits for the full "scale", even if not strictly required - int32_t fractional_ndigits = (scale + DEC_DIGITS - 1) / DEC_DIGITS; + idx_t fractional_ndigits = (scale + DEC_DIGITS - 1) / DEC_DIGITS; // fractional digits are LEFT aligned (for some unknown reason) // that means if we write ".12" with a scale of 2 we actually need to write "1200", instead of "12" // this means we need to "correct" the number 12 by multiplying by 100 in this case @@ -466,7 +466,7 @@ struct PostgresArrayAppendState { AppendValueAtDimension(const duckdb::Value &value, idx_t dimension) { // FIXME: verify that the amount of values does not overflow an `int` ? 
auto &values = duckdb::ListValue::GetChildren(value); - idx_t to_append = values.size(); + int to_append = values.size(); D_ASSERT(dimension < number_of_dimensions); if (dimensions[dimension] == -1) { @@ -482,8 +482,7 @@ struct PostgresArrayAppendState { auto &child_type = duckdb::ListType::GetChildType(value.type()); if (child_type.id() == duckdb::LogicalTypeId::LIST) { - for (idx_t i = 0; i < to_append; i++) { - auto &child_val = values[i]; + for (auto &child_val : values) { if (child_val.IsNull()) { // Postgres arrays can not contains nulls at the array level // i.e {{1,2}, NULL, {3,4}} is not supported @@ -500,14 +499,13 @@ struct PostgresArrayAppendState { nulls = (bool *)palloc(expected_values * sizeof(bool)); } - for (idx_t i = 0; i < to_append; i++) { - auto &child_val = values[i]; - nulls[count + i] = child_val.IsNull(); - if (!nulls[count + i]) { - datums[count + i] = OP::ConvertToPostgres(values[i]); + for (auto &child_val : values) { + nulls[count] = child_val.IsNull(); + if (!nulls[count]) { + datums[count] = OP::ConvertToPostgres(child_val); } + ++count; } - count += to_append; } } @@ -949,7 +947,7 @@ ConvertDecimal(const NumericVar &numeric) { T integral_part = 0, fractional_part = 0; if (numeric.weight >= 0) { - idx_t digit_index = 0; + int32_t digit_index = 0; integral_part = numeric.digits[digit_index++]; for (; digit_index <= numeric.weight; digit_index++) { integral_part *= NBASE; diff --git a/src/pgduckdb_xact.cpp b/src/pgduckdb_xact.cpp index 03a95a6f..5ae5f44a 100644 --- a/src/pgduckdb_xact.cpp +++ b/src/pgduckdb_xact.cpp @@ -9,7 +9,7 @@ extern "C" { namespace pgduckdb { static void -DuckdbXactCallback_Cpp(XactEvent event, void *arg) { +DuckdbXactCallback_Cpp(XactEvent event, void *) { if (!started_duckdb_transaction) { return; } diff --git a/src/scan/postgres_scan.cpp b/src/scan/postgres_scan.cpp index db84adb7..151ff69f 100644 --- a/src/scan/postgres_scan.cpp +++ b/src/scan/postgres_scan.cpp @@ -146,8 +146,8 @@ ReplaceView(Oid view) { } duckdb::unique_ptr -PostgresReplacementScan(duckdb::ClientContext &context, duckdb::ReplacementScanInput &input, - duckdb::optional_ptr data) { +PostgresReplacementScan(duckdb::ClientContext &, duckdb::ReplacementScanInput &input, + duckdb::optional_ptr) { auto &schema_name = input.schema_name; auto &table_name = input.table_name; diff --git a/src/scan/postgres_seq_scan.cpp b/src/scan/postgres_seq_scan.cpp index 6b96faf6..597a52cc 100644 --- a/src/scan/postgres_seq_scan.cpp +++ b/src/scan/postgres_seq_scan.cpp @@ -68,8 +68,7 @@ PostgresSeqScanFunction::PostgresSeqScanFunction() } duckdb::unique_ptr -PostgresSeqScanFunction::PostgresSeqScanInitGlobal(duckdb::ClientContext &context, - duckdb::TableFunctionInitInput &input) { +PostgresSeqScanFunction::PostgresSeqScanInitGlobal(duckdb::ClientContext &, duckdb::TableFunctionInitInput &input) { auto &bind_data = input.bind_data->CastNoConst(); auto global_state = duckdb::make_uniq(bind_data.m_rel, input); global_state->m_global_state->m_snapshot = bind_data.m_snapshot; @@ -77,8 +76,7 @@ PostgresSeqScanFunction::PostgresSeqScanInitGlobal(duckdb::ClientContext &contex } duckdb::unique_ptr -PostgresSeqScanFunction::PostgresSeqScanInitLocal(duckdb::ExecutionContext &context, - duckdb::TableFunctionInitInput &input, +PostgresSeqScanFunction::PostgresSeqScanInitLocal(duckdb::ExecutionContext &, duckdb::TableFunctionInitInput &, duckdb::GlobalTableFunctionState *gstate) { auto global_state = reinterpret_cast(gstate); return duckdb::make_uniq( @@ -86,7 +84,7 @@ 
PostgresSeqScanFunction::PostgresSeqScanInitLocal(duckdb::ExecutionContext &cont } void -PostgresSeqScanFunction::PostgresSeqScanFunc(duckdb::ClientContext &context, duckdb::TableFunctionInput &data, +PostgresSeqScanFunction::PostgresSeqScanFunc(duckdb::ClientContext &, duckdb::TableFunctionInput &data, duckdb::DataChunk &output) { auto &local_state = data.local_state->Cast(); @@ -106,7 +104,7 @@ PostgresSeqScanFunction::PostgresSeqScanFunc(duckdb::ClientContext &context, duc } duckdb::unique_ptr -PostgresSeqScanFunction::PostgresSeqScanCardinality(duckdb::ClientContext &context, const duckdb::FunctionData *data) { +PostgresSeqScanFunction::PostgresSeqScanCardinality(duckdb::ClientContext &, const duckdb::FunctionData *data) { auto &bind_data = data->Cast(); return duckdb::make_uniq(bind_data.m_cardinality, bind_data.m_cardinality); }
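
Note, not part of the patch: a minimal standalone sketch of the suppression idioms this change applies once -Wall -Wextra -Werror are in force: leaving unused parameters unnamed, (void)-casting names that a macro such as PG_FUNCTION_ARGS forces into scope (the "(void)fcinfo;" lines above), and scoping a single unavoidable warning with #pragma GCC diagnostic push/pop as done around VARATT_EXTERNAL_IS_COMPRESSED. The file name and helper names below are invented for illustration only.

// Sketch only; compile with something like: g++ -std=c++17 -Wall -Wextra -Werror sketch.cpp
#include <cstdio>

// 1. Unused parameters stay unnamed, so -Wunused-parameter has nothing to flag.
static void
log_event(int code, const char * /* detail, currently unused */) {
	std::printf("event %d\n", code);
}

// 2. When a macro fixes the parameter list (as PG_FUNCTION_ARGS does with
//    fcinfo), cast the unused name to void instead.
#define EXAMPLE_HANDLER(name) static int name(void *ctx)
EXAMPLE_HANDLER(noop_handler) {
	(void)ctx;
	return 0;
}

// 3. Scope a single unavoidable warning, e.g. -Wsign-compare raised inside a
//    foreign macro, with diagnostic push/pop instead of relaxing it globally.
static bool
fits(int requested, unsigned int available) {
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wsign-compare"
	return requested <= available;
#pragma GCC diagnostic pop
}

int
main() {
	log_event(1, "ignored");
	return noop_handler(nullptr) + (fits(1, 2u) ? 0 : 1);
}

The Makefile half of the change applies the same idea at a coarser grain: the DuckDB and re2 include paths move from -I to -isystem, so warnings originating in those third-party headers are not promoted to errors by -Werror.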