diff --git a/CHANGELOG.md b/CHANGELOG.md index 162c7983f86..af0d058f5ea 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,6 +21,34 @@ ---------------------------------------------- +# 14.12.0 Release notes + +### Enhancements +* Improved sync bootstrap performance by reducing the number of table selections in the replication logs for embedded objects. ([#7945](https://github.com/realm/realm-core/issues/7945)) +* Released a read lock which was pinned for the duration of a mutable subscription even after commit. This frees resources earlier, and may improve performance of sync bootstraps where the starting state is large. ([#7946](https://github.com/realm/realm-core/issues/7946)) +* Client reset cycle detection now checks if the previous recovery attempt was made by the same core version, and if not, attempts recovery again ([PR #7944](https://github.com/realm/realm-core/pull/7944)). +* Updated bundled OpenSSL version to 3.3.1. ([PR #7947](https://github.com/realm/realm-core/pull/7947)) + +### Fixed +* Fixed an "invalid column key" exception when using a RQL "BETWEEN" query on an int or timestamp property across links. ([#7935](https://github.com/realm/realm-core/issues/7935), since v14.10.1) +* Fixed a conflict resolution bug related to ArrayErase and Clear instructions, which could sometimes cause an "Invalid prior_size" exception to prevent synchronization ([#7893](https://github.com/realm/realm-core/issues/7893), since v14.8.0). +* Fixed a bug which would prevent eventual consistency during conflict resolution. Affected clients would experience data divergence and potentially consistency errors as a result. ([PR #7955](https://github.com/realm/realm-core/pull/7955), since v14.8.0) +* Fixed issues loading the native Realm libraries on Linux ARMv7 systems when they linked against our bundled OpenSSL, resulting in errors like `unexpected reloc type 0x03`. ([#7947](https://github.com/realm/realm-core/issues/7947), since v14.1.0) +* `Realm::convert()` would sometimes incorrectly throw an exception claiming that there were unuploaded local changes when the source Realm was a synchronized Realm ([#7966](https://github.com/realm/realm-core/issues/7966), since v10.7.0). + +### Breaking changes +* None. + +### Compatibility +* Fileformat: Generates files with format v24. Reads and automatically upgrades from fileformat v10. If you want to upgrade from an earlier file format version, you will have to use RealmCore v13.x.y or earlier. + +----------- + +### Internals +* Reverted the bfd linker override in the Linux-armv7 toolchain file because the upstream OpenSSL issue it was working around was resolved. + +---------------------------------------------- + # 14.11.2 Release notes ### Enhancements diff --git a/Package.swift b/Package.swift index d3b9e090e15..7c1d1483691 100644 --- a/Package.swift +++ b/Package.swift @@ -3,7 +3,7 @@ import PackageDescription import Foundation -let versionStr = "14.11.2" +let versionStr = "14.12.0" let versionPieces = versionStr.split(separator: "-") let versionCompontents = versionPieces[0].split(separator: ".") let versionExtra = versionPieces.count > 1 ? 
versionPieces[1] : "" diff --git a/dependencies.yml b/dependencies.yml index 45bf8cf0afc..b309513d2a2 100644 --- a/dependencies.yml +++ b/dependencies.yml @@ -1,6 +1,6 @@ PACKAGE_NAME: realm-core -VERSION: 14.11.2 -OPENSSL_VERSION: 3.2.0 +VERSION: 14.12.0 +OPENSSL_VERSION: 3.3.1 ZLIB_VERSION: 1.2.13 # https://github.com/10gen/baas/commits # 2f308db is 2024 July 10 diff --git a/evergreen/config.yml b/evergreen/config.yml index f7d009de25c..2f0bfd89afe 100644 --- a/evergreen/config.yml +++ b/evergreen/config.yml @@ -305,24 +305,25 @@ functions: fi # NOTE: These two values will be ANDed together for matching tests - TEST_FLAGS= + TEST_FLAGS="--no-tests=error ${test_flags|} " if [[ -n "${test_label}" ]]; then - TEST_FLAGS="-L ${test_label} " + TEST_FLAGS+="-L ${test_label} " fi if [[ -n "${test_filter}" ]]; then TEST_FLAGS+="-R ${test_filter} " fi if [[ -n "${verbose_test_output}" ]]; then - TEST_FLAGS="$TEST_FLAGS -VV" export UNITTEST_THREADS=1 export UNITTEST_LOG_LEVEL="${test_logging_level|debug}" + TEST_FLAGS+="-VV " + if [[ "$UNITTEST_LOG_LEVEL" = "all" ]]; then + TEST_FLAGS+="-O $(./evergreen/abspath.sh test_${task_id}.log) -Q " + fi else - TEST_FLAGS="$TEST_FLAGS -V" + TEST_FLAGS+="-V " fi - TEST_FLAGS="--no-tests=error $TEST_FLAGS ${test_flags|}" - if [[ -n "${disable_tests_against_baas|}" ]]; then unset BAASAAS_API_KEY unset BAASAAS_REF_SPEC @@ -331,9 +332,18 @@ functions: if [[ -n "${c_compiler}" && "$(basename ${c_compiler})" = "clang" && -f "$(dirname ${c_compiler})/llvm-symbolizer" ]]; then LLVM_SYMBOLIZER="$(dirname ${c_compiler})/llvm-symbolizer" + # we don't want to put all of the llvm bin-dir onto the path, so make a new directory, as-needed, + # and put a symlink to llvm-symbolizer in it. This is for ubsan, which doesn't have an + # environment variable to specify the path to the llvm-symbolizer. + if [[ ! ./llvm_symbolizer_bindir/llvm-symbolizer -ef "$LLVM_SYMBOLIZER" ]]; then + mkdir llvm_symbolizer_bindir + ln -s "$(./evergreen/abspath.sh $LLVM_SYMBOLIZER)" llvm_symbolizer_bindir/llvm-symbolizer + fi + export PATH="$(./evergreen/abspath.sh llvm_symbolizer_bindir)":$PATH export ASAN_SYMBOLIZER_PATH="$(./evergreen/abspath.sh $LLVM_SYMBOLIZER)" export TSAN_OPTIONS="external_symbolizer_path=$(./evergreen/abspath.sh $LLVM_SYMBOLIZER)" fi + if [[ -n "${enable_llvm_coverage}" ]]; then if [[ -z "${test_executable_name}" ]]; then echo "Missing executable name" @@ -375,16 +385,12 @@ functions: fi cd build - if ! "$CTEST" -C ${cmake_build_type|Debug} $TEST_FLAGS; then - BAAS_PID=$(pgrep baas_server) - if [[ -n "$BAAS_PID" ]]; then - echo "Dumping baas to log file" - kill -3 $BAAS_PID - sleep 15 - fi - exit 1 - fi + "$CTEST" -C ${cmake_build_type|Debug} $TEST_FLAGS + if [[ -f ../test_${task_id}.log ]]; then + # If the test succeeded, then we don't need to save any trace level logs. 
+ rm ../test_${task_id}.log + fi "upload test results": - command: attach.results params: @@ -477,6 +483,17 @@ functions: content_type: text/plain display_name: baas proxy logs optional: true + - command: s3.put + params: + aws_key: '${artifacts_aws_access_key}' + aws_secret: '${artifacts_aws_secret_key}' + local_file: 'realm-core/test_${task_id}.log' + remote_file: 'realm-core-stable/${branch_name}/${task_id}/${execution}/test.log' + bucket: mciuploads + permissions: public-read + content_type: text/plain + display_name: trace level test logs + optional: true "upload fuzzer results": - command: shell.exec @@ -1987,6 +2004,21 @@ buildvariants: tasks: - name: compile_test +- name: ubuntu-trace-logging + display_name: "Ubuntu (Trace Logging Enabled)" + run_on: ubuntu2204-arm64-large + allowed_requesters: [ "patch", "ad_hoc" ] + expansions: + fetch_missing_dependencies: On + cmake_build_type: Debug + c_compiler: "/opt/clang+llvm/bin/clang" + cxx_compiler: "/opt/clang+llvm/bin/clang++" + test_logging_level: all + enable_ubsan: On + verbose_test_output: true + tasks: + - name: compile_test + - name: ubuntu-no-app-services display_name: "Ubuntu (AppServices Disabled)" run_on: ubuntu2204-arm64-large diff --git a/src/realm/parser/driver.cpp b/src/realm/parser/driver.cpp index 0dae67ffa4a..e91f762df7c 100644 --- a/src/realm/parser/driver.cpp +++ b/src/realm/parser/driver.cpp @@ -644,7 +644,7 @@ Query BetweenNode::visit(ParserDriver* drv) auto tmp = prop->visit(drv); const ObjPropertyBase* obj_prop = dynamic_cast(tmp.get()); - if (obj_prop) { + if (obj_prop && !obj_prop->links_exist()) { if (tmp->get_type() == type_Int) { auto min_val = min->visit(drv, type_Int); auto max_val = max->visit(drv, type_Int); diff --git a/src/realm/query_conditions.hpp b/src/realm/query_conditions.hpp index d2298d5d15a..2522b09b00e 100644 --- a/src/realm/query_conditions.hpp +++ b/src/realm/query_conditions.hpp @@ -31,26 +31,8 @@ namespace realm { -// Quick hack to make "Queries with Integer null columns" able to compile in Visual Studio 2015 which doesn't full -// support sfinae -// (real cause hasn't been investigated yet, cannot exclude that we don't obey c++11 standard) -struct HackClass { - template - bool can_match(A, B, C) - { - REALM_ASSERT(false); - return false; - } - template - bool will_match(A, B, C) - { - REALM_ASSERT(false); - return false; - } -}; - // Does v2 contain v1? -struct Contains : public HackClass { +struct Contains { bool operator()(StringData v1, const char*, const char*, StringData v2, bool = false, bool = false) const { return v2.contains(v1); @@ -108,7 +90,7 @@ struct Contains : public HackClass { }; // Does v2 contain something like v1 (wildcard matching)? -struct Like : public HackClass { +struct Like { bool operator()(StringData v1, const char*, const char*, StringData v2, bool = false, bool = false) const { return v2.like(v1); @@ -172,7 +154,7 @@ struct Like : public HackClass { }; // Does v2 begin with v1? -struct BeginsWith : public HackClass { +struct BeginsWith { bool operator()(StringData v1, const char*, const char*, StringData v2, bool = false, bool = false) const { return v2.begins_with(v1); @@ -223,7 +205,7 @@ struct BeginsWith : public HackClass { }; // Does v2 end with v1? -struct EndsWith : public HackClass { +struct EndsWith { bool operator()(StringData v1, const char*, const char*, StringData v2, bool = false, bool = false) const { return v2.ends_with(v1); @@ -363,7 +345,7 @@ struct NotEqual { }; // Does v2 contain v1? 
-struct ContainsIns : public HackClass { +struct ContainsIns { bool operator()(StringData v1, const char* v1_upper, const char* v1_lower, StringData v2, bool = false, bool = false) const { @@ -449,7 +431,7 @@ struct ContainsIns : public HackClass { }; // Does v2 contain something like v1 (wildcard matching)? -struct LikeIns : public HackClass { +struct LikeIns { bool operator()(StringData v1, const char* v1_upper, const char* v1_lower, StringData v2, bool = false, bool = false) const { @@ -534,7 +516,7 @@ struct LikeIns : public HackClass { }; // Does v2 begin with v1? -struct BeginsWithIns : public HackClass { +struct BeginsWithIns { bool operator()(StringData v1, const char* v1_upper, const char* v1_lower, StringData v2, bool = false, bool = false) const { @@ -600,7 +582,7 @@ struct BeginsWithIns : public HackClass { }; // Does v2 end with v1? -struct EndsWithIns : public HackClass { +struct EndsWithIns { bool operator()(StringData v1, const char* v1_upper, const char* v1_lower, StringData v2, bool = false, bool = false) const { @@ -666,7 +648,7 @@ struct EndsWithIns : public HackClass { static const int condition = -1; }; -struct EqualIns : public HackClass { +struct EqualIns { bool operator()(StringData v1, const char* v1_upper, const char* v1_lower, StringData v2, bool = false, bool = false) const { @@ -738,7 +720,7 @@ struct EqualIns : public HackClass { static const int condition = -1; }; -struct NotEqualIns : public HackClass { +struct NotEqualIns { bool operator()(StringData v1, const char* v1_upper, const char* v1_lower, StringData v2, bool = false, bool = false) const { @@ -944,7 +926,7 @@ struct Less { } }; -struct LessEqual : public HackClass { +struct LessEqual { static const int avx = 0x12; // _CMP_LE_OQ template bool operator()(const T& v1, const T& v2, bool v1null = false, bool v2null = false) const @@ -984,7 +966,7 @@ struct LessEqual : public HackClass { static const int condition = -1; }; -struct GreaterEqual : public HackClass { +struct GreaterEqual { static const int avx = 0x1D; // _CMP_GE_OQ template bool operator()(const T& v1, const T& v2, bool v1null = false, bool v2null = false) const diff --git a/src/realm/replication.cpp b/src/realm/replication.cpp index 350eb61fd05..26826fb4990 100644 --- a/src/realm/replication.cpp +++ b/src/realm/replication.cpp @@ -140,19 +140,20 @@ void Replication::erase_column(const Table* t, ColKey col_key) m_encoder.erase_column(col_key); // Throws } -void Replication::track_new_object(ObjKey key) +void Replication::track_new_object(const Table* table, ObjKey key) { - m_selected_obj = key; - m_selected_collection = CollectionId(); - m_newly_created_object = true; + if (table == m_selected_table) { + m_selected_obj = key; + m_selected_obj_is_newly_created = true; + } - auto table_index = m_selected_table->get_index_in_group(); + auto table_index = table->get_index_in_group(); if (table_index >= m_most_recently_created_object.size()) { if (table_index >= m_most_recently_created_object.capacity()) m_most_recently_created_object.reserve(table_index * 2); m_most_recently_created_object.resize(table_index + 1); } - m_most_recently_created_object[table_index] = m_selected_obj; + m_most_recently_created_object[table_index] = key; } void Replication::create_object(const Table* t, GlobalKey id) @@ -162,7 +163,7 @@ void Replication::create_object(const Table* t, GlobalKey id) } select_table(t); // Throws m_encoder.create_object(id.get_local_key(0)); // Throws - track_new_object(id.get_local_key(0)); // Throws + track_new_object(t, 
id.get_local_key(0)); // Throws } void Replication::create_object_with_primary_key(const Table* t, ObjKey key, Mixed pk) @@ -173,13 +174,12 @@ void Replication::create_object_with_primary_key(const Table* t, ObjKey key, Mix } select_table(t); // Throws m_encoder.create_object(key); // Throws - track_new_object(key); + track_new_object(t, key); } void Replication::create_linked_object(const Table* t, ObjKey key) { - select_table(t); // Throws - track_new_object(key); // Throws + track_new_object(t, key); // Throws // Does not need to encode anything as embedded tables can't be observed } @@ -207,30 +207,37 @@ void Replication::do_select_table(const Table* table) m_selected_table = table; m_selected_collection = CollectionId(); m_selected_obj = ObjKey(); + m_selected_obj_is_newly_created = false; } -void Replication::do_select_obj(ObjKey key) +bool Replication::check_for_newly_created_object(ObjKey key, const Table* table) { - m_selected_obj = key; - m_selected_collection = CollectionId(); - - auto table_index = m_selected_table->get_index_in_group(); + auto table_index = table->get_index_in_group(); if (table_index < m_most_recently_created_object.size()) { - m_newly_created_object = m_most_recently_created_object[table_index] == key; + return m_most_recently_created_object[table_index] == key; } - else { - m_newly_created_object = false; + return false; +} + +bool Replication::do_select_obj(ObjKey key, const Table* table) +{ + bool newly_created = check_for_newly_created_object(key, table); + if (!newly_created) { + select_table(table); + m_selected_obj = key; + m_selected_obj_is_newly_created = false; + m_selected_collection = CollectionId(); } if (auto logger = would_log(LogLevel::debug)) { - auto class_name = m_selected_table->get_class_name(); - if (m_selected_table->get_primary_key_column()) { - auto pk = m_selected_table->get_primary_key(key); + auto class_name = table->get_class_name(); + if (table->get_primary_key_column()) { + auto pk = table->get_primary_key(key); logger->log(LogCategory::object, LogLevel::debug, "Mutating object '%1' with primary key %2", class_name, pk); } - else if (m_selected_table->is_embedded()) { - auto obj = m_selected_table->get_object(key); + else if (table->is_embedded()) { + auto obj = table->get_object(key); logger->log(LogCategory::object, LogLevel::debug, "Mutating object '%1' with path '%2'", class_name, obj.get_id()); } @@ -238,26 +245,21 @@ void Replication::do_select_obj(ObjKey key) logger->log(LogCategory::object, LogLevel::debug, "Mutating anonymous object '%1'[%2]", class_name, key); } } + return newly_created; } void Replication::do_select_collection(const CollectionBase& coll) { - select_table(coll.get_table().unchecked_ptr()); - ColKey col_key = coll.get_col_key(); - ObjKey key = coll.get_owner_key(); - auto path = coll.get_stable_path(); - - if (select_obj(key)) { - m_encoder.select_collection(col_key, key, path); // Throws + if (select_obj(coll.get_owner_key(), coll.get_table().unchecked_ptr())) { + m_encoder.select_collection(coll.get_col_key(), coll.get_owner_key(), coll.get_stable_path()); // Throws + m_selected_collection = CollectionId(coll); } - m_selected_collection = CollectionId(coll.get_table()->get_key(), key, std::move(path)); } void Replication::do_set(const Table* t, ColKey col_key, ObjKey key, _impl::Instruction variant) { if (variant != _impl::Instruction::instr_SetDefault) { - select_table(t); // Throws - if (select_obj(key)) { + if (select_obj(key, t)) { // Throws m_encoder.modify_object(col_key, key); // Throws } } @@ 
-294,8 +296,7 @@ void Replication::set(const Table* t, ColKey col_key, ObjKey key, Mixed value, _ void Replication::nullify_link(const Table* t, ColKey col_key, ObjKey key) { - select_table(t); // Throws - if (select_obj(key)) { + if (select_obj(key, t)) { // Throws m_encoder.modify_object(col_key, key); // Throws } if (auto logger = would_log(LogLevel::trace)) { @@ -311,10 +312,10 @@ void Replication::add_int(const Table* t, ColKey col_key, ObjKey key, int_fast64 } } -Path Replication::get_prop_name(Path&& path) const +Path Replication::get_prop_name(ConstTableRef table, Path&& path) const { auto col_key = path[0].get_col_key(); - auto prop_name = m_selected_table->get_column_name(col_key); + auto prop_name = table->get_column_name(col_key); path[0] = PathElement(prop_name); return std::move(path); } @@ -328,14 +329,15 @@ void Replication::log_collection_operation(const char* operation, const Collecti auto path = collection.get_short_path(); auto col_key = path[0].get_col_key(); - auto prop_name = m_selected_table->get_column_name(col_key); + ConstTableRef table = collection.get_table(); + auto prop_name = table->get_column_name(col_key); path[0] = PathElement(prop_name); std::string position; if (!index.is_null()) { position = util::format(" at position %1", index); } if (Table::is_link_type(col_key.get_type()) && value.is_type(type_Link)) { - auto target_table = m_selected_table->get_opposite_table(col_key); + auto target_table = table->get_opposite_table(col_key); if (target_table->is_embedded()) { logger->log(LogCategory::object, LogLevel::trace, " %1 embedded object '%2' in %3%4 ", operation, target_table->get_class_name(), path, position); @@ -381,7 +383,7 @@ void Replication::list_erase(const CollectionBase& list, size_t link_ndx) } if (auto logger = would_log(LogLevel::trace)) { logger->log(LogCategory::object, LogLevel::trace, " Erase '%1' at position %2", - get_prop_name(list.get_short_path()), link_ndx); + get_prop_name(list.get_table(), list.get_short_path()), link_ndx); } } @@ -392,7 +394,7 @@ void Replication::list_move(const CollectionBase& list, size_t from_link_ndx, si } if (auto logger = would_log(LogLevel::trace)) { logger->log(LogCategory::object, LogLevel::trace, " Move %1 to %2 in '%3'", from_link_ndx, to_link_ndx, - get_prop_name(list.get_short_path())); + get_prop_name(list.get_table(), list.get_short_path())); } } @@ -417,7 +419,8 @@ void Replication::list_clear(const CollectionBase& list) m_encoder.collection_clear(list.size()); // Throws } if (auto logger = would_log(LogLevel::trace)) { - logger->log(LogCategory::object, LogLevel::trace, " Clear '%1'", get_prop_name(list.get_short_path())); + logger->log(LogCategory::object, LogLevel::trace, " Clear '%1'", + get_prop_name(list.get_table(), list.get_short_path())); } } @@ -428,7 +431,7 @@ void Replication::link_list_nullify(const Lst& list, size_t link_ndx) } if (auto logger = would_log(LogLevel::trace)) { logger->log(LogCategory::object, LogLevel::trace, " Nullify '%1' position %2", - m_selected_table->get_column_name(list.get_col_key()), link_ndx); + list.get_table()->get_column_name(list.get_col_key()), link_ndx); } } @@ -455,7 +458,7 @@ void Replication::dictionary_erase(const CollectionBase& dict, size_t ndx, Mixed } if (auto logger = would_log(LogLevel::trace)) { logger->log(LogCategory::object, LogLevel::trace, " Erase %1 from '%2'", key, - get_prop_name(dict.get_short_path())); + get_prop_name(dict.get_table(), dict.get_short_path())); } } @@ -465,6 +468,7 @@ void Replication::dictionary_clear(const 
CollectionBase& dict) m_encoder.collection_clear(dict.size()); } if (auto logger = would_log(LogLevel::trace)) { - logger->log(LogCategory::object, LogLevel::trace, " Clear '%1'", get_prop_name(dict.get_short_path())); + logger->log(LogCategory::object, LogLevel::trace, " Clear '%1'", + get_prop_name(dict.get_table(), dict.get_short_path())); } } diff --git a/src/realm/replication.hpp b/src/realm/replication.hpp index 5bae6e7c6ee..41d6ff28dd8 100644 --- a/src/realm/replication.hpp +++ b/src/realm/replication.hpp @@ -392,33 +392,34 @@ class Replication { util::Logger* m_logger = nullptr; const Table* m_selected_table = nullptr; ObjKey m_selected_obj; + bool m_selected_obj_is_newly_created = false; CollectionId m_selected_collection; // The ObjKey of the most recently created object for each table (indexed // by the Table's index in the group). Most insertion patterns will only // ever update the most recently created object, so this is almost as // effective as tracking all newly created objects but much cheaper. std::vector m_most_recently_created_object; - // When true, the currently selected object was created in this transaction - // and we don't need to emit instructions for mutations on it - bool m_newly_created_object = false; void unselect_all() noexcept; void select_table(const Table*); // unselects link list and obj - bool select_obj(ObjKey key); - bool select_collection(const CollectionBase&); + [[nodiscard]] bool select_obj(ObjKey key, const Table*); + [[nodiscard]] bool select_collection(const CollectionBase&); void do_select_table(const Table*); - void do_select_obj(ObjKey key); - void do_select_collection(const CollectionBase&); + [[nodiscard]] bool do_select_obj(ObjKey key, const Table*); + void do_select_collection(const CollectionBase& coll); + // When true, the currently selected object was created in this transaction + // and we don't need to emit instructions for mutations on it + bool check_for_newly_created_object(ObjKey key, const Table* table); // Mark this ObjKey as being a newly created object that should not emit // mutation instructions - void track_new_object(ObjKey); + void track_new_object(const Table*, ObjKey); void do_set(const Table*, ColKey col_key, ObjKey key, _impl::Instruction variant = _impl::instr_Set); void log_collection_operation(const char* operation, const CollectionBase& collection, Mixed value, Mixed index) const; - Path get_prop_name(Path&&) const; + Path get_prop_name(ConstTableRef, Path&&) const; size_t transact_log_size(); }; @@ -463,7 +464,7 @@ inline void Replication::unselect_all() noexcept { m_selected_table = nullptr; m_selected_collection = CollectionId(); - m_newly_created_object = false; + m_selected_obj_is_newly_created = false; } inline void Replication::select_table(const Table* table) @@ -474,18 +475,20 @@ inline void Replication::select_table(const Table* table) inline bool Replication::select_collection(const CollectionBase& coll) { + bool newly_created_object = + check_for_newly_created_object(coll.get_owner_key(), coll.get_table().unchecked_ptr()); if (CollectionId(coll) != m_selected_collection) { do_select_collection(coll); // Throws } - return !m_newly_created_object; + return !newly_created_object; } -inline bool Replication::select_obj(ObjKey key) +inline bool Replication::select_obj(ObjKey key, const Table* table) { - if (key != m_selected_obj) { - do_select_obj(key); + if (key != m_selected_obj || table != m_selected_table) { + return !do_select_obj(key, table); } - return !m_newly_created_object; + return 
!m_selected_obj_is_newly_created; } inline void Replication::rename_class(TableKey table_key, StringData) diff --git a/src/realm/sync/CMakeLists.txt b/src/realm/sync/CMakeLists.txt index 19f49857bea..8896f15b50f 100644 --- a/src/realm/sync/CMakeLists.txt +++ b/src/realm/sync/CMakeLists.txt @@ -76,6 +76,7 @@ set(NOINST_HEADERS noinst/integer_codec.hpp noinst/migration_store.hpp noinst/pending_bootstrap_store.hpp + noinst/pending_reset_store.hpp noinst/protocol_codec.hpp noinst/root_certs.hpp noinst/sync_metadata_schema.hpp diff --git a/src/realm/sync/client.cpp b/src/realm/sync/client.cpp index 7e6f7afb45f..214a938969c 100644 --- a/src/realm/sync/client.cpp +++ b/src/realm/sync/client.cpp @@ -1655,7 +1655,7 @@ void SessionWrapper::handle_pending_client_reset_acknowledgement() { REALM_ASSERT(!m_finalized); - auto has_pending_reset = PendingResetStore::has_pending_reset(m_db->start_frozen()); + auto has_pending_reset = PendingResetStore::has_pending_reset(*m_db->start_frozen()); if (!has_pending_reset) { return; // nothing to do } @@ -1678,7 +1678,7 @@ void SessionWrapper::handle_pending_client_reset_acknowledgement() logger.debug(util::LogCategory::reset, "Server has acknowledged %1", pending_reset); auto tr = self->m_db->start_write(); - auto cur_pending_reset = PendingResetStore::has_pending_reset(tr); + auto cur_pending_reset = PendingResetStore::has_pending_reset(*tr); if (!cur_pending_reset) { logger.debug(util::LogCategory::reset, "Client reset cycle detection tracker already removed."); return; @@ -1689,7 +1689,7 @@ void SessionWrapper::handle_pending_client_reset_acknowledgement() else { logger.info(util::LogCategory::reset, "Found new %1", cur_pending_reset); } - PendingResetStore::clear_pending_reset(tr); + PendingResetStore::clear_pending_reset(*tr); tr->commit(); }); } diff --git a/src/realm/sync/noinst/client_history_impl.cpp b/src/realm/sync/noinst/client_history_impl.cpp index 022e381fa04..fbd51d88904 100644 --- a/src/realm/sync/noinst/client_history_impl.cpp +++ b/src/realm/sync/noinst/client_history_impl.cpp @@ -714,7 +714,7 @@ void ClientHistory::set_reciprocal_transform(version_type version, BinaryData da std::size_t index = size_t(version - m_sync_history_base_version) - 1; REALM_ASSERT(index < sync_history_size()); - if (data.is_null()) { + if (data.size() == 0) { m_arrays->reciprocal_transforms.set(index, BinaryData{"", 0}); // Throws return; } @@ -1064,7 +1064,12 @@ void ClientHistory::trim_sync_history() bool ClientHistory::no_pending_local_changes(version_type version) const { ensure_updated(version); - for (size_t i = 0; i < sync_history_size(); i++) { + size_t base_version = 0; + auto upload_client_version = + version_type(m_arrays->root.get_as_ref_or_tagged(s_progress_upload_client_version_iip).get_as_int()); + if (upload_client_version > m_sync_history_base_version) + base_version = size_t(upload_client_version - m_sync_history_base_version); + for (size_t i = base_version; i < sync_history_size(); i++) { if (m_arrays->origin_file_idents.get(i) == 0) { std::size_t pos = 0; BinaryData chunk = m_arrays->changesets.get_at(i, pos); diff --git a/src/realm/sync/noinst/client_reset.cpp b/src/realm/sync/noinst/client_reset.cpp index 8c00fed2d48..6a9e875e345 100644 --- a/src/realm/sync/noinst/client_reset.cpp +++ b/src/realm/sync/noinst/client_reset.cpp @@ -410,18 +410,17 @@ void transfer_group(const Transaction& group_src, Transaction& group_dst, util:: } } -ClientResyncMode reset_precheck_guard(const TransactionRef& wt_local, ClientResyncMode mode, - 
PendingReset::Action action, const std::optional& error, - util::Logger& logger) +static ClientResyncMode reset_precheck_guard(const TransactionRef& wt_local, ClientResyncMode mode, + PendingReset::Action action, const Status& error, util::Logger& logger) { - if (auto previous_reset = sync::PendingResetStore::has_pending_reset(wt_local)) { + if (auto previous_reset = sync::PendingResetStore::has_pending_reset(*wt_local)) { logger.info(util::LogCategory::reset, "Found a previous %1", *previous_reset); if (action != previous_reset->action) { // IF a different client reset is being performed, cler the pending client reset and start over. logger.info(util::LogCategory::reset, "New '%1' client reset of type: '%2' is incompatible - clearing previous reset", action, mode); - sync::PendingResetStore::clear_pending_reset(wt_local); + sync::PendingResetStore::clear_pending_reset(*wt_local); } else { switch (previous_reset->mode) { @@ -444,10 +443,10 @@ ClientResyncMode reset_precheck_guard(const TransactionRef& wt_local, ClientResy util::LogCategory::reset, "A previous '%1' mode reset from %2 downgrades this mode ('%3') to DiscardLocal", previous_reset->mode, previous_reset->time, mode); - sync::PendingResetStore::clear_pending_reset(wt_local); + sync::PendingResetStore::clear_pending_reset(*wt_local); break; case ClientResyncMode::DiscardLocal: - sync::PendingResetStore::clear_pending_reset(wt_local); + sync::PendingResetStore::clear_pending_reset(*wt_local); // previous mode Recover and this mode is Discard, this is not a cycle yet break; case ClientResyncMode::Manual: @@ -473,7 +472,7 @@ ClientResyncMode reset_precheck_guard(const TransactionRef& wt_local, ClientResy mode = ClientResyncMode::DiscardLocal; } } - sync::PendingResetStore::track_reset(wt_local, mode, action, error); + sync::PendingResetStore::track_reset(*wt_local, mode, action, error); // Ensure we save the tracker object even if we encounter an error and roll // back the client reset later wt_local->commit_and_continue_writing(); diff --git a/src/realm/sync/noinst/client_reset.hpp b/src/realm/sync/noinst/client_reset.hpp index 58cb0f9815d..d28714bb21f 100644 --- a/src/realm/sync/noinst/client_reset.hpp +++ b/src/realm/sync/noinst/client_reset.hpp @@ -62,10 +62,6 @@ namespace _impl::client_reset { void transfer_group(const Transaction& tr_src, Transaction& tr_dst, util::Logger& logger, bool allow_schema_additions); -ClientResyncMode reset_precheck_guard(const TransactionRef& wt_local, ClientResyncMode mode, - sync::ProtocolErrorInfo::Action action, const std::optional& error, - util::Logger& logger); - // preform_client_reset_diff() takes the Realm performs a client reset on // the Realm in 'path_local' given the Realm 'path_fresh' as the source of truth. 
// If the fresh path is not provided, discard mode is assumed and all data in the local diff --git a/src/realm/sync/noinst/migration_store.cpp b/src/realm/sync/noinst/migration_store.cpp index db27e510141..6f8e3d10d99 100644 --- a/src/realm/sync/noinst/migration_store.cpp +++ b/src/realm/sync/noinst/migration_store.cpp @@ -60,7 +60,7 @@ bool MigrationStore::load_data(bool read_only) throw RuntimeError(ErrorCodes::UnsupportedFileFormatVersion, "Invalid schema version for flexible sync migration store metadata"); } - load_sync_metadata_schema(tr, &internal_tables); + load_sync_metadata_schema(*tr, &internal_tables); } else { if (read_only) { @@ -72,7 +72,7 @@ bool MigrationStore::load_data(bool read_only) SyncMetadataSchemaVersions schema_versions(tr); // Create the metadata schema and set the version (in the same commit) schema_versions.set_version_for(tr, internal_schema_groups::c_flx_migration_store, c_schema_version); - create_sync_metadata_schema(tr, &internal_tables); + create_sync_metadata_schema(*tr, &internal_tables); tr->commit_and_continue_as_read(); } REALM_ASSERT(m_migration_table); diff --git a/src/realm/sync/noinst/pending_bootstrap_store.cpp b/src/realm/sync/noinst/pending_bootstrap_store.cpp index 10fcfed55d2..187de9b58fc 100644 --- a/src/realm/sync/noinst/pending_bootstrap_store.cpp +++ b/src/realm/sync/noinst/pending_bootstrap_store.cpp @@ -109,7 +109,7 @@ PendingBootstrapStore::PendingBootstrapStore(DBRef db, util::Logger& logger, throw RuntimeError(ErrorCodes::SchemaVersionMismatch, "Invalid schema version for FLX sync pending bootstrap table group"); } - load_sync_metadata_schema(tr, &internal_tables); + load_sync_metadata_schema(*tr, &internal_tables); } else { tr->promote_to_write(); @@ -117,7 +117,7 @@ PendingBootstrapStore::PendingBootstrapStore(DBRef db, util::Logger& logger, SyncMetadataSchemaVersions schema_versions(tr); // Create the metadata schema and set the version (in the same commit) schema_versions.set_version_for(tr, internal_schema_groups::c_pending_bootstraps, c_schema_version); - create_sync_metadata_schema(tr, &internal_tables); + create_sync_metadata_schema(*tr, &internal_tables); tr->commit_and_continue_as_read(); } REALM_ASSERT(m_table); diff --git a/src/realm/sync/noinst/pending_reset_store.cpp b/src/realm/sync/noinst/pending_reset_store.cpp index 266dbb36d39..7afa7083713 100644 --- a/src/realm/sync/noinst/pending_reset_store.cpp +++ b/src/realm/sync/noinst/pending_reset_store.cpp @@ -26,7 +26,6 @@ using namespace realm; using namespace _impl; -using namespace sync; namespace realm::sync { @@ -41,9 +40,7 @@ std::ostream& operator<<(std::ostream& os, const sync::PendingReset& pr) else { os << "pending client reset of type: '" << pr.mode << "' at: " << pr.time; } - if (pr.error) { - os << " for error: " << *pr.error; - } + os << " for error: " << pr.error; return os; } @@ -59,196 +56,112 @@ bool operator==(const sync::PendingReset& lhs, const PendingReset::Action& actio // A table without a "class_" prefix will not generate sync instructions. 
constexpr static std::string_view s_meta_reset_table_name("client_reset_metadata"); -constexpr static std::string_view s_pk_col_name("id"); -constexpr static std::string_view s_timestamp_col_name("reset_time"); -constexpr static std::string_view s_reset_recovery_mode_col_name("reset_mode"); -constexpr static std::string_view s_reset_action_col_name("reset_action"); -constexpr static std::string_view s_reset_error_code_col_name("reset_error_code"); -constexpr static std::string_view s_reset_error_msg_col_name("reset_error_msg"); -constexpr int64_t s_pending_reset_version = 2; - -void PendingResetStore::clear_pending_reset(const TransactionRef& wr_tr) +constexpr static std::string_view s_version_col_name("core_version"); +constexpr static std::string_view s_timestamp_col_name("time"); +constexpr static std::string_view s_reset_recovery_mode_col_name("mode"); +constexpr static std::string_view s_reset_action_col_name("action"); +constexpr static std::string_view s_reset_error_code_col_name("error_code"); +constexpr static std::string_view s_reset_error_msg_col_name("error_msg"); + +void PendingResetStore::clear_pending_reset(Group& group) { - // Write transaction required - REALM_ASSERT(wr_tr->get_transact_stage() == DB::TransactStage::transact_Writing); - auto reset_store = PendingResetStore::load_or_create_schema(wr_tr); - REALM_ASSERT(reset_store.m_pending_reset_table); - // Ensure the pending reset table is empty - if (auto table = wr_tr->get_table(reset_store.m_pending_reset_table); table && !table->is_empty()) { + if (auto table = group.get_table(s_meta_reset_table_name); table && !table->is_empty()) { table->clear(); } - // Don't commit at the end - allow the caller to do it when they are ready } -std::optional PendingResetStore::has_pending_reset(const TransactionRef& rd_tr) +std::optional PendingResetStore::has_pending_reset(const Group& group) { - // Make sure the schema has been loaded and try to read legacy data if it's not found - auto reset_store = PendingResetStore::load_schema(rd_tr); + auto reset_store = PendingResetStore::load_schema(group); if (!reset_store) { - return PendingResetStore::read_legacy_pending_reset(rd_tr); + // Table hasn't been created yet (or has the wrong schema) + return std::nullopt; } - // Otherwise, read the pending reset entry using the schema metadata REALM_ASSERT(reset_store->m_pending_reset_table); - auto table = rd_tr->get_table(reset_store->m_pending_reset_table); + auto table = group.get_table(reset_store->m_pending_reset_table); - if (!table || table->size() == 0) { + if (!table || table->size() != 1) { return std::nullopt; } - if (table->size() > 1) { - // this may happen if a future version of this code changes the format and expectations around reset metadata. 
- throw ClientResetFailed( - util::format("Previous client resets detected (%1) but only one is expected.", table->size())); - } auto reset_entry = *table->begin(); + if (reset_entry.get(reset_store->m_version) != REALM_VERSION_STRING) { + // Previous pending reset was written by a different version, so ignore it + return std::nullopt; + } + PendingReset pending; pending.time = reset_entry.get(reset_store->m_timestamp); pending.mode = to_resync_mode(reset_entry.get(reset_store->m_recovery_mode)); pending.action = to_reset_action(reset_entry.get(reset_store->m_action)); auto error_code = reset_entry.get(reset_store->m_error_code); - if (error_code > 0) { + if (error_code != 0) { pending.error = Status(static_cast(error_code), reset_entry.get(reset_store->m_error_message)); } return pending; } -void PendingResetStore::track_reset(const TransactionRef& wr_tr, ClientResyncMode mode, PendingReset::Action action, - const std::optional& error) +void PendingResetStore::track_reset(Group& group, ClientResyncMode mode, PendingReset::Action action, Status error) { REALM_ASSERT(mode != ClientResyncMode::Manual); - // Write transaction required - REALM_ASSERT(wr_tr->get_transact_stage() == DB::TransactStage::transact_Writing); - if (auto table = wr_tr->get_table(s_meta_reset_table_name); table && table->size() > 0) { - // this may happen if a future version of this code changes the format and expectations around reset - // metadata. - throw ClientResetFailed( - util::format("Previous client resets detected (%1) but only one is expected.", table->size())); - } - auto reset_store = PendingResetStore::load_or_create_schema(wr_tr); + auto reset_store = PendingResetStore::load_or_create_schema(group); REALM_ASSERT(reset_store.m_pending_reset_table); - auto table = wr_tr->get_table(reset_store.m_pending_reset_table); + auto table = group.get_table(reset_store.m_pending_reset_table); REALM_ASSERT(table); - // Create the new object - auto obj = table->create_object_with_primary_key( - ObjectId::gen(), { - {reset_store.m_timestamp, Timestamp(std::chrono::system_clock::now())}, - {reset_store.m_recovery_mode, from_resync_mode(mode)}, - {reset_store.m_action, from_reset_action(action)}, - }); - // Add the error, if provided - if (error) { - obj.set(reset_store.m_error_code, static_cast(error->code())); - obj.set(reset_store.m_error_message, error->reason()); - } - // Don't commit at the end - allow the caller to do it when they are ready + table->clear(); + table->create_object(null_key, { + {reset_store.m_version, Mixed(REALM_VERSION_STRING)}, + {reset_store.m_timestamp, Timestamp(std::chrono::system_clock::now())}, + {reset_store.m_recovery_mode, from_resync_mode(mode)}, + {reset_store.m_action, from_reset_action(action)}, + {reset_store.m_error_code, static_cast(error.code())}, + {reset_store.m_error_message, error.reason()}, + }); } -PendingResetStore::PendingResetStore(const TransactionRef& rd_tr) +PendingResetStore::PendingResetStore(const Group& g) : m_internal_tables{ {&m_pending_reset_table, s_meta_reset_table_name, - {&m_id, s_pk_col_name, type_ObjectId}, { + {&m_version, s_version_col_name, type_String}, {&m_timestamp, s_timestamp_col_name, type_Timestamp}, {&m_recovery_mode, s_reset_recovery_mode_col_name, type_Int}, {&m_action, s_reset_action_col_name, type_Int}, - {&m_error_code, s_reset_error_code_col_name, type_Int, true}, - {&m_error_message, s_reset_error_msg_col_name, type_String, true}, + {&m_error_code, s_reset_error_code_col_name, type_Int}, + {&m_error_message, s_reset_error_msg_col_name, 
type_String}, }}, } { - // Works with read, write, and frozen transactions - SyncMetadataSchemaVersionsReader schema_versions(rd_tr); - auto schema_version = schema_versions.get_version_for(rd_tr, internal_schema_groups::c_pending_reset_store); - - // Load the metadata schema info if a schema version was found - if (schema_version) { - if (*schema_version != s_pending_reset_version) { - // Unsupported schema version - throw RuntimeError(ErrorCodes::UnsupportedFileFormatVersion, - "Found invalid schema version for existing client reset cycle tracking metadata"); - } - load_sync_metadata_schema(rd_tr, &m_internal_tables); - if (m_pending_reset_table) { - // If the schema info was read, then store the schema version - m_schema_version = schema_version; - } + if (!try_load_sync_metadata_schema(g, &m_internal_tables).is_ok()) { + m_pending_reset_table = {}; } } -std::optional PendingResetStore::load_schema(const TransactionRef& rd_tr) +std::optional PendingResetStore::load_schema(const Group& group) { - PendingResetStore reset_store(rd_tr); - if (reset_store.m_schema_version) { + if (PendingResetStore reset_store(group); reset_store.m_pending_reset_table) { return reset_store; } return std::nullopt; } -PendingResetStore PendingResetStore::load_or_create_schema(const TransactionRef& wr_tr) +PendingResetStore PendingResetStore::load_or_create_schema(Group& group) { - PendingResetStore reset_store(wr_tr); - if (reset_store.m_schema_version) { - // If the schema metadata was found, return the initialized class - return reset_store; - } - // Otherwise, set it up from scratch - Make sure the transaction is set for writing - if (wr_tr->get_transact_stage() == DB::TransactStage::transact_Reading) { - wr_tr->promote_to_write(); - } - // Ensure writing - all other transaction stages are not allowed - REALM_ASSERT_EX(wr_tr->get_transact_stage() == DB::TransactStage::transact_Writing, wr_tr->get_transact_stage()); + PendingResetStore reset_store(group); + if (!reset_store.m_pending_reset_table) { + // If the table exists but has the wrong schema just drop it + if (group.has_table(s_meta_reset_table_name)) { + group.remove_table(s_meta_reset_table_name); + } - // Drop the old table and any stale pending resets - if (wr_tr->has_table(s_meta_reset_table_name)) { - wr_tr->remove_table(s_meta_reset_table_name); + // Create the table with the correct schema + create_sync_metadata_schema(group, &reset_store.m_internal_tables); } - - // Ensure the schema versions table is initialized (may add its own commit) - SyncMetadataSchemaVersions schema_versions(wr_tr); - // Create the metadata schema and set the version (in the same commit) - schema_versions.set_version_for(wr_tr, internal_schema_groups::c_pending_reset_store, s_pending_reset_version); - create_sync_metadata_schema(wr_tr, &reset_store.m_internal_tables); - REALM_ASSERT(reset_store.m_pending_reset_table); - reset_store.m_schema_version = s_pending_reset_version; - - // Don't commit yet return reset_store; } -std::optional PendingResetStore::read_legacy_pending_reset(const TransactionRef& rd_tr) -{ - // Try to read the pending reset info from v1 of the schema - constexpr static std::string_view s_v1_version_column_name("version"); - constexpr static std::string_view s_v1_timestamp_col_name("event_time"); - constexpr static std::string_view s_v1_reset_mode_col_name("type_of_reset"); - - // Check for pending reset v1 - does not use schema version - TableRef table = rd_tr->get_table(s_meta_reset_table_name); - if (table && table->size() > 0) { - ColKey 
version_col = table->get_column_key(s_v1_version_column_name); - ColKey timestamp_col = table->get_column_key(s_v1_timestamp_col_name); - ColKey mode_col = table->get_column_key(s_v1_reset_mode_col_name); - Obj reset_entry = *table->begin(); - - if (version_col && reset_entry.get(version_col) == 1LL) { - REALM_ASSERT(timestamp_col); - REALM_ASSERT(mode_col); - PendingReset pending; - pending.time = reset_entry.get(timestamp_col); - pending.mode = to_resync_mode(reset_entry.get(mode_col)); - // Create a fake action depending on the resync mode - pending.action = pending.mode == ClientResyncMode::DiscardLocal - ? sync::ProtocolErrorInfo::Action::ClientResetNoRecovery - : sync::ProtocolErrorInfo::Action::ClientReset; - return pending; - } - } - // Add checking for future schema versions here - return std::nullopt; -} - int64_t PendingResetStore::from_reset_action(PendingReset::Action action) { switch (action) { diff --git a/src/realm/sync/noinst/pending_reset_store.hpp b/src/realm/sync/noinst/pending_reset_store.hpp index a6e0878d8b7..b8e06ab84b9 100644 --- a/src/realm/sync/noinst/pending_reset_store.hpp +++ b/src/realm/sync/noinst/pending_reset_store.hpp @@ -37,7 +37,7 @@ struct PendingReset { Timestamp time; ClientResyncMode mode; Action action = Action::NoAction; - std::optional error; + Status error = Status::OK(); }; std::ostream& operator<<(std::ostream& os, const sync::PendingReset& pr); @@ -49,12 +49,11 @@ class PendingResetStore { // Store the pending reset tracking information - it is an error if the tracking info already // exists in the store // Requires a writable transaction and changes must be committed manually - static void track_reset(const TransactionRef& wr_tr, ClientResyncMode mode, PendingReset::Action action, - const std::optional& error = std::nullopt); + static void track_reset(Group& group, ClientResyncMode mode, PendingReset::Action action, Status error); // Clear the pending reset tracking information, if it exists // Requires a writable transaction and changes must be committed manually - static void clear_pending_reset(const TransactionRef& wr_tr); - static std::optional has_pending_reset(const TransactionRef& rd_tr); + static void clear_pending_reset(Group& group); + static std::optional has_pending_reset(const Group& group); static int64_t from_reset_action(PendingReset::Action action); static PendingReset::Action to_reset_action(int64_t action); @@ -63,27 +62,22 @@ class PendingResetStore { private: // The instantiated class is only used internally - PendingResetStore(const TransactionRef& rd_tr); + PendingResetStore(const Group& group); std::vector m_internal_tables; TableKey m_pending_reset_table; - ColKey m_id; ColKey m_version; ColKey m_timestamp; ColKey m_recovery_mode; ColKey m_action; ColKey m_error_code; ColKey m_error_message; - std::optional m_schema_version = std::nullopt; // Returns true if the schema was loaded - static std::optional load_schema(const TransactionRef& rd_tr); + static std::optional load_schema(const Group& group); // Loads the schema or creates it if it doesn't exist // Requires a writable transaction and changes must be committed manually - static PendingResetStore load_or_create_schema(const TransactionRef& wr_tr); - - // Try to read the pending reset info from v1 of the schema - static std::optional read_legacy_pending_reset(const TransactionRef& rd_tr); + static PendingResetStore load_or_create_schema(Group& group); }; } // namespace realm::sync diff --git a/src/realm/sync/noinst/protocol_codec.cpp 
b/src/realm/sync/noinst/protocol_codec.cpp index d1b1330c693..a91bf1f7e78 100644 --- a/src/realm/sync/noinst/protocol_codec.cpp +++ b/src/realm/sync/noinst/protocol_codec.cpp @@ -215,13 +215,16 @@ void ServerProtocol::insert_single_changeset_download_message(OutputBuffer& out, entry.changeset.write_to(out); if (logger.would_log(util::Logger::Level::trace)) { + util::AppendBuffer changeset_buffer; + entry.changeset.copy_to(changeset_buffer); + logger.trace(util::LogCategory::changeset, "DOWNLOAD: insert single changeset (server_version=%1, " "client_version=%2, timestamp=%3, client_file_ident=%4, " "original_changeset_size=%5, changeset_size=%6, changeset='%7').", changeset_info.server_version, changeset_info.client_version, entry.origin_timestamp, entry.origin_file_ident, changeset_info.original_size, entry.changeset.size(), - _impl::clamped_hex_dump(entry.changeset.get_first_chunk())); // Throws + _impl::clamped_hex_dump(BinaryData(changeset_buffer.data(), changeset_buffer.size()))); // Throws } } diff --git a/src/realm/sync/noinst/sync_metadata_schema.cpp b/src/realm/sync/noinst/sync_metadata_schema.cpp index 4b5f2c76617..90b101041bb 100644 --- a/src/realm/sync/noinst/sync_metadata_schema.cpp +++ b/src/realm/sync/noinst/sync_metadata_schema.cpp @@ -33,26 +33,26 @@ constexpr static std::string_view c_meta_schema_schema_group_field("schema_group } // namespace -void create_sync_metadata_schema(const TransactionRef& tr, std::vector* tables) +void create_sync_metadata_schema(Group& g, std::vector* tables) { util::FlatMap found_tables; for (auto& table : *tables) { - if (tr->has_table(table.name)) { + if (g.has_table(table.name)) { throw RuntimeError( ErrorCodes::RuntimeError, util::format("table %1 already existed when creating internal tables for sync", table.name)); } TableRef table_ref; if (table.is_embedded) { - table_ref = tr->add_table(table.name, Table::Type::Embedded); + table_ref = g.add_table(table.name, Table::Type::Embedded); } else if (table.pk_info) { - table_ref = tr->add_table_with_primary_key(table.name, table.pk_info->data_type, table.pk_info->name, - table.pk_info->is_optional); + table_ref = g.add_table_with_primary_key(table.name, table.pk_info->data_type, table.pk_info->name, + table.pk_info->is_optional); *table.pk_info->key_out = table_ref->get_primary_key_column(); } else { - table_ref = tr->add_table(table.name); + table_ref = g.add_table(table.name); } found_tables.insert({table.name, table_ref}); @@ -83,34 +83,41 @@ void create_sync_metadata_schema(const TransactionRef& tr, std::vector* tables) +void load_sync_metadata_schema(const Group& g, std::vector* tables) +{ + if (auto status = try_load_sync_metadata_schema(g, tables); !status.is_ok()) { + throw Exception(std::move(status)); + } +} + +Status try_load_sync_metadata_schema(const Group& g, std::vector* tables) { for (auto& table : *tables) { - auto table_ref = tr->get_table(table.name); + auto table_ref = g.get_table(table.name); if (!table_ref) { - throw RuntimeError(ErrorCodes::RuntimeError, - util::format("could not find internal sync table %1", table.name)); + return Status(ErrorCodes::RuntimeError, + util::format("could not find internal sync table %1", table.name)); } *table.key_out = table_ref->get_key(); if (table.pk_info) { auto pk_col = table_ref->get_primary_key_column(); if (auto pk_name = table_ref->get_column_name(pk_col); pk_name != table.pk_info->name) { - throw RuntimeError( + return Status( ErrorCodes::RuntimeError, util::format( "primary key name of sync internal table %1 does not match 
(stored: %2, defined: %3)", table.name, pk_name, table.pk_info->name)); } if (auto pk_type = table_ref->get_column_type(pk_col); pk_type != table.pk_info->data_type) { - throw RuntimeError( + return Status( ErrorCodes::RuntimeError, util::format( "primary key type of sync internal table %1 does not match (stored: %2, defined: %3)", table.name, pk_type, table.pk_info->data_type)); } if (auto is_nullable = table_ref->is_nullable(pk_col); is_nullable != table.pk_info->is_optional) { - throw RuntimeError( + return Status( ErrorCodes::RuntimeError, util::format( "primary key nullabilty of sync internal table %1 does not match (stored: %2, defined: %3)", @@ -119,12 +126,12 @@ void load_sync_metadata_schema(const TransactionRef& tr, std::vectorkey_out = pk_col; } else if (table.is_embedded && !table_ref->is_embedded()) { - throw RuntimeError(ErrorCodes::RuntimeError, - util::format("internal sync table %1 should be embedded, but is not", table.name)); + return Status(ErrorCodes::RuntimeError, + util::format("internal sync table %1 should be embedded, but is not", table.name)); } if (table.columns.size() + size_t(table.pk_info ? 1 : 0) != table_ref->get_column_count()) { - throw RuntimeError( + return Status( ErrorCodes::RuntimeError, util::format("sync internal table %1 has a different number of columns than its schema", table.name)); } @@ -132,20 +139,19 @@ void load_sync_metadata_schema(const TransactionRef& tr, std::vectorget_column_key(col.name); if (!col_key) { - throw RuntimeError( - ErrorCodes::RuntimeError, - util::format("column %1 is missing in sync internal table %2", col.name, table.name)); + return Status(ErrorCodes::RuntimeError, + util::format("column %1 is missing in sync internal table %2", col.name, table.name)); } auto found_col_type = table_ref->get_column_type(col_key); if (found_col_type != col.data_type) { - throw RuntimeError( + return Status( ErrorCodes::RuntimeError, util::format("column %1 in sync internal table %2 is the wrong type", col.name, table.name)); } if (col.is_optional != table_ref->is_nullable(col_key)) { - throw RuntimeError( + return Status( ErrorCodes::RuntimeError, util::format("column %1 in sync internal table %2 has different nullabilty than in its schema", col.name, table.name)); @@ -153,14 +159,16 @@ void load_sync_metadata_schema(const TransactionRef& tr, std::vectorget_link_target(col_key)->get_name() != col.target_table) { - RuntimeError(ErrorCodes::RuntimeError, - util::format("column %1 in sync internal table %2 links to the wrong table %3", - col.name, table.name, table_ref->get_link_target(col_key)->get_name())); + return Status(ErrorCodes::RuntimeError, + util::format("column %1 in sync internal table %2 links to the wrong table %3", + col.name, table.name, + table_ref->get_link_target(col_key)->get_name())); } } *col.key_out = col_key; } } + return Status::OK(); } SyncMetadataSchemaVersionsReader::SyncMetadataSchemaVersionsReader(const TransactionRef& tr) @@ -181,7 +189,7 @@ SyncMetadataSchemaVersionsReader::SyncMetadataSchemaVersionsReader(const Transac if (tr->has_table(c_sync_internal_schemas_table)) { // Load m_table with the table/schema information - load_sync_metadata_schema(tr, &unified_schema_version_table_def); + load_sync_metadata_schema(*tr, &unified_schema_version_table_def); } } @@ -197,7 +205,7 @@ std::optional SyncMetadataSchemaVersionsReader::get_legacy_version(cons {&legacy_table_key, c_flx_metadata_table, {{&legacy_version_key, c_meta_schema_version_field, type_Int}}}}; // Convert the legacy table to the regular schema 
versions table if it exists - load_sync_metadata_schema(tr, &legacy_table_def); + load_sync_metadata_schema(*tr, &legacy_table_def); if (auto legacy_meta_table = tr->get_table(legacy_table_key); legacy_meta_table && legacy_meta_table->size() > 0) { @@ -254,14 +262,14 @@ SyncMetadataSchemaVersions::SyncMetadataSchemaVersions(const TransactionRef& tr) // table should have already been initialized or needs to be created, // but re-initialize in case it isn't (e.g. both unified and legacy tables exist in DB) if (REALM_UNLIKELY(tr->has_table(c_sync_internal_schemas_table))) { - load_sync_metadata_schema(tr, &unified_schema_version_table_def); + load_sync_metadata_schema(*tr, &unified_schema_version_table_def); } else { // Only write the versions table if it doesn't exist if (tr->get_transact_stage() != DB::transact_Writing) { tr->promote_to_write(); } - create_sync_metadata_schema(tr, &unified_schema_version_table_def); + create_sync_metadata_schema(*tr, &unified_schema_version_table_def); modified = true; } } diff --git a/src/realm/sync/noinst/sync_metadata_schema.hpp b/src/realm/sync/noinst/sync_metadata_schema.hpp index fcbaa026b99..35f7d547231 100644 --- a/src/realm/sync/noinst/sync_metadata_schema.hpp +++ b/src/realm/sync/noinst/sync_metadata_schema.hpp @@ -27,6 +27,8 @@ #include namespace realm { +class Group; +class Status; class Transaction; using TransactionRef = std::shared_ptr; } // namespace realm @@ -121,8 +123,9 @@ struct SyncMetadataTable { }; -void create_sync_metadata_schema(const TransactionRef& tr, std::vector* tables); -void load_sync_metadata_schema(const TransactionRef& tr, std::vector* tables); +void create_sync_metadata_schema(Group& g, std::vector* tables); +void load_sync_metadata_schema(const Group& g, std::vector* tables); +Status try_load_sync_metadata_schema(const Group& g, std::vector* tables); class SyncMetadataSchemaVersionsReader { public: diff --git a/src/realm/sync/subscriptions.cpp b/src/realm/sync/subscriptions.cpp index 47682f288a5..349d2159138 100644 --- a/src/realm/sync/subscriptions.cpp +++ b/src/realm/sync/subscriptions.cpp @@ -282,7 +282,7 @@ MutableSubscriptionSet::MutableSubscriptionSet(std::weak_ptr void MutableSubscriptionSet::check_is_mutable() const { - if (m_tr->get_transact_stage() != DB::transact_Writing) { + if (!m_tr || m_tr->get_transact_stage() != DB::transact_Writing) { throw WrongTransactionState("Not a write transaction"); } } @@ -547,7 +547,7 @@ int64_t SubscriptionStore::get_downloading_query_version(Transaction& tr) const SubscriptionSet MutableSubscriptionSet::commit() { - if (m_tr->get_transact_stage() != DB::transact_Writing) { + if (!m_tr || m_tr->get_transact_stage() != DB::transact_Writing) { throw LogicError(ErrorCodes::WrongTransactionState, "SubscriptionSet has already been committed"); } auto mgr = get_flx_subscription_store(); // Throws @@ -577,7 +577,12 @@ SubscriptionSet MutableSubscriptionSet::commit() mgr->report_progress(m_tr); - return mgr->get_refreshed(m_obj.get_key(), flx_version, m_tr->get_version_of_current_transaction()); + DB::VersionID commit_version = m_tr->get_version_of_current_transaction(); + // release the read lock so that this instance doesn't keep a version pinned + // for the remainder of its lifetime + m_tr.reset(); + + return mgr->get_refreshed(m_obj.get_key(), flx_version, commit_version); } std::string SubscriptionSet::to_ext_json() const @@ -666,7 +671,7 @@ SubscriptionStore::SubscriptionStore(Private, DBRef db) throw RuntimeError(ErrorCodes::UnsupportedFileFormatVersion, "Invalid schema 
version for flexible sync metadata"); } - load_sync_metadata_schema(tr, &internal_tables); + load_sync_metadata_schema(*tr, &internal_tables); } else { tr->promote_to_write(); @@ -674,7 +679,7 @@ SubscriptionStore::SubscriptionStore(Private, DBRef db) SyncMetadataSchemaVersions schema_versions(tr); // Create the metadata schema and set the version (in the same commit) schema_versions.set_version_for(tr, internal_schema_groups::c_flx_subscription_store, c_flx_schema_version); - create_sync_metadata_schema(tr, &internal_tables); + create_sync_metadata_schema(*tr, &internal_tables); tr->commit_and_continue_as_read(); } REALM_ASSERT(m_sub_set_table); diff --git a/src/realm/sync/transform.cpp b/src/realm/sync/transform.cpp index f0fef18f043..2688e417571 100644 --- a/src/realm/sync/transform.cpp +++ b/src/realm/sync/transform.cpp @@ -2590,8 +2590,9 @@ size_t Transformer::transform_remote_changesets(TransformHistory& history, file_ Changeset& Transformer::get_reciprocal_transform(TransformHistory& history, file_ident_type local_file_ident, version_type version, const HistoryEntry& history_entry) { - auto& changeset = m_reciprocal_transform_cache[version]; // Throws - if (changeset.empty()) { + auto [it, success] = m_reciprocal_transform_cache.insert({version, Changeset{}}); // Throws + if (success) { + Changeset& changeset = it->second; bool is_compressed = false; ChunkedBinaryData data = history.get_reciprocal_transform(version, is_compressed); ChunkedBinaryInputStream in{data}; @@ -2613,7 +2614,7 @@ Changeset& Transformer::get_reciprocal_transform(TransformHistory& history, file origin_file_ident = local_file_ident; changeset.origin_file_ident = origin_file_ident; } - return changeset; + return it->second; } diff --git a/src/realm/util/serializer.cpp b/src/realm/util/serializer.cpp index c19e64b62a9..df4cc42c094 100644 --- a/src/realm/util/serializer.cpp +++ b/src/realm/util/serializer.cpp @@ -43,18 +43,18 @@ static constexpr long date_to_julian(int y, int m, int d) static void julian_to_date(int jd, int* y, int* m, int* d) { - int L = jd + 68569; - int n = (4 * L) / 146097; - int i, j; + uint64_t L = jd + 68569; + uint64_t n = (4 * L) / 146097; + uint64_t i, j; L = L - (146097 * n + 3) / 4; i = (4000 * (L + 1)) / 1461001; L = L - (1461 * i) / 4 + 31; j = (80 * L) / 2447; - *d = L - (2447 * j) / 80; + *d = static_cast(L - (2447 * j) / 80); L = j / 11; - *m = j + 2 - (12 * L); - *y = 100 * (n - 49) + i + L; + *m = static_cast(j + 2 - (12 * L)); + *y = static_cast(100 * (n - 49) + i + L); } // Confirmed to work for all val < 16389 diff --git a/test/object-store/realm.cpp b/test/object-store/realm.cpp index bfe434f856d..8228be8ec8b 100644 --- a/test/object-store/realm.cpp +++ b/test/object-store/realm.cpp @@ -1647,6 +1647,43 @@ TEST_CASE("SharedRealm: convert", "[sync][pbs][convert]") { // Check that the data also exists in the new realm REQUIRE(local_realm2->read_group().get_table("class_object")->size() == 1); } + + SECTION("synced realm must be fully uploaded") { + auto realm = Realm::get_shared_realm(sync_config1); + realm->sync_session()->pause(); + realm->begin_transaction(); + realm->read_group().get_table("class_object")->create_object_with_primary_key(0); + realm->commit_transaction(); + + SyncTestFile sync_config2(tsm, "default"); + sync_config2.schema = schema; + REQUIRE_EXCEPTION(realm->convert(sync_config2), IllegalOperation, + "All client changes must be integrated in server before writing copy"); + + realm->sync_session()->resume(); + wait_for_upload(*realm); + 
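
The new "can convert synced realm from within upload complete callback" section above blocks the test thread on a promise/future pair: the completion handler does its work, fulfils the promise, and `pf.future.get()` unparks the test. The same wait-for-async-callback pattern, sketched with the standard library instead of Realm's `util::make_promise_future` helper and with a plain thread standing in for the sync client:

```cpp
#include <chrono>
#include <future>
#include <iostream>
#include <thread>

int main() {
    std::promise<void> done;
    std::future<void> done_future = done.get_future();

    // Pretend this is the sync client's upload-completion callback firing on its own thread.
    std::thread uploader([&] {
        std::this_thread::sleep_for(std::chrono::milliseconds(50)); // simulate the upload
        std::cout << "upload complete, doing callback work\n";      // e.g. convert the Realm here
        done.set_value();                                           // unblock the waiting thread
    });

    done_future.get(); // the test thread parks here until the callback has run
    std::cout << "callback observed, test can continue\n";
    uploader.join();
}
```
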
REQUIRE_NOTHROW(realm->convert(sync_config2)); + } + + SECTION("can convert synced realm from within upload complete callback") { + auto realm = Realm::get_shared_realm(sync_config1); + realm->sync_session()->pause(); + realm->begin_transaction(); + realm->read_group().get_table("class_object")->create_object_with_primary_key(0); + realm->commit_transaction(); + + SyncTestFile sync_config2(tsm, "default"); + sync_config2.schema = schema; + auto pf = util::make_promise_future(); + realm->sync_session()->wait_for_upload_completion([&](Status) { + sync_config1.scheduler = util::Scheduler::make_dummy(); + auto realm = Realm::get_shared_realm(sync_config1); + REQUIRE_NOTHROW(realm->convert(sync_config2)); + pf.promise.emplace_value(); + }); + realm->sync_session()->resume(); + pf.future.get(); + } } TEST_CASE("SharedRealm: convert - embedded objects", "[sync][pbs][convert][embedded objects]") { diff --git a/test/object-store/sync/app.cpp b/test/object-store/sync/app.cpp index 9820c626e26..113ca3c788d 100644 --- a/test/object-store/sync/app.cpp +++ b/test/object-store/sync/app.cpp @@ -3305,6 +3305,7 @@ TEST_CASE("app: sync logs contain baas coid", "[sync][app][baas]") { }; auto in_mem_logger = std::make_shared(); + in_mem_logger->set_level_threshold(InMemoryLogger::Level::all); TestAppSession app_session(get_runtime_app_session(), nullptr, DeleteApp{false}, ReconnectMode::normal, nullptr, in_mem_logger); diff --git a/test/object-store/sync/client_reset.cpp b/test/object-store/sync/client_reset.cpp index 05352002d39..b1eda09ff7d 100644 --- a/test/object-store/sync/client_reset.cpp +++ b/test/object-store/sync/client_reset.cpp @@ -1683,7 +1683,7 @@ TEST_CASE("sync: client reset", "[sync][pbs][client reset][baas]") { auto has_reset_cycle_flag = [](SharedRealm realm) -> util::Optional { auto db = TestHelper::get_db(realm); auto rd_tr = db->start_frozen(); - return sync::PendingResetStore::has_pending_reset(rd_tr); + return sync::PendingResetStore::has_pending_reset(*rd_tr); }; auto logger = util::Logger::get_default_logger(); ThreadSafeSyncError err; @@ -1697,8 +1697,8 @@ TEST_CASE("sync: client reset", "[sync][pbs][client reset][baas]") { local_config.sync_config->notify_before_client_reset = [mode, action](SharedRealm realm) { auto db = TestHelper::get_db(realm); auto wr_tr = db->start_write(); - sync::PendingResetStore::track_reset( - wr_tr, mode, action, {{ErrorCodes::SyncClientResetRequired, "Bad client file ident"}}); + sync::PendingResetStore::track_reset(*wr_tr, mode, action, + {ErrorCodes::SyncClientResetRequired, "Bad client file ident"}); wr_tr->commit(); }; }; diff --git a/test/test_alloc.cpp b/test/test_alloc.cpp index 332d2768488..f030e6f073e 100644 --- a/test/test_alloc.cpp +++ b/test/test_alloc.cpp @@ -246,7 +246,7 @@ TEST(Alloc_BadBuffer) GROUP_TEST_PATH(path); // Produce an invalid buffer - char buffer[32]; + alignas(8) char buffer[32]; for (size_t i = 0; i < sizeof buffer; ++i) buffer[i] = char((i + 192) % 128); diff --git a/test/test_client_reset.cpp b/test/test_client_reset.cpp index d99c365c028..fc297d68f56 100644 --- a/test/test_client_reset.cpp +++ b/test/test_client_reset.cpp @@ -780,10 +780,9 @@ TEST(ClientReset_DoNotRecoverSchema) CHECK(!compare_groups(rt_1, rt_2)); const Group& group = rt_1.get_group(); - CHECK_EQUAL(group.size(), 3); + CHECK_EQUAL(group.size(), 2); CHECK(group.get_table("class_table1")); CHECK(group.get_table("client_reset_metadata")); - CHECK(group.get_table("sync_internal_schemas")); CHECK_NOT(group.get_table("class_table2")); const Group& group2 = 
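
Most of the test churn above comes from `PendingResetStore::has_pending_reset`, `track_reset`, `clear_pending_reset` and the metadata-schema helpers now taking a `Transaction&`/`Group&` instead of a `const TransactionRef&` (a `shared_ptr`), which is why the call sites dereference with `*tr`, `*wr_tr`, `*rd_tr`. A small illustration of that signature choice, with a toy type in place of Realm's `Transaction`:

```cpp
#include <iostream>
#include <memory>
#include <string>

struct Transaction {    // toy stand-in, not realm::Transaction
    std::string stage;
};

// Before: the callee took the smart pointer even though it never shared ownership.
bool has_pending_reset_old(const std::shared_ptr<Transaction>& tr) {
    return tr && tr->stage == "reading";
}

// After: the callee states what it actually needs -- a live transaction to read from.
// Ownership and lifetime stay with the caller, and the function also works for
// transactions that were never held through a shared_ptr.
bool has_pending_reset(const Transaction& tr) {
    return tr.stage == "reading";
}

int main() {
    auto tr = std::make_shared<Transaction>(Transaction{"reading"});
    std::cout << has_pending_reset_old(tr) << '\n'; // works, but over-constrains callers
    std::cout << has_pending_reset(*tr) << '\n';    // the pattern used by the updated call sites
}
```
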
rt_2.get_group(); CHECK_EQUAL(group2.size(), 1); @@ -854,32 +853,11 @@ TEST(ClientReset_PinnedVersion) } #endif // !REALM_MOBILE -void mark_as_synchronized(DB& db) -{ - auto& history = static_cast(db.get_replication())->get_history(); - sync::version_type current_version; - sync::SaltedFileIdent file_ident; - sync::SyncProgress progress; - history.get_status(current_version, file_ident, progress); - progress.download.last_integrated_client_version = current_version; - progress.upload.client_version = current_version; - progress.upload.last_integrated_server_version = current_version; - sync::VersionInfo info_out; - history.set_sync_progress(progress, 0, info_out); - history.set_client_file_ident({1, 0}, false); -} - void expect_reset(unit_test::TestContext& test_context, DBRef& target, DBRef& fresh, ClientResyncMode mode, SubscriptionStore* sub_store = nullptr, bool allow_recovery = true) { CHECK(target); CHECK(fresh); - // Ensure the schema is initialized before starting the test - { - auto wr_tr = target->start_write(); - PendingResetStore::clear_pending_reset(wr_tr); - wr_tr->commit(); - } auto db_version = target->get_version_of_latest_snapshot(); auto fresh_path = fresh->get_path(); @@ -943,31 +921,16 @@ void expect_reset(unit_test::TestContext& test_context, DBRef& target, DBRef& fr // sync completion to avoid reset cycles { auto tr = target->start_read(); - auto pending_reset = PendingResetStore::has_pending_reset(tr); + auto pending_reset = PendingResetStore::has_pending_reset(*tr); CHECK(pending_reset); CHECK(pending_reset->action == action); CHECK(pending_reset->mode == expected_mode); CHECK(pending_reset->error == error); tr->promote_to_write(); - PendingResetStore::clear_pending_reset(tr); + PendingResetStore::clear_pending_reset(*tr); tr->commit_and_continue_as_read(); - CHECK_NOT(PendingResetStore::has_pending_reset(tr)); - } -} - -std::pair prepare_db(const std::string& path, const std::string& copy_path, - util::FunctionRef fn) -{ - DBRef db = DB::create(make_client_replication(), path); - { - auto wt = db->start_write(); - fn(*wt); - wt->commit(); + CHECK_NOT(PendingResetStore::has_pending_reset(*tr)); } - mark_as_synchronized(*db); - db->write_copy(copy_path, nullptr); - auto db_2 = DB::create(make_client_replication(), copy_path); - return {db, db_2}; } TEST(ClientReset_ConvertResyncMode) @@ -999,89 +962,8 @@ TEST(ClientReset_ConvertResetAction) sync::ClientResetFailed); } -DBRef setup_metadata_table_v1(test_util::unit_test::TestContext& test_context, std::string path, Timestamp ts, - int64_t type) -{ - DBRef db = DB::create(make_client_replication(), path); - auto wt = db->start_write(); - auto table = wt->add_table_with_primary_key("client_reset_metadata", type_ObjectId, "id"); - CHECK(table); - auto version_col = table->add_column(type_Int, "version"); - auto timestamp_col = table->add_column(type_Timestamp, "event_time"); - auto type_col = table->add_column(type_Int, "type_of_reset"); - wt->commit_and_continue_writing(); - auto id = ObjectId::gen(); - table->create_object_with_primary_key(id, { - {version_col, 1}, - {timestamp_col, ts}, - {type_col, type}, - }); - wt->commit_and_continue_as_read(); - table = wt->get_table("client_reset_metadata"); - size_t table_size = table->size(); - CHECK(table_size == 1); - return db; -} - -TEST_TYPES(ClientReset_V1Table, std::integral_constant, - std::integral_constant) -{ - SHARED_GROUP_TEST_PATH(path_v1); - auto timestamp = Timestamp(std::chrono::system_clock::now()); - auto reset_type = 
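
The removed and renamed client-reset tests above are driven by `TEST_TYPES(..., std::integral_constant<...>)`, which wraps an enum value in a distinct type so the test harness can instantiate one test body per value. A compact sketch of that trick using a plain function template; the real `TEST_TYPES` macro and the full `ClientResyncMode` enum belong to Realm's test framework, so only the two modes visible in the diff are used here:

```cpp
#include <iostream>
#include <type_traits>

enum class ClientResyncMode { DiscardLocal, Recover };

// One "test body", stamped out once per integral_constant type.
template <typename TEST_TYPE>
void client_reset_track_reset_case() {
    constexpr ClientResyncMode mode = TEST_TYPE::value; // compile-time parameter
    // The body can branch on the parameter, like the DiscardLocal check in the diff.
    if (mode == ClientResyncMode::DiscardLocal)
        std::cout << "expect ClientResetNoRecovery\n";
    else
        std::cout << "expect ClientReset (recovery)\n";
}

int main() {
    // TEST_TYPES(...) expands to roughly this: one instantiation per listed constant.
    client_reset_track_reset_case<
        std::integral_constant<ClientResyncMode, ClientResyncMode::DiscardLocal>>();
    client_reset_track_reset_case<
        std::integral_constant<ClientResyncMode, ClientResyncMode::Recover>>();
}
```
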
PendingResetStore::from_resync_mode(TEST_TYPE::value); - DBRef db = setup_metadata_table_v1(test_context, path_v1, timestamp, reset_type); - auto rd_tr = db->start_read(); - auto reset = PendingResetStore::has_pending_reset(rd_tr); - CHECK(reset); - CHECK(reset->time == timestamp); - CHECK(reset->mode == TEST_TYPE::value); - if (TEST_TYPE::value == ClientResyncMode::DiscardLocal) { - CHECK(reset->action == sync::ProtocolErrorInfo::Action::ClientResetNoRecovery); - } - else { - CHECK(reset->action == sync::ProtocolErrorInfo::Action::ClientReset); - } -} - -TEST(ClientReset_TrackReset_V1_EntryExists) -{ - SHARED_GROUP_TEST_PATH(path_v1); - auto timestamp = Timestamp(std::chrono::system_clock::now()); - auto reset_type = PendingResetStore::from_resync_mode(ClientResyncMode::Recover); - // Create a previous v1 entry - DBRef db = setup_metadata_table_v1(test_context, path_v1, timestamp, reset_type); - auto wr_tr = db->start_write(); - // Should throw an exception, since the table isn't empty - CHECK_THROW(PendingResetStore::track_reset(wr_tr, ClientResyncMode::DiscardLocal, - sync::ProtocolErrorInfo::Action::RevertToPBS), - sync::ClientResetFailed); -} - -TEST(ClientReset_TrackReset_Existing_empty_V1_table) -{ - SHARED_GROUP_TEST_PATH(path_v1); - auto timestamp = Timestamp(std::chrono::system_clock::now()); - auto reset_type = PendingResetStore::from_resync_mode(ClientResyncMode::Recover); - Status error{ErrorCodes::SyncClientResetRequired, "Bad client file ident"}; - DBRef db = setup_metadata_table_v1(test_context, path_v1, timestamp, reset_type); - auto wr_tr = db->start_write(); - PendingResetStore::clear_pending_reset(wr_tr); - wr_tr->commit_and_continue_writing(); - PendingResetStore::track_reset(wr_tr, ClientResyncMode::DiscardLocal, - sync::ProtocolErrorInfo::Action::RevertToPBS, error); - wr_tr->commit_and_continue_as_read(); - auto reset = PendingResetStore::has_pending_reset(wr_tr); - CHECK(reset); - CHECK(reset->mode == ClientResyncMode::DiscardLocal); - CHECK(reset->action == sync::ProtocolErrorInfo::Action::RevertToPBS); - CHECK(reset->error == error); - timestamp = Timestamp(std::chrono::system_clock::now()); - // Verify timestamp is at least close to current time - CHECK(abs(reset->time.get_seconds() - timestamp.get_seconds()) < 5); -} - TEST_TYPES( - ClientReset_TrackReset_v2, + ClientReset_TrackReset, std::integral_constant, std::integral_constant, std::integral_constant, @@ -1092,9 +974,9 @@ TEST_TYPES( Status error{ErrorCodes::SyncClientResetRequired, "Bad client file ident"}; sync::ProtocolErrorInfo::Action reset_action = TEST_TYPE::value; auto tr = db->start_write(); - PendingResetStore::track_reset(tr, ClientResyncMode::DiscardLocal, reset_action, error); + PendingResetStore::track_reset(*tr, ClientResyncMode::DiscardLocal, reset_action, error); tr->commit_and_continue_as_read(); - auto reset = PendingResetStore::has_pending_reset(tr); + auto reset = PendingResetStore::has_pending_reset(*tr); CHECK(reset); CHECK(reset->mode == ClientResyncMode::DiscardLocal); CHECK(reset->action == reset_action); @@ -1104,6 +986,86 @@ TEST_TYPES( CHECK((reset->time.get_seconds() - timestamp.get_seconds() < 5)); } +TEST(ClientReset_TrackReset_SchemaMismatches) +{ + SHARED_GROUP_TEST_PATH(test_path); + DBRef db = DB::create(make_client_replication(), test_path); + auto tr = db->start_write(); + + // Table does not exist + CHECK_NOT(PendingResetStore::has_pending_reset(*tr)); + PendingResetStore::track_reset(*tr, ClientResyncMode::DiscardLocal, sync::ProtocolErrorInfo::Action::ClientReset, + 
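
`ClientReset_TrackReset_SchemaMismatches` above repeatedly corrupts the `client_reset_metadata` table (missing table, no columns, missing column, extra objects) and expects `has_pending_reset` to report nothing while `track_reset` still succeeds afterwards. One plausible way to model that "treat a malformed table as absent, recreate it on the next write" behaviour is sketched below with a toy in-memory table; the real store's schema handling is not shown in the diff, so this is an assumption-laden illustration, not Realm's implementation:

```cpp
#include <iostream>
#include <map>
#include <optional>
#include <string>

// Toy stand-in: a "table" is just column names mapped to a single stored value.
using Table = std::map<std::string, std::string>;

struct Db {
    std::optional<Table> reset_metadata; // nullopt == table does not exist
};

bool schema_matches(const Table& t) {
    // The real store checks column types, counts and row count; this sketch only
    // requires the two columns it writes.
    return t.count("core_version") == 1 && t.count("action") == 1;
}

// Mirrors what the test expects: a malformed table reads as "no pending reset"...
std::optional<std::string> has_pending_reset(const Db& db) {
    if (!db.reset_metadata || !schema_matches(*db.reset_metadata))
        return std::nullopt;
    return db.reset_metadata->at("action");
}

// ...and tracking a reset recreates the table instead of failing.
void track_reset(Db& db, const std::string& action, const std::string& core_version) {
    if (!db.reset_metadata || !schema_matches(*db.reset_metadata))
        db.reset_metadata.emplace(); // drop-and-recreate on any mismatch
    (*db.reset_metadata)["action"] = action;
    (*db.reset_metadata)["core_version"] = core_version;
}

int main() {
    Db db;
    std::cout << has_pending_reset(db).has_value() << '\n'; // 0: table missing
    track_reset(db, "ClientReset", "14.12.0");
    std::cout << has_pending_reset(db).has_value() << '\n'; // 1: recreated and populated
    db.reset_metadata->erase("action");                     // simulate a schema mismatch
    std::cout << has_pending_reset(db).has_value() << '\n'; // 0 again
}
```
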
Status::OK()); + CHECK(PendingResetStore::has_pending_reset(*tr)); + + // Table exists but has no columns + tr->remove_table("client_reset_metadata"); + tr->add_table("client_reset_metadata"); + CHECK_NOT(PendingResetStore::has_pending_reset(*tr)); + PendingResetStore::track_reset(*tr, ClientResyncMode::DiscardLocal, sync::ProtocolErrorInfo::Action::ClientReset, + Status::OK()); + CHECK(PendingResetStore::has_pending_reset(*tr)); + + // Table has columns but is missing one + auto table = tr->get_table("client_reset_metadata"); + table->remove_column(*table->get_column_keys().begin()); + CHECK_NOT(PendingResetStore::has_pending_reset(*tr)); + PendingResetStore::track_reset(*tr, ClientResyncMode::DiscardLocal, sync::ProtocolErrorInfo::Action::ClientReset, + Status::OK()); + CHECK(PendingResetStore::has_pending_reset(*tr)); + + // Table has too many objects + tr->get_table("client_reset_metadata")->create_object(); + CHECK_NOT(PendingResetStore::has_pending_reset(*tr)); + PendingResetStore::track_reset(*tr, ClientResyncMode::DiscardLocal, sync::ProtocolErrorInfo::Action::ClientReset, + Status::OK()); + CHECK(PendingResetStore::has_pending_reset(*tr)); +} + +TEST(ClientReset_TrackReset_ChecksForMatchingVersion) +{ + SHARED_GROUP_TEST_PATH(test_path); + DBRef db = DB::create(make_client_replication(), test_path); + auto tr = db->start_write(); + PendingResetStore::track_reset(*tr, ClientResyncMode::DiscardLocal, sync::ProtocolErrorInfo::Action::ClientReset, + Status::OK()); + + CHECK(PendingResetStore::has_pending_reset(*tr)); + auto obj = *tr->get_table("client_reset_metadata")->begin(); + obj.set("core_version", "not a valid version"); + CHECK_NOT(PendingResetStore::has_pending_reset(*tr)); +} + +void mark_as_synchronized(DB& db) +{ + auto& history = static_cast(db.get_replication())->get_history(); + sync::version_type current_version; + sync::SaltedFileIdent file_ident; + sync::SyncProgress progress; + history.get_status(current_version, file_ident, progress); + progress.download.last_integrated_client_version = current_version; + progress.upload.client_version = current_version; + progress.upload.last_integrated_server_version = current_version; + sync::VersionInfo info_out; + history.set_sync_progress(progress, 0, info_out); + history.set_client_file_ident({1, 0}, false); +} + +std::pair prepare_db(const std::string& path, const std::string& copy_path, + util::FunctionRef fn) +{ + DBRef db = DB::create(make_client_replication(), path); + { + auto wt = db->start_write(); + fn(*wt); + wt->commit(); + } + mark_as_synchronized(*db); + db->write_copy(copy_path, nullptr); + auto db_2 = DB::create(make_client_replication(), copy_path); + return {db, db_2}; +} + TEST(ClientReset_UninitializedFile) { SHARED_GROUP_TEST_PATH(path_1); @@ -1125,7 +1087,7 @@ TEST(ClientReset_UninitializedFile) _impl::client_reset::perform_client_reset(*test_context.logger, *db_empty, std::move(cr_config), nullptr); CHECK_NOT(did_reset); auto rd_tr = db_empty->start_frozen(); - CHECK_NOT(PendingResetStore::has_pending_reset(rd_tr)); + CHECK_NOT(PendingResetStore::has_pending_reset(*rd_tr)); // Should still have closed and deleted the fresh realm CHECK_NOT(db_fresh->is_attached()); @@ -1297,7 +1259,7 @@ TEST(ClientReset_Recover_RecoveryDisabled) _impl::client_reset::perform_client_reset(*test_context.logger, *dbs.first, std::move(cr_config), nullptr), sync::ClientResetFailed); auto rd_tr = dbs.first->start_frozen(); - CHECK_NOT(PendingResetStore::has_pending_reset(rd_tr)); + 
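
`ClientReset_TrackReset_ChecksForMatchingVersion` above exercises the cycle-detection change from the release notes: overwriting the stored `core_version` with "not a valid version" makes `has_pending_reset` report nothing, so a recovery attempt made by a different core version does not count toward cycle detection and recovery may be retried. A hedged sketch of that comparison follows; the helper name and struct fields are illustrative, not Realm's API:

```cpp
#include <iostream>
#include <optional>
#include <string>

struct PendingReset {
    std::string core_version; // core version that performed the previous attempt
    std::string action;
};

// Treat a reset recorded by a different core version as if there were none,
// so an upgraded client is allowed one more recovery attempt.
std::optional<PendingReset> effective_pending_reset(const std::optional<PendingReset>& stored,
                                                    const std::string& current_core_version) {
    if (stored && stored->core_version == current_core_version)
        return stored;
    return std::nullopt;
}

int main() {
    PendingReset previous{"14.11.2", "ClientReset"};
    // Same version: still pending, so a repeated reset would be flagged as a cycle.
    std::cout << effective_pending_reset(previous, "14.11.2").has_value() << '\n'; // 1
    // Different (e.g. upgraded) version: ignored, recovery may be retried.
    std::cout << effective_pending_reset(previous, "14.12.0").has_value() << '\n'; // 0
}
```
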
CHECK_NOT(PendingResetStore::has_pending_reset(*rd_tr)); } TEST(ClientReset_Recover_ModificationsOnDeletedObject) diff --git a/test/test_parser.cpp b/test/test_parser.cpp index 233cfd2f88d..2db5738418b 100644 --- a/test/test_parser.cpp +++ b/test/test_parser.cpp @@ -6100,6 +6100,41 @@ TEST(Parser_Between) CHECK_THROW_ANY(verify_query(test_context, table, "NONE scores between {10, 12}", 1)); } +TEST(Test_Between_OverLinks) +{ + Group g; + TableRef parent = g.add_table("Parent"); + TableRef child = g.add_table("Child"); + + ColKey ck_child = parent->add_column(*child, "child"); + ColKey ck_int = child->add_column(type_Int, "int"); + ColKey ck_timestamp = child->add_column(type_Timestamp, "timestamp"); + + constexpr size_t num_children = 100; + for (size_t i = 0; i < num_children; ++i) { + auto obj = child->create_object(); + obj.set(ck_int, (int)i); + obj.set(ck_timestamp, Timestamp{int64_t(i), 0}); + parent->create_object().set(ck_child, obj.get_key()); + parent->create_object().set(ck_child, obj.get_key()); + } + parent->create_object(); // null link + + verify_query(test_context, child, "int BETWEEN {0, 100}", 100); + verify_query(test_context, child, "timestamp BETWEEN {$0, $1}", {Timestamp{0, 0}, Timestamp{100, 0}}, 100); + verify_query(test_context, child, "int BETWEEN {1, 2}", 2); + verify_query(test_context, child, "timestamp BETWEEN {$0, $1}", {Timestamp{1, 0}, Timestamp{2, 0}}, 2); + verify_query(test_context, child, "int BETWEEN {-1, -2}", 0); + verify_query(test_context, child, "timestamp BETWEEN {$0, $1}", {Timestamp{-1, 0}, Timestamp{-2, 0}}, 0); + + verify_query(test_context, parent, "child.int BETWEEN {0, 100}", 200); + verify_query(test_context, parent, "child.timestamp BETWEEN {$0, $1}", {Timestamp{0, 0}, Timestamp{100, 0}}, 200); + verify_query(test_context, parent, "child.int BETWEEN {1, 2}", 4); + verify_query(test_context, parent, "child.timestamp BETWEEN {$0, $1}", {Timestamp{1, 0}, Timestamp{2, 0}}, 4); + verify_query(test_context, parent, "child.int BETWEEN {-1, -2}", 0); + verify_query(test_context, parent, "child.timestamp BETWEEN {$0, $1}", {Timestamp{-1, 0}, Timestamp{-2, 0}}, 0); +} + TEST(Parser_PrimaryKey) { UUID u1("3b241101-e2bb-4255-8caf-4136c566a961"); diff --git a/test/test_replication.cpp b/test/test_replication.cpp index 5e0fe845dbf..2c9396af7b3 100644 --- a/test/test_replication.cpp +++ b/test/test_replication.cpp @@ -25,6 +25,7 @@ #include #include #include +#include #include #include "test.hpp" @@ -264,55 +265,204 @@ TEST(Replication_WriteWithoutHistory) } } -struct ObjectMutationObserver : _impl::NoOpTransactionLogParser { +struct Select { + TableKey table_key; +}; + +struct Create { + int64_t obj_key; +}; + +struct Mutate { + int64_t obj_key; + ColKey col_key; +}; + +struct Remove { + int64_t obj_key; +}; + +struct SelectColl { + int64_t obj_key; + ColKey col_key; +}; + +struct CollInsert { + size_t ndx; +}; + +struct CollSet { + size_t ndx; +}; + +using InstructionVariant = mpark::variant; + +std::ostream& print_instructions(std::ostream& os, const std::vector& ivs, + size_t first_difference) noexcept +{ + size_t ndx = 0; + for (auto& element : ivs) { + if (first_difference == ndx) { + os << "==> "; + } + util::format(os, "[%1]: ", ndx++); + auto print = overload{ + [&](Select st) { + util::format(os, "Select{%1}", st.table_key); + }, + [&](Create co) { + util::format(os, "CreateObject{%1}", co.obj_key); + }, + [&](Mutate mo) { + util::format(os, "Mutate{%1, %2}", mo.obj_key, mo.col_key); + }, + [&](Remove rm) { + util::format(os, 
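
`Test_Between_OverLinks` above pins down the query semantics behind the "invalid column key" fix: `BETWEEN {lo, hi}` is an inclusive range, and when applied across a link (`child.int BETWEEN ...`) each parent row whose linked child falls inside the range matches, which is why every parent count in the test is exactly double the child count (each child is linked by two parents, and the parent with a null link never matches). A plain-C++ sketch of that counting, with no Realm query API involved:

```cpp
#include <cstddef>
#include <iostream>
#include <optional>
#include <vector>

struct Child  { int value; };
struct Parent { std::optional<std::size_t> child_index; }; // nullopt == null link

// "value BETWEEN {lo, hi}" -- inclusive on both ends.
bool between(int value, int lo, int hi) { return value >= lo && value <= hi; }

std::size_t count_parents_matching(const std::vector<Parent>& parents,
                                   const std::vector<Child>& children, int lo, int hi) {
    std::size_t n = 0;
    for (const Parent& p : parents)
        if (p.child_index && between(children[*p.child_index].value, lo, hi))
            ++n; // a parent with a null link never matches
    return n;
}

int main() {
    std::vector<Child> children;
    std::vector<Parent> parents;
    for (int i = 0; i < 100; ++i) {
        children.push_back({i});
        parents.push_back({children.size() - 1}); // two parents link to every child,
        parents.push_back({children.size() - 1}); // mirroring the test fixture
    }
    parents.push_back({std::nullopt}); // the unlinked parent from the test

    std::cout << count_parents_matching(parents, children, 0, 100) << '\n'; // 200
    std::cout << count_parents_matching(parents, children, 1, 2) << '\n';   // 4
    std::cout << count_parents_matching(parents, children, -2, -1) << '\n'; // 0
}
```
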
"RemoveObject{%1}", rm.obj_key); + }, + [&](SelectColl sc) { + util::format(os, "SelectCollection{%1, %2}", sc.obj_key, sc.col_key); + }, + [&](CollInsert ci) { + util::format(os, "CollectionInsert{%1}", ci.ndx); + }, + [&](CollSet cs) { + util::format(os, "CollectionSet{%1}", cs.ndx); + }, + }; + mpark::visit(print, element); + os << '\n'; + } + return os; +} + +bool compare_instructions(const InstructionVariant& a, const InstructionVariant& b) +{ + bool equal = false; + auto comp = overload{ + [&](Select a_val) { + if (const Select* b_val = mpark::get_if