From 43d1a26efa4ba012691279406828a1bb6677ed81 Mon Sep 17 00:00:00 2001 From: Jonathan Reams Date: Tue, 23 Jul 2024 21:19:54 -0400 Subject: [PATCH 01/15] RCORE-2210 Remove unused websocket too new/old errors (#7917) --- CHANGELOG.md | 2 +- src/realm/error_codes.h | 3 --- src/realm/sync/network/default_socket.cpp | 25 +--------------------- src/realm/sync/network/websocket.cpp | 9 -------- src/realm/sync/network/websocket_error.hpp | 3 --- src/realm/sync/noinst/client_impl_base.cpp | 9 -------- src/realm/sync/noinst/server/server.cpp | 23 ++++---------------- 7 files changed, 6 insertions(+), 68 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e7c32b9cbfd..651aebfdc67 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,7 +9,7 @@ * None. ### Breaking changes -* None. +* The websocket error codes `websocket_client_too_old`, `websocket_client_too_new`, and `websocket_protocol_mismatch` along with their C API constants were removed. These corresponded to errors the legacy C++ server could have sent, but the baas sync server never did. Any platform networking implementations that surfaced these errors can report a `websocket_fatal_error` instead if an unknown error occurs during the websocket handshake. If a client connects that is too old or too new, it will finish the websocket handshake and then receive an in-band sync `ERROR` message that will be handled by the sync error handler. [PR #7917](https://github.com/realm/realm-core/pull/7917) ### Compatibility * Fileformat: Generates files with format v24. Reads and automatically upgrade from fileformat v10. If you want to upgrade from an earlier file format version you will have to use RealmCore v13.x.y or earlier. diff --git a/src/realm/error_codes.h b/src/realm/error_codes.h index 7faf9f69a61..4b0a85bcd87 100644 --- a/src/realm/error_codes.h +++ b/src/realm/error_codes.h @@ -287,9 +287,6 @@ typedef enum realm_web_socket_errno { RLM_ERR_WEBSOCKET_UNAUTHORIZED = 4001, RLM_ERR_WEBSOCKET_FORBIDDEN = 4002, RLM_ERR_WEBSOCKET_MOVEDPERMANENTLY = 4003, - RLM_ERR_WEBSOCKET_CLIENT_TOO_OLD = 4004, - RLM_ERR_WEBSOCKET_CLIENT_TOO_NEW = 4005, - RLM_ERR_WEBSOCKET_PROTOCOL_MISMATCH = 4006, RLM_ERR_WEBSOCKET_RESOLVE_FAILED = 4400, RLM_ERR_WEBSOCKET_CONNECTION_FAILED = 4401, diff --git a/src/realm/sync/network/default_socket.cpp b/src/realm/sync/network/default_socket.cpp index 6905b480c43..c2b69d64d25 100644 --- a/src/realm/sync/network/default_socket.cpp +++ b/src/realm/sync/network/default_socket.cpp @@ -120,32 +120,9 @@ class DefaultWebSocketImpl final : public DefaultWebSocket, public Config { else { error = WebSocketError::websocket_fatal_error; was_clean = false; - if (!body.empty()) { - std::string_view identifier = "REALM_SYNC_PROTOCOL_MISMATCH"; - auto i = body.find(identifier); - if (i != std::string_view::npos) { - std::string_view rest = body.substr(i + identifier.size()); - // FIXME: Use std::string_view::begins_with() in C++20. 
- auto begins_with = [](std::string_view string, std::string_view prefix) { - return (string.size() >= prefix.size() && - std::equal(string.data(), string.data() + prefix.size(), prefix.data())); - }; - if (begins_with(rest, ":CLIENT_TOO_OLD")) { - error = WebSocketError::websocket_client_too_old; - } - else if (begins_with(rest, ":CLIENT_TOO_NEW")) { - error = WebSocketError::websocket_client_too_new; - } - else { - // Other more complicated forms of mismatch - error = WebSocketError::websocket_protocol_mismatch; - } - was_clean = true; - } - } } - websocket_error_and_close_handler(was_clean, error, ec.message()); + websocket_error_and_close_handler(was_clean, error, body.empty() ? ec.message() : body); } void websocket_protocol_error_handler(std::error_code ec) override { diff --git a/src/realm/sync/network/websocket.cpp b/src/realm/sync/network/websocket.cpp index a173a72fd4c..4a2668b0708 100644 --- a/src/realm/sync/network/websocket.cpp +++ b/src/realm/sync/network/websocket.cpp @@ -979,9 +979,6 @@ class WebSocket { case WebSocketError::websocket_unauthorized: case WebSocketError::websocket_forbidden: case WebSocketError::websocket_moved_permanently: - case WebSocketError::websocket_client_too_old: - case WebSocketError::websocket_client_too_new: - case WebSocketError::websocket_protocol_mismatch: break; default: error_code = 1008; @@ -1180,12 +1177,6 @@ std::ostream& operator<<(std::ostream& os, WebSocketError code) return "WebSocket: Forbidden"; case WebSocketError::websocket_moved_permanently: return "WebSocket: Moved Permanently"; - case WebSocketError::websocket_client_too_old: - return "WebSocket: Client Too Old"; - case WebSocketError::websocket_client_too_new: - return "WebSocket: Client Too New"; - case WebSocketError::websocket_protocol_mismatch: - return "WebSocket: Protocol Mismatch"; case WebSocketError::websocket_resolve_failed: return "WebSocket: Resolve Failed"; diff --git a/src/realm/sync/network/websocket_error.hpp b/src/realm/sync/network/websocket_error.hpp index 2c0dbbd0bc7..6ec2fc7268b 100644 --- a/src/realm/sync/network/websocket_error.hpp +++ b/src/realm/sync/network/websocket_error.hpp @@ -44,9 +44,6 @@ enum class WebSocketError { websocket_unauthorized = RLM_ERR_WEBSOCKET_UNAUTHORIZED, websocket_forbidden = RLM_ERR_WEBSOCKET_FORBIDDEN, websocket_moved_permanently = RLM_ERR_WEBSOCKET_MOVEDPERMANENTLY, - websocket_client_too_old = RLM_ERR_WEBSOCKET_CLIENT_TOO_OLD, - websocket_client_too_new = RLM_ERR_WEBSOCKET_CLIENT_TOO_NEW, - websocket_protocol_mismatch = RLM_ERR_WEBSOCKET_PROTOCOL_MISMATCH, websocket_resolve_failed = RLM_ERR_WEBSOCKET_RESOLVE_FAILED, websocket_connection_failed = RLM_ERR_WEBSOCKET_CONNECTION_FAILED, diff --git a/src/realm/sync/noinst/client_impl_base.cpp b/src/realm/sync/noinst/client_impl_base.cpp index 1dc59827610..32f99d39374 100644 --- a/src/realm/sync/noinst/client_impl_base.cpp +++ b/src/realm/sync/noinst/client_impl_base.cpp @@ -594,15 +594,6 @@ bool Connection::websocket_closed_handler(bool was_clean, WebSocketError error_c ConnectionTerminationReason::ssl_certificate_rejected); // Throws break; } - case WebSocketError::websocket_client_too_old: - [[fallthrough]]; - case WebSocketError::websocket_client_too_new: - [[fallthrough]]; - case WebSocketError::websocket_protocol_mismatch: { - close_due_to_client_side_error({ErrorCodes::SyncProtocolNegotiationFailed, msg}, IsFatal{true}, - ConnectionTerminationReason::http_response_says_fatal_error); // Throws - break; - } case WebSocketError::websocket_fatal_error: { // Error is fatal if the 
sync_route has already been verified - if the sync_route has not // been verified, then use a non-fatal error and try to perform a location update. diff --git a/src/realm/sync/noinst/server/server.cpp b/src/realm/sync/noinst/server/server.cpp index e317076d0d8..87f8c7e43c3 100644 --- a/src/realm/sync/noinst/server/server.cpp +++ b/src/realm/sync/noinst/server/server.cpp @@ -1786,17 +1786,6 @@ class HTTPConnection { Formatter& formatter = misc_buffers.formatter; if (REALM_UNLIKELY(best_match == 0)) { const char* elaboration = "No version supported by both client and server"; - const char* identifier_hint = nullptr; - if (overall_client_max < server_min) { - // Client is too old - elaboration = "Client is too old for server"; - identifier_hint = "CLIENT_TOO_OLD"; - } - else if (overall_client_min > server_max) { - // Client is too new - elaboration = "Client is too new for server"; - identifier_hint = "CLIENT_TOO_NEW"; - } auto format_ranges = [&](const auto& list) { bool nonfirst = false; for (auto range : list) { @@ -1825,12 +1814,8 @@ class HTTPConnection { formatter << "\n"; // Throws formatter << "Client supports: "; // Throws format_ranges(protocol_version_ranges); // Throws - formatter << "\n\n"; // Throws - formatter << "REALM_SYNC_PROTOCOL_MISMATCH"; // Throws - if (identifier_hint) - formatter << ":" << identifier_hint; // Throws - formatter << "\n"; // Throws - handle_400_bad_request({formatter.data(), formatter.size()}); // Throws + formatter << "\n"; // Throws + handle_400_bad_request({formatter.data(), formatter.size()}); // Throws return; } m_negotiated_protocol_version = best_match; @@ -3250,8 +3235,8 @@ void ServerFile::activate() {} void ServerFile::register_client_access(file_ident_type) {} -auto ServerFile::request_file_ident(FileIdentReceiver& receiver, file_ident_type proxy_file, ClientType client_type) - -> file_ident_request_type +auto ServerFile::request_file_ident(FileIdentReceiver& receiver, file_ident_type proxy_file, + ClientType client_type) -> file_ident_request_type { auto request = ++m_last_file_ident_request; m_file_ident_requests[request] = {&receiver, proxy_file, client_type}; // Throws From 793b8b5c9362e490d54b8f8b0be4daeffd9f3fe6 Mon Sep 17 00:00:00 2001 From: Jonathan Reams Date: Tue, 23 Jul 2024 21:55:31 -0400 Subject: [PATCH 02/15] RCORE-2205 Remove unused changeset compaction code (#7915) --- Package.swift | 1 - src/realm/sync/CMakeLists.txt | 2 - src/realm/sync/client_base.hpp | 8 - src/realm/sync/noinst/client_impl_base.cpp | 33 -- src/realm/sync/noinst/client_impl_base.hpp | 1 - src/realm/sync/noinst/compact_changesets.cpp | 431 ----------------- src/realm/sync/noinst/compact_changesets.hpp | 41 -- src/realm/sync/noinst/server/server.cpp | 11 +- src/realm/sync/noinst/server/server.hpp | 5 - .../sync/noinst/server/server_history.cpp | 39 +- .../sync/noinst/server/server_history.hpp | 6 +- test/CMakeLists.txt | 1 - test/peer.hpp | 11 - test/sync_fixtures.hpp | 3 - test/test_compact_changesets.cpp | 438 ------------------ test/test_embedded_objects.cpp | 12 - test/test_sync.cpp | 1 - test/test_transform.cpp | 6 - 18 files changed, 3 insertions(+), 1047 deletions(-) delete mode 100644 src/realm/sync/noinst/compact_changesets.cpp delete mode 100644 src/realm/sync/noinst/compact_changesets.hpp delete mode 100644 test/test_compact_changesets.cpp diff --git a/Package.swift b/Package.swift index 12ae750add8..0be83435842 100644 --- a/Package.swift +++ b/Package.swift @@ -118,7 +118,6 @@ let notSyncServerSources: [String] = [ 
"realm/sync/noinst/client_reset.cpp", "realm/sync/noinst/client_reset_operation.cpp", "realm/sync/noinst/client_reset_recovery.cpp", - "realm/sync/noinst/compact_changesets.cpp", "realm/sync/noinst/migration_store.cpp", "realm/sync/noinst/pending_bootstrap_store.cpp", "realm/sync/noinst/pending_reset_store.cpp", diff --git a/src/realm/sync/CMakeLists.txt b/src/realm/sync/CMakeLists.txt index ed2fce43446..19f49857bea 100644 --- a/src/realm/sync/CMakeLists.txt +++ b/src/realm/sync/CMakeLists.txt @@ -6,7 +6,6 @@ set(SYNC_SOURCES noinst/client_reset.cpp noinst/client_reset_operation.cpp noinst/client_reset_recovery.cpp - noinst/compact_changesets.cpp noinst/migration_store.cpp noinst/pending_bootstrap_store.cpp noinst/pending_reset_store.cpp @@ -74,7 +73,6 @@ set(NOINST_HEADERS noinst/client_reset.hpp noinst/client_reset_operation.hpp noinst/client_reset_recovery.hpp - noinst/compact_changesets.hpp noinst/integer_codec.hpp noinst/migration_store.hpp noinst/pending_bootstrap_store.hpp diff --git a/src/realm/sync/client_base.hpp b/src/realm/sync/client_base.hpp index 8493503f92b..4dafb8bd549 100644 --- a/src/realm/sync/client_base.hpp +++ b/src/realm/sync/client_base.hpp @@ -190,14 +190,6 @@ struct ClientConfig { /// For testing purposes only. bool disable_upload_activation_delay = false; - /// If `disable_upload_compaction` is true, every changeset will be - /// compacted before it is uploaded to the server. Compaction will - /// reduce the size of a changeset if the same field is set multiple - /// times or if newly created objects are deleted within the same - /// transaction. Log compaction increeses CPU usage and memory - /// consumption. - bool disable_upload_compaction = false; - /// The specified function will be called whenever a PONG message is /// received on any connection. The round-trip time in milliseconds will /// be pased to the function. The specified function will always be diff --git a/src/realm/sync/noinst/client_impl_base.cpp b/src/realm/sync/noinst/client_impl_base.cpp index 32f99d39374..89031d3ccdd 100644 --- a/src/realm/sync/noinst/client_impl_base.cpp +++ b/src/realm/sync/noinst/client_impl_base.cpp @@ -6,7 +6,6 @@ #include #include #include -#include #include #include #include @@ -150,7 +149,6 @@ ClientImpl::ClientImpl(ClientConfig config) , m_disable_upload_activation_delay{config.disable_upload_activation_delay} , m_dry_run{config.dry_run} , m_enable_default_port_hack{config.enable_default_port_hack} - , m_disable_upload_compaction{config.disable_upload_compaction} , m_fix_up_object_ids{config.fix_up_object_ids} , m_roundtrip_time_handler{std::move(config.roundtrip_time_handler)} , m_socket_provider{std::move(config.socket_provider)} @@ -184,8 +182,6 @@ ClientImpl::ClientImpl(ClientConfig config) config.pong_keepalive_timeout); // Throws logger.debug("Config param: fast_reconnect_limit = %1 ms", config.fast_reconnect_limit); // Throws - logger.debug("Config param: disable_upload_compaction = %1", - config.disable_upload_compaction); // Throws logger.debug("Config param: disable_sync_to_disk = %1", config.disable_sync_to_disk); // Throws logger.debug( @@ -2109,35 +2105,6 @@ void Session::send_upload_message() #endif } -#if 0 // Upload log compaction is currently not implemented - if (!get_client().m_disable_upload_compaction) { - ChangesetEncoder::Buffer encode_buffer; - - { - // Upload compaction only takes place within single changesets to - // avoid another client seeing inconsistent snapshots. 
- ChunkedBinaryInputStream stream{uc.changeset}; - Changeset changeset; - parse_changeset(stream, changeset); // Throws - // FIXME: What is the point of setting these? How can compaction care about them? - changeset.version = uc.progress.client_version; - changeset.last_integrated_remote_version = uc.progress.last_integrated_server_version; - changeset.origin_timestamp = uc.origin_timestamp; - changeset.origin_file_ident = uc.origin_file_ident; - - compact_changesets(&changeset, 1); - encode_changeset(changeset, encode_buffer); - - logger.debug(util::LogCategory::changeset, "Upload compaction: original size = %1, compacted size = %2", uc.changeset.size(), - encode_buffer.size()); // Throws - } - - upload_message_builder.add_changeset( - uc.progress.client_version, uc.progress.last_integrated_server_version, uc.origin_timestamp, - uc.origin_file_ident, BinaryData{encode_buffer.data(), encode_buffer.size()}); // Throws - } - else -#endif { upload_message_builder.add_changeset(uc.progress.client_version, uc.progress.last_integrated_server_version, uc.origin_timestamp, diff --git a/src/realm/sync/noinst/client_impl_base.hpp b/src/realm/sync/noinst/client_impl_base.hpp index 69d5fdbc4c6..c7e9717f302 100644 --- a/src/realm/sync/noinst/client_impl_base.hpp +++ b/src/realm/sync/noinst/client_impl_base.hpp @@ -246,7 +246,6 @@ class ClientImpl { const bool m_disable_upload_activation_delay; const bool m_dry_run; // For testing purposes only const bool m_enable_default_port_hack; - const bool m_disable_upload_compaction; const bool m_fix_up_object_ids; const std::function m_roundtrip_time_handler; const std::string m_user_agent_string; diff --git a/src/realm/sync/noinst/compact_changesets.cpp b/src/realm/sync/noinst/compact_changesets.cpp deleted file mode 100644 index 695dfaeca43..00000000000 --- a/src/realm/sync/noinst/compact_changesets.cpp +++ /dev/null @@ -1,431 +0,0 @@ -#include -#include - -using namespace realm; -using namespace realm::sync; - -namespace { - -// FIXME: Implement changeset compaction for embedded objects. -#if 0 -struct ChangesetCompactor { - using GlobalID = _impl::ChangesetIndex::GlobalID; - using RangeMap = _impl::ChangesetIndex::Ranges; - using InstructionPosition = std::pair; - - - struct ObjectInfo { - // The instruction that creates the object. - util::Optional create_instruction; - - // All instructions touching the object, not including Create/Erase. - // This also does not include instructions that *target* the object. - RangeMap instructions; - - // For dead objects, link list instructions can generally be removed. - // Due to merge semantics, when a target object is erased, there must - // be a preceding link list erase instruction. However if the link - // list is cleared and all other link list operations are removed, the - // compacted changeset is guaranteed to be mergable. ArraySwap and - // ArrayMove operations can be removed without problems. - // - // The approach taken for link lists of dead objects is to remove all - // ArraySwap and ArrayMove instructions, replace the first instruction - // of type ArraySet, ArrayInsert, ArrayErase or ArrayClear with an - // ArrayClear instruction, and remove all the remaining the - // instructions of this type. Of course, if the first instruction - // already is an ArrayClear the substitution is a no-op. - // - // The std::map field_to_link_list_op stores, for each field(column), - // the position of the first instruction of type ArraySet, ArrayInsert, - // ArrayErase or ArrayClear. 
The previous SelectField instruction and - // the prior size are also stored. This map is needed to substitute a - // set of link list operations with an ArrayClear instruction further - // down. - struct LinkListOp { - InstructionPosition select_field_instr; - InstructionPosition link_list_instr; - uint32_t prior_size; - }; - std::map field_to_link_list_op; - - // If the object is referenced by any alive object, it will become a - // ghost instead of dead. The object will also be a ghost if it has a - // primary key. - // FIXME: Define std::hash for StringData, GlobalID (waiting for Core to - // give us an implementation of std::hash for StringData). - std::map> referenced_by; - }; - - // Ordering guaranteed; Can use lower_bound() to find all objects belonging - // to a particular table. - std::map> m_objects; - std::vector m_changesets; - - void add_changeset(Changeset&); - void add_instruction_at(RangeMap&, Changeset&, Changeset::iterator); - void compact(); - void compact_dead_object(ObjectInfo&, InstructionPosition erase_instruction); - void compact_live_object(ObjectInfo&); - void remove_redundant_selects(); -}; - -void ChangesetCompactor::add_instruction_at(RangeMap& ranges, Changeset& changeset, - Changeset::iterator pos) -{ - _impl::ChangesetIndex::add_instruction_at(ranges, changeset, pos); // Throws -} - -void ChangesetCompactor::add_changeset(Changeset& changeset) -{ - m_changesets.push_back(&changeset); - - StringData selected_table; - StringData selected_link_target_table; - GlobalKey selected_container; - StringData selected_field; - InstructionPosition select_field_pos; - - for (auto it = changeset.begin(); it != changeset.end(); ++it) { - auto instr = *it; - if (!instr) - continue; - - switch (instr->type) { - case Instruction::Type::SelectTable: { - auto& select_table = instr->get_as(); - selected_table = changeset.get_string(select_table.table); - break; - } - case Instruction::Type::SelectField: { - auto& select_container = instr->get_as(); - selected_container = select_container.object; - selected_field = changeset.get_string(select_container.field); - select_field_pos = InstructionPosition {&changeset, it}; - selected_link_target_table = changeset.get_string(select_container.link_target_table); - auto& info = m_objects[selected_table][selected_container]; // Throws - add_instruction_at(info.instructions, changeset, it); // Throws - break; - } - case Instruction::Type::AddTable: break; - case Instruction::Type::EraseTable: break; - case Instruction::Type::CreateObject: { - auto& create_object = instr->get_as(); - GlobalKey object_id = create_object.object; - auto& info = m_objects[selected_table][object_id]; // Throws - info.create_instruction = InstructionPosition{&changeset, it}; - break; - } - case Instruction::Type::EraseObject: { - auto& erase_object = instr->get_as(); - GlobalKey object_id = erase_object.object; - auto it2 = m_objects.find(selected_table); - if (it2 != m_objects.end()) { - auto it3 = it2->second.find(object_id); - if (it3 != it2->second.end()) { - compact_dead_object(it3->second, InstructionPosition{ &changeset, it }); // Throws - it2->second.erase(it3); - } - } - break; - } - case Instruction::Type::Set: { - auto& set = instr->get_as(); - auto& info = m_objects[selected_table][set.object]; // Throws - if (set.payload.type == type_Link) { - StringData link_target_table = changeset.get_string(set.payload.data.link.target_table); - auto& link_target_info = m_objects[link_target_table][set.payload.data.link.target]; // Throws - 
link_target_info.referenced_by[selected_table].insert(set.object); // Throws - } - add_instruction_at(info.instructions, changeset, it); // Throws - break; - } - case Instruction::Type::AddInteger: { - auto& add_integer = instr->get_as(); - auto& info = m_objects[selected_table][add_integer.object]; // Throws - add_instruction_at(info.instructions, changeset, it); // Throws - break; - } - case Instruction::Type::AddColumn: break; - case Instruction::Type::EraseColumn: break; - case Instruction::Type::ArraySet: { - auto& container_set = instr->get_as(); - auto& info = m_objects[selected_table][selected_container]; // Throws - if (container_set.payload.type == type_Link) { - StringData link_target_table = changeset.get_string(container_set.payload.data.link.target_table); - REALM_ASSERT(link_target_table == selected_link_target_table); - // selected_link_target_table is used. - GlobalKey link_target = container_set.payload.data.link.target; - auto& target_info = m_objects[selected_link_target_table][link_target]; // Throws - target_info.referenced_by[selected_table].insert(selected_container); // Throws - ObjectInfo::LinkListOp& link_list_op = info.field_to_link_list_op[selected_field]; // Throws - if (!link_list_op.link_list_instr.first) - link_list_op = {select_field_pos, InstructionPosition{&changeset, it}, container_set.prior_size}; - } - add_instruction_at(info.instructions, changeset, it); // Throws - break; - } - case Instruction::Type::ArrayInsert: { - auto& container_insert = instr->get_as(); - auto& info = m_objects[selected_table][selected_container]; - if (container_insert.payload.type == type_Link) { - // selected_link_target_table is used. - // Realms created by sync version 2.1.0 or newer has a class_ prefix on the property - // container_insert.payload.data.link.target_table. By using selected_link_target_table - // the link target table is correct for both old and new Realms. 
- GlobalKey link_target = container_insert.payload.data.link.target; - auto& target_info = m_objects[selected_link_target_table][link_target]; // Throws - target_info.referenced_by[selected_table].insert(selected_container); // Throws - ObjectInfo::LinkListOp& link_list_op = info.field_to_link_list_op[selected_field]; // Throws - if (!link_list_op.link_list_instr.first) - link_list_op = {select_field_pos, InstructionPosition{&changeset, it}, container_insert.prior_size}; - } - add_instruction_at(info.instructions, changeset, it); // Throws - break; - } - case Instruction::Type::ArrayMove: - case Instruction::Type::ArrayErase: { - auto& info = m_objects[selected_table][selected_container]; // Throws - add_instruction_at(info.instructions, changeset, it); // Throws - ObjectInfo::LinkListOp& link_list_op = info.field_to_link_list_op[selected_field]; // Throws - if (!link_list_op.link_list_instr.first) - link_list_op = {select_field_pos, InstructionPosition{&changeset, it}, - instr->get_as().prior_size}; - break; - } - case Instruction::Type::ArrayClear: { - auto& info = m_objects[selected_table][selected_container]; // Throws - add_instruction_at(info.instructions, changeset, it); // Throws - ObjectInfo::LinkListOp& link_list_op = info.field_to_link_list_op[selected_field]; // Throws - if (!link_list_op.link_list_instr.first) - link_list_op = {select_field_pos, InstructionPosition{&changeset, it}, - instr->get_as().prior_size}; - break; - } - } - } -} - -void ChangesetCompactor::compact() -{ - for (auto& pair : m_objects) { - for (auto& pair2 : pair.second) { - compact_live_object(pair2.second); - } - } - - remove_redundant_selects(); -} - - -void ChangesetCompactor::compact_dead_object(ObjectInfo& info, InstructionPosition erase_instruction) -{ - // "Ghost" objects are objects that we know will be deleted in the - // end, but for which there are other references (such as link list - // instructions), necessitating that the objects are actually - // created. But all instructions populating the ghost object can - // still be discarded, meaning that a reference from a ghost object cannot - // produce more ghosts. - bool is_ghost = false; - if (info.create_instruction) { - if (info.create_instruction->second->get_as().has_primary_key) { - // If the object has a primary key, it must be considered a - // ghost, because the corresponding EraseObject instruction - // will have an effect in case of a primary key conflict. - is_ghost = true; - } - else { - for (auto& pair: info.referenced_by) { - auto oit = m_objects.find(pair.first); - if (oit != m_objects.end()) { - for (auto& object_id : pair.second) { - auto oit2 = oit->second.find(object_id); - if (oit2 != oit->second.end()) { - // Object is referenced by a surviving object, so - // mark it as a ghost. - is_ghost = true; - break; - } - } - } - } - } - } - - // Link list operations are only replaced with ArrayClear if the create - // instruction is kept. - bool maybe_link_list_substitution = is_ghost || !info.create_instruction; - - // Discard all instructions touching the object, other than - // CreateObject and the instruction that deleted the object. - // A link list clear will also be kept or substituted if needed to - // remove other link list operations. 
- for (auto& changeset_ranges : info.instructions) { - for (auto& range: changeset_ranges.second) { - REALM_ASSERT(range.begin.m_pos == 0); - REALM_ASSERT(range.end.m_pos == 0); - for (auto iter = range.begin; iter < range.end;) { - auto instr = *iter; - REALM_ASSERT(instr); - bool select_field_must_be_kept = false; - bool link_list_instr_must_be_substituted = false; - uint32_t prior_size = 0; - if (maybe_link_list_substitution) { - for (const auto& pair: info.field_to_link_list_op) { - if (instr == *pair.second.link_list_instr.second) { - link_list_instr_must_be_substituted = true; - prior_size = pair.second.prior_size; - break; - } - if (instr == *pair.second.select_field_instr.second) { - select_field_must_be_kept = true; - break; - } - } - } - REALM_ASSERT(!select_field_must_be_kept || !link_list_instr_must_be_substituted); - if (link_list_instr_must_be_substituted) { - Instruction::Type type = instr->type; - REALM_ASSERT(type == Instruction::Type::ArraySet - || type == Instruction::Type::ArrayInsert - || type == Instruction::Type::ArrayErase - || type == Instruction::Type::ArrayClear); - if (instr->type != Instruction::Type::ArrayClear) { - // Substitution takes place. - instr->type = Instruction::Type::ArrayClear; - instr->get_as().prior_size = prior_size; - } - ++iter; - } - else if (select_field_must_be_kept) { - REALM_ASSERT(instr->type == Instruction::Type::SelectField); - ++iter; - } - else { - iter = changeset_ranges.first->erase_stable(iter); - } - } - } - } - - if (!is_ghost) { - if (info.create_instruction) { - // We created the object, so we can safely discard the CreateObject - // instruction. - info.create_instruction->first->erase_stable(info.create_instruction->second); - - // The object might have been erased by a ClearTable, in which - // case the deleting instruction should not be discarded. - if (erase_instruction.second->type == Instruction::Type::EraseObject) { - erase_instruction.first->erase_stable(erase_instruction.second); - } - } - } -} - -void ChangesetCompactor::compact_live_object(ObjectInfo& info) -{ - // Look for redundant Set instructions, discard them. - - std::map> last_set_instructions; - - for (auto& changeset_ranges : info.instructions) { - auto& changeset = *changeset_ranges.first; - for (auto& range : changeset_ranges.second) { - for (auto iter = range.begin; iter != range.end; ++iter) { - auto instr = *iter; - REALM_ASSERT(instr); - Instruction::FieldInstructionBase* field_instr = nullptr; - - if (instr->type == Instruction::Type::Set) { - // If a previous Set instruction existed for this field, discard it - // and record the position of this instruction instead. - auto& set = instr->get_as(); - StringData field = changeset.get_string(set.field); - auto it = last_set_instructions.find(field); - - if (it != last_set_instructions.end() && it->second.first->origin_timestamp <= changeset.origin_timestamp) { - it->second.first->erase_stable(it->second.second); - it->second = {&changeset, iter}; - } - else { - last_set_instructions[field] = {&changeset, iter}; - } - } - else if (instr->type == Instruction::Type::AddInteger) { - field_instr = &instr->get_as(); - } - - if (field_instr != nullptr) { - // A non-Set field instruction was encountered, which requires the - // previous Set instruction to be preserved. 
- StringData field = changeset.get_string(field_instr->field); - last_set_instructions.erase(field); - } - } - } - } -} - -void ChangesetCompactor::remove_redundant_selects() { - // This removes sequences of SelectTable and SelectField instructions, - // except the last. Select instructions have no effect if followed - // immediately by another Select instruction at the same level. - - for (Changeset* changeset : m_changesets) { - util::Optional previous_select_table; - util::Optional previous_select_field; - - for (auto it = changeset->begin(); it != changeset->end(); ++it) { - auto instr = *it; - if (!instr) - continue; - - switch (instr->type) { - case Instruction::Type::SelectTable: { - if (previous_select_table) { - changeset->erase_stable(*previous_select_table); - } - if (previous_select_field) { - changeset->erase_stable(*previous_select_field); - previous_select_field = util::none; - } - previous_select_table = it; - break; - } - case Instruction::Type::SelectField: { - if (previous_select_field) { - changeset->erase_stable(*previous_select_field); - } - previous_select_field = it; - break; - } - default: { - previous_select_table = util::none; - previous_select_field = util::none; - break; - } - } - } - } -} - -#endif - -} // unnamed namespace - -void realm::_impl::compact_changesets(Changeset*, size_t) -{ - // FIXME: Implement changeset compaction for embedded objects. - return; - -#if 0 - ChangesetCompactor compactor; - - for (size_t i = 0; i < num_changesets; ++i) { - compactor.add_changeset(changesets[i]); // Throws - } - - compactor.compact(); // Throws -#endif -} diff --git a/src/realm/sync/noinst/compact_changesets.hpp b/src/realm/sync/noinst/compact_changesets.hpp deleted file mode 100644 index a5c93f7cb1c..00000000000 --- a/src/realm/sync/noinst/compact_changesets.hpp +++ /dev/null @@ -1,41 +0,0 @@ - -#ifndef REALM_NOINST_COMPACT_CHANGESETS_HPP -#define REALM_NOINST_COMPACT_CHANGESETS_HPP - -#include - -namespace realm { -namespace _impl { - -/// Compact changesets by removing redundant instructions. -/// -/// Instructions considered for removal: -/// - Set -/// - CreateObject -/// - EraseObject -/// -/// Instructions not (yet) considered for removal: -/// - AddInteger -/// - ContainerInsert -/// - ContainerSet -/// -/// NOTE: All changesets are considered, in the sense that an instruction from -/// and earlier changeset being made redundant by a different instruction in a -/// later changeset will be removed. In other words, it is assumed that the -/// changesets will at least eventually all be applied (for instance as a client -/// receives a batched DOWNLOAD message). -/// -/// NOTE: The input changesets are assumed to be in order of ascending versions, -/// but not necessarily in order of ascending timestamps. Callers must take care -/// to supply changesets in the same order in which they will be applied. -/// -/// This function may throw exceptions due to the fact that it allocates memory. -/// -/// This function is thread-safe, as long as its arguments are not modified by -/// other threads. 
-void compact_changesets(realm::sync::Changeset* changesets, size_t num_changesets); - -} // namespace _impl -} // namespace realm - -#endif // REALM_NOINST_COMPACT_CHANGESETS_HPP diff --git a/src/realm/sync/noinst/server/server.cpp b/src/realm/sync/noinst/server/server.cpp index 87f8c7e43c3..2aa26b47209 100644 --- a/src/realm/sync/noinst/server/server.cpp +++ b/src/realm/sync/noinst/server/server.cpp @@ -2869,7 +2869,6 @@ class Session final : private FileIdentReceiver { std::size_t accum_original_size; std::size_t accum_compacted_size; ServerProtocol& protocol = get_server_protocol(); - bool disable_download_compaction = config.disable_download_compaction; bool enable_cache = (config.enable_download_bootstrap_cache && m_download_progress.server_version == 0 && m_upload_progress.client_version == 0 && m_upload_threshold.client_version == 0); DownloadCache& cache = m_server_file->get_download_cache(); @@ -2902,7 +2901,7 @@ class Session final : private FileIdentReceiver { std::uint_fast64_t cumulative_byte_size_total; bool not_expired = history.fetch_download_info( m_client_file_ident, download_progress, end_version, upload_progress, handler, - cumulative_byte_size_current, cumulative_byte_size_total, disable_download_compaction, + cumulative_byte_size_current, cumulative_byte_size_total, max_download_size); // Throws REALM_ASSERT(upload_progress.client_version >= download_progress.last_integrated_client_version); SyncConnection& conn = get_connection(); @@ -2974,12 +2973,6 @@ class Session final : private FileIdentReceiver { upload_progress.last_integrated_server_version, downloadable_bytes, num_changesets, body, uncompressed_body_size, compressed_body_size, body_is_compressed, logger); // Throws - if (!disable_download_compaction) { - std::size_t saved = accum_original_size - accum_compacted_size; - double saved_2 = (accum_original_size == 0 ? 0 : std::round(saved * 100.0 / accum_original_size)); - logger.detail("Download compaction: Saved %1 bytes (%2%%)", saved, saved_2); // Throws - } - m_download_progress = download_progress; logger.debug("Setting of m_download_progress.server_version = %1", m_download_progress.server_version); // Throws @@ -3867,8 +3860,6 @@ void ServerImpl::start() logger.warn("Testing/debugging feature 'disable sync to disk' enabled - " "never do this in production!"); // Throws } - logger.info("Download compaction: %1", - (m_config.disable_download_compaction ? "No" : "Yes")); // Throws logger.info("Download bootstrap caching: %1", (m_config.enable_download_bootstrap_cache ? "Yes" : "No")); // Throws logger.info("Max download size: %1 bytes", m_config.max_download_size); // Throws diff --git a/src/realm/sync/noinst/server/server.hpp b/src/realm/sync/noinst/server/server.hpp index cc665c2321f..b455f7b41da 100644 --- a/src/realm/sync/noinst/server/server.hpp +++ b/src/realm/sync/noinst/server/server.hpp @@ -162,11 +162,6 @@ class Server { /// initiated. milliseconds_type soft_close_timeout = default_soft_close_timeout; - /// Unless disabled, the server will try to eliminate redundant - /// instructions from changesets before sending them to clients, - /// minimizing download sizes at the expense of server CPU usage. - bool disable_download_compaction = false; - /// If set to true, the server will cache the contents of the DOWNLOAD /// message(s) used for client bootstrapping. 
bool enable_download_bootstrap_cache = false; diff --git a/src/realm/sync/noinst/server/server_history.cpp b/src/realm/sync/noinst/server/server_history.cpp index e5764e1c574..902e71d0bae 100644 --- a/src/realm/sync/noinst/server/server_history.cpp +++ b/src/realm/sync/noinst/server/server_history.cpp @@ -6,7 +6,6 @@ #include #include #include -#include #include #include #include @@ -790,7 +789,6 @@ bool ServerHistory::fetch_download_info(file_ident_type client_file_ident, Downl HistoryEntryHandler& handler, std::uint_fast64_t& cumulative_byte_size_current, std::uint_fast64_t& cumulative_byte_size_total, - bool disable_download_compaction, std::size_t accum_byte_size_soft_limit) const { REALM_ASSERT(client_file_ident != 0); @@ -818,11 +816,6 @@ bool ServerHistory::fetch_download_info(file_ident_type client_file_ident, Downl std::vector changesets; std::vector original_changeset_sizes; - if (!disable_download_compaction) { - std::size_t reserve = to_size_t(end_version - download_progress_2.server_version); - changesets.reserve(reserve); // Throws - original_changeset_sizes.reserve(reserve); // Throws - } for (;;) { version_type begin_version = download_progress_2.server_version; @@ -842,20 +835,7 @@ bool ServerHistory::fetch_download_info(file_ident_type client_file_ident, Downl if (entry.origin_file_ident == 0) entry.origin_file_ident = m_local_file_ident; - if (!disable_download_compaction) { - ChunkedBinaryInputStream stream{entry.changeset}; - Changeset changeset; - parse_changeset(stream, changeset); // Throws - changeset.version = download_progress_2.server_version; - changeset.last_integrated_remote_version = entry.remote_version; - changeset.origin_timestamp = entry.origin_timestamp; - changeset.origin_file_ident = entry.origin_file_ident; - changesets.push_back(std::move(changeset)); // Throws - original_changeset_sizes.push_back(entry.changeset.size()); // Throws - } - else { - handler.handle(download_progress_2.server_version, entry, entry.changeset.size()); // Throws - } + handler.handle(download_progress_2.server_version, entry, entry.changeset.size()); // Throws accum_byte_size += entry.changeset.size(); @@ -863,23 +843,6 @@ bool ServerHistory::fetch_download_info(file_ident_type client_file_ident, Downl break; } - if (!disable_download_compaction) { - compact_changesets(changesets.data(), changesets.size()); - - ChangesetEncoder::Buffer encode_buffer; - for (std::size_t i = 0; i < changesets.size(); ++i) { - auto& changeset = changesets[i]; - encode_changeset(changeset, encode_buffer); // Throws - HistoryEntry entry; - entry.remote_version = changeset.last_integrated_remote_version; - entry.origin_file_ident = changeset.origin_file_ident; - entry.origin_timestamp = changeset.origin_timestamp; - entry.changeset = BinaryData{encode_buffer.data(), encode_buffer.size()}; - handler.handle(changeset.version, entry, original_changeset_sizes[i]); // Throws - encode_buffer.clear(); - } - } - // Set cumulative byte sizes. std::int_fast64_t cumulative_byte_size_current_2 = 0; std::int_fast64_t cumulative_byte_size_total_2 = 0; diff --git a/src/realm/sync/noinst/server/server_history.hpp b/src/realm/sync/noinst/server/server_history.hpp index c1d5f24c5ff..b8372c94d51 100644 --- a/src/realm/sync/noinst/server/server_history.hpp +++ b/src/realm/sync/noinst/server/server_history.hpp @@ -372,10 +372,6 @@ class ServerHistory : public sync::SyncReplication, /// \param cumulative_byte_size_total is the cumulative byte size of /// the entire history. 
/// - /// \param disable_download_compaction denotes whether the fetched changesets - /// should be compacted before they are handed off to the - /// HistoryEntryHandler. - /// /// \param accum_byte_size_soft_limit denotes a soft limit on the total size /// of the uncompacted changesets. When the total size of the changesets /// exceeds this limit, no more changesets will be added. @@ -385,7 +381,7 @@ class ServerHistory : public sync::SyncReplication, bool fetch_download_info(file_ident_type client_file_ident, DownloadCursor& download_progress, version_type end_version, UploadCursor& upload_progress, HistoryEntryHandler&, std::uint_fast64_t& cumulative_byte_size_current, - std::uint_fast64_t& cumulative_byte_size_total, bool disable_download_compaction, + std::uint_fast64_t& cumulative_byte_size_total, std::size_t accum_byte_size_soft_limit = 0x20000) const; /// The application must call this function before using the history as an diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index 9b3e6dd2f68..97829133ecd 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -199,7 +199,6 @@ if(REALM_ENABLE_SYNC) test_array_sync.cpp test_changeset_encoding.cpp test_client_reset.cpp - test_compact_changesets.cpp test_crypto.cpp test_embedded_objects.cpp test_instruction_replication.cpp diff --git a/test/peer.hpp b/test/peer.hpp index aa6115b7957..de65a8f3974 100644 --- a/test/peer.hpp +++ b/test/peer.hpp @@ -31,7 +31,6 @@ #include #include #include -#include #include #include @@ -225,11 +224,6 @@ class ShortCircuitHistory : public SyncReplication { m_current_time += amount; } - void set_disable_compaction(bool b) - { - m_disable_compaction = b; - } - std::map> m_optimistic_object_id_collisions; ShortCircuitHistory(file_ident_type local_file_ident, TestDirNameGenerator* changeset_dump_dir_gen); @@ -271,7 +265,6 @@ class ShortCircuitHistory : public SyncReplication { std::vector m_entries; std::vector> m_entries_data_owner; std::map> m_reciprocal_transforms; - bool m_disable_compaction = false; ChunkedBinaryData get_reciprocal_transform(file_ident_type remote_file_ident, version_type version) const { @@ -473,10 +466,6 @@ inline auto ShortCircuitHistory::integrate_remote_changesets(file_ident_type rem sync::parse_remote_changeset(incoming_changesets[i], changesets[i]); // Throws } - if (!m_disable_compaction) { - _impl::compact_changesets(changesets.data(), changesets.size()); - } - TransformHistoryImpl transform_hist{*this, remote_file_ident}; auto apply = [&](const Changeset* c) -> bool { sync::InstructionApplier applier{*transact}; diff --git a/test/sync_fixtures.hpp b/test/sync_fixtures.hpp index c5e375baae6..b0a7dbe3db9 100644 --- a/test/sync_fixtures.hpp +++ b/test/sync_fixtures.hpp @@ -430,7 +430,6 @@ class MultiClientServerFixture { std::string server_ssl_certificate_path = get_test_resource_path() + "test_sync_ca.pem"; std::string server_ssl_certificate_key_path = get_test_resource_path() + "test_sync_key.pem"; - bool disable_download_compaction = false; bool disable_upload_compaction = false; bool disable_history_compaction = false; @@ -525,7 +524,6 @@ class MultiClientServerFixture { config_2.connection_reaper_timeout = config.server_connection_reaper_timeout; config_2.connection_reaper_interval = config.server_connection_reaper_interval; config_2.max_download_size = config.max_download_size; - config_2.disable_download_compaction = config.disable_download_compaction; config_2.tcp_no_delay = true; config_2.authorization_header_name = config.authorization_header_name; 
config_2.encryption_key = config.server_encryption_key; @@ -549,7 +547,6 @@ class MultiClientServerFixture { config_2.reconnect_mode = ReconnectMode::testing; config_2.ping_keepalive_period = config.client_ping_period; config_2.pong_keepalive_timeout = config.client_pong_timeout; - config_2.disable_upload_compaction = config.disable_upload_compaction; config_2.one_connection_per_session = config.one_connection_per_session; config_2.disable_upload_activation_delay = config.disable_upload_activation_delay; config_2.fix_up_object_ids = true; diff --git a/test/test_compact_changesets.cpp b/test/test_compact_changesets.cpp deleted file mode 100644 index 8f7e0a843f7..00000000000 --- a/test/test_compact_changesets.cpp +++ /dev/null @@ -1,438 +0,0 @@ -#include "test.hpp" -#include "util/random.hpp" - -#include -#include - -using namespace realm; -using namespace realm::sync; -using namespace realm::_impl; - -namespace { -struct InstructionBuilder : InstructionHandler { - using Instruction = realm::sync::Instruction; - - explicit InstructionBuilder(Changeset& log) - : m_log(log) - { - } - Changeset& m_log; - - void operator()(const Instruction& instr) final - { - m_log.push_back(instr); - } - - StringBufferRange add_string_range(StringData string) final - { - return m_log.append_string(string); - } - - void set_intern_string(uint32_t index, StringBufferRange range) final - { - m_log.interned_strings()[index] = range; - } - - InternString intern_string(StringData string) - { - return m_log.intern_string(string); - } -}; -} // unnamed namespace - -// FIXME: Compaction is disabled since path-based instructions. -TEST_IF(CompactChangesets_RedundantSets, false) -{ - using Instruction = realm::sync::Instruction; - Changeset changeset; - InstructionBuilder push(changeset); - - auto table = changeset.intern_string("Test"); - - Instruction::Update set1; - set1.table = table; - set1.object = GlobalKey{1, 1}; - set1.field = changeset.intern_string("foo"); - set1.value = Instruction::Payload(int64_t(123)); - push(set1); - - Instruction::Update set2; - set2.table = table; - set2.object = GlobalKey{1, 1}; - set2.field = changeset.intern_string("foo"); - set2.value = Instruction::Payload(int64_t(345)); - push(set2); - - Instruction::Update set3; - set3.table = table; - set3.object = GlobalKey{1, 1}; - set3.field = changeset.intern_string("foo"); - set3.value = Instruction::Payload(int64_t(123)); - push(set3); - - CHECK_EQUAL(changeset.size(), 4); - - compact_changesets(&changeset, 1); - - CHECK_EQUAL(changeset.size(), 2); -} - -// FIXME: Compaction is disabled since path-based instructions. -TEST_IF(CompactChangesets_DiscardsCreateErasePair, false) -{ - using Instruction = realm::sync::Instruction; - Changeset changeset; - InstructionBuilder push(changeset); - - auto table = changeset.intern_string("Test"); - - Instruction::CreateObject create_object; - create_object.table = table; - create_object.object = GlobalKey{1, 1}; - push(create_object); - - Instruction::Update set; - set.table = table; - set.object = GlobalKey{1, 1}; - set.field = changeset.intern_string("foo"); - set.value = Instruction::Payload{int64_t(123)}; - push(set); - - Instruction::EraseObject erase_object; - erase_object.table = table; - erase_object.object = GlobalKey{1, 1}; - push(erase_object); - - CHECK_EQUAL(changeset.size(), 4); - - compact_changesets(&changeset, 1); - - CHECK_EQUAL(changeset.size(), 1); -} - -// FIXME: Compaction is disabled since path-based instructions. 
-TEST_IF(CompactChangesets_LinksRescueObjects, false) -{ - using Instruction = realm::sync::Instruction; - Changeset changeset; - InstructionBuilder push(changeset); - - auto table = changeset.intern_string("Test"); - auto other = changeset.intern_string("Other"); - - Instruction::CreateObject create_object; - create_object.table = table; - create_object.object = GlobalKey{1, 1}; - push(create_object); - - Instruction::Update set; - set.table = table; - set.field = changeset.intern_string("foo"); - set.object = GlobalKey{1, 1}; - set.value = Instruction::Payload{int64_t(123)}; - push(set); - - Instruction::ArrayInsert link_list_insert; - link_list_insert.table = other; - link_list_insert.object = GlobalKey{1, 2}; - link_list_insert.field = changeset.intern_string("field"); - link_list_insert.prior_size = 0; - link_list_insert.path.push_back(0); - link_list_insert.value = Instruction::Payload(Instruction::Payload::Link{table, GlobalKey{1, 1}}); - push(link_list_insert); - - // slightly unrealistic; this would always be preceded by a LinkListErase - // (nullify) instruction, but whatever. - // FIXME: ... until dangling links are implemented. - Instruction::EraseObject erase_object; - erase_object.table = table; - erase_object.object = GlobalKey{1, 1}; - push(erase_object); - - CHECK_EQUAL(changeset.size(), 8); - - compact_changesets(&changeset, 1); - - CHECK_EQUAL(changeset.size(), 7); -} - -// FIXME: Compaction is disabled since path-based instructions. -TEST_IF(CompactChangesets_EliminateSubgraphs, false) -{ - using Instruction = realm::sync::Instruction; - Changeset changeset; - InstructionBuilder push(changeset); - - auto table = changeset.intern_string("Test"); - - Instruction::CreateObject create_object; - create_object.table = table; - create_object.object = GlobalKey{1, 1}; - push(create_object); - - Instruction::CreateObject create_object_2; - create_object_2.table = table; - create_object_2.object = GlobalKey{1, 2}; - push(create_object_2); - - // Create a link from {1, 1} to {1, 2} - Instruction::ArrayInsert link_list_insert; - link_list_insert.table = table; - link_list_insert.object = GlobalKey{1, 1}; - link_list_insert.field = changeset.intern_string("field"); - link_list_insert.prior_size = 0; - link_list_insert.path.push_back(0); - link_list_insert.value = Instruction::Payload(Instruction::Payload::Link{table, GlobalKey{1, 2}}); - push(link_list_insert); - - // slightly unrealistic; this would always be preceded by a LinkListErase - // (nullify) instruction, but whatever. - Instruction::EraseObject erase_object; - erase_object.table = table; - erase_object.object = GlobalKey{1, 1}; - push(erase_object); - - Instruction::EraseObject erase_object_2; - erase_object_2.table = table; - erase_object_2.object = GlobalKey{1, 2}; - push(erase_object_2); - - CHECK_EQUAL(changeset.size(), 7); - - compact_changesets(&changeset, 1); - - CHECK_EQUAL(changeset.size(), 1); // Only the SelectTable remains -} - - -// FIXME: Compaction is disabled since path-based instructions. 
-TEST_IF(CompactChangesets_EraseRecreate, false) -{ - using Instruction = realm::sync::Instruction; - Changeset changeset; - InstructionBuilder push(changeset); - - auto table = changeset.intern_string("Test"); - auto field = changeset.intern_string("foo"); - - Instruction::CreateObject create_1; - create_1.table = table; - create_1.object = GlobalKey{1, 1}; - push(create_1); - - Instruction::Update set_1; - set_1.table = table; - set_1.object = GlobalKey{1, 1}; - set_1.field = field; - set_1.value = Instruction::Payload{int64_t(123)}; - push(set_1); - - Instruction::EraseObject erase; - erase.table = table; - erase.object = GlobalKey{1, 1}; - push(erase); - - Instruction::CreateObject create_2; - create_2.table = table; - create_2.object = GlobalKey{1, 1}; - push(create_2); - - Instruction::Update set_2; - set_2.table = table; - set_2.object = GlobalKey{1, 1}; - set_2.field = field; - set_2.value = Instruction::Payload{int64_t(123)}; - push(set_2); - - CHECK_EQUAL(changeset.size(), 6); - - compact_changesets(&changeset, 1); - - CHECK_EQUAL(changeset.size(), 3); // Only the first Set instruction should be removed -} - - -#if 0 -TEST(CompactChangesets_PrimaryKeysRescueObjects) -{ - using Instruction = realm::sync::Instruction; - Changeset changeset; - InstructionBuilder push(changeset); - - push(Instruction::SelectTable{changeset.intern_string("Test")}); - - Instruction::CreateObject create_object; - create_object.has_primary_key = true; - create_object.payload = Instruction::Payload{int64_t(123)}; - create_object.object = object_id_for_primary_key(123); - push(create_object); - - - Instruction::Update set; - set.field = changeset.intern_string("foo"); - set.object = object_id_for_primary_key(123); - set.payload = Instruction::Payload{int64_t(123)}; - push(set); - - Instruction::EraseObject erase_object; - erase_object.object = object_id_for_primary_key(123); - push(erase_object); - - CHECK_EQUAL(changeset.size(), 4); - - compact_changesets(&changeset, 1); - - CHECK_EQUAL(changeset.size(), 3); -} - -namespace { - -GlobalKey make_object_id(test_util::Random& random) -{ - return GlobalKey{random.draw_int(1, 3), random.draw_int(1, 5)}; -} - -void select_table(StringData table, InstructionBuilder& builder, StringData& selected_table) -{ - using Instruction = realm::sync::Instruction; - if (selected_table != table) { - builder(Instruction::SelectTable{builder.intern_string(table)}); - selected_table = table; - } -} -} // unnamed namespace - -TEST_IF(CompactChangesets_Measure, false) -{ - using Instruction = realm::sync::Instruction; - using B = InstructionBuilder; - using R = test_util::Random; - - static const size_t changeset_size = 10000; - - auto generate_large_changeset = [](Changeset& changeset) { - InstructionBuilder builder(changeset); - - // Assuming schema: - // - // class Foo { - // string foo; - // int bar; - // } - // - // class Bar { - // Link foo; - // int bar; - // } - - - auto create_foo = [](B& builder, R& random, StringData& selected_table) { - select_table("class_Foo", builder, selected_table); - Instruction::CreateObject create_object; - create_object.object = make_object_id(random); - create_object.has_primary_key = false; - builder(create_object); - }; - - auto create_bar = [](B& builder, R& random, StringData& selected_table) { - select_table("class_Bar", builder, selected_table); - Instruction::CreateObject create_object; - create_object.object = make_object_id(random); - create_object.has_primary_key = false; - builder(create_object); - }; - - auto erase_foo = [](B& 
builder, R& random, StringData& selected_table) { - select_table("class_Foo", builder, selected_table); - Instruction::EraseObject erase_object; - erase_object.object = make_object_id(random); - builder(erase_object); - }; - - auto erase_bar = [](B& builder, R& random, StringData& selected_table) { - select_table("class_Bar", builder, selected_table); - Instruction::EraseObject erase_object; - erase_object.object = make_object_id(random); - builder(erase_object); - }; - - auto set_foo_foo = [](B& builder, R& random, StringData& selected_table) { - select_table("class_Foo", builder, selected_table); - Instruction::Update set; - set.object = make_object_id(random); - set.field = builder.intern_string("foo"); - set.payload = Instruction::Payload(random.draw_int(0, 10)); - builder(set); - }; - - auto set_foo_bar = [](B& builder, R& random, StringData& selected_table) { - select_table("class_Foo", builder, selected_table); - Instruction::Update set; - set.object = make_object_id(random); - set.field = builder.intern_string("bar"); - set.payload = Instruction::Payload(builder.add_string_range( - "VERY LONG STRING VERY LONG STRING VERY LONG STRING VERY LONG STRING VERY LONG STRING VERY LONG " - "STRING VERY LONG STRING VERY LONG STRING VERY LONG STRING VERY LONG STRING VERY LONG STRING VERY " - "LONG STRING VERY LONG STRING VERY LONG STRING VERY LONG STRING VERY LONG STRING")); - builder(set); - }; - - auto set_bar_foo = [](B& builder, R& random, StringData& selected_table) { - select_table("class_Bar", builder, selected_table); - Instruction::Update set; - set.object = make_object_id(random); - set.field = builder.intern_string("foo"); - Instruction::Payload::Link link; - link.target_table = builder.intern_string("class_Foo"); - link.target = make_object_id(random); - set.payload = Instruction::Payload(link); - builder(set); - }; - - auto set_bar_bar = [](B& builder, R& random, StringData& selected_table) { - select_table("class_Bar", builder, selected_table); - Instruction::Update set; - set.object = make_object_id(random); - set.field = builder.intern_string("bar"); - set.payload = Instruction::Payload(random.draw_int(0, 10)); - builder(set); - }; - - static const size_t num_actions = 8; - void (*actions[num_actions])(InstructionBuilder&, test_util::Random&, StringData&) = { - create_foo, create_bar, erase_foo, erase_bar, set_foo_foo, set_foo_bar, set_bar_foo, set_bar_bar, - }; - - test_util::Random random; - StringData selected_table; - - random.seed(123); - - for (size_t i = 0; i < changeset_size; ++i) { - size_t action_ndx = random.draw_int_mod(num_actions); - auto action = actions[action_ndx]; - action(builder, random, selected_table); - } - }; - - Changeset changeset; - generate_large_changeset(changeset); - size_t num_instructions_before = changeset.size(); - - util::AppendBuffer encoded_uncompacted; - encode_changeset(changeset, encoded_uncompacted); - - std::cout << "Encoded, uncompacted: " << encoded_uncompacted.size() << " bytes\n"; - - compact_changesets(&changeset, 1); - size_t num_instructions_after = changeset.size(); - - util::AppendBuffer encoded_compacted; - encode_changeset(changeset, encoded_compacted); - - std::cout << "Encoded, compacted: " << encoded_compacted.size() << " bytes\n"; - std::cout << "# instructions discarded: " << num_instructions_before - num_instructions_after << "\n"; - std::cout << "\n"; -} - -#endif diff --git a/test/test_embedded_objects.cpp b/test/test_embedded_objects.cpp index bc2d5101873..441aef61c98 100644 --- a/test/test_embedded_objects.cpp +++ 
b/test/test_embedded_objects.cpp @@ -600,12 +600,6 @@ TEST(EmbeddedObjects_CreateEraseCreateSequencePreservesObject) auto client_1 = &*it.clients[0]; auto client_2 = &*it.clients[1]; - // Disable history compaction to be certain that create-erase-create - // cycles are not eliminated. - server->history.set_disable_compaction(true); - client_1->history.set_disable_compaction(true); - client_2->history.set_disable_compaction(true); - // Create baseline client_1->transaction([&](Peer& c) { auto& tr = *c.group; @@ -667,12 +661,6 @@ TEST(EmbeddedObjects_CreateEraseCreateSequencePreservesObject_Nested) auto client_1 = &*it.clients[0]; auto client_2 = &*it.clients[1]; - // Disable history compaction to be certain that create-erase-create - // cycles are not eliminated. - server->history.set_disable_compaction(true); - client_1->history.set_disable_compaction(true); - client_2->history.set_disable_compaction(true); - // Create baseline client_1->transaction([&](Peer& c) { auto& tr = *c.group; diff --git a/test/test_sync.cpp b/test/test_sync.cpp index ed7a20f511c..92933661667 100644 --- a/test/test_sync.cpp +++ b/test/test_sync.cpp @@ -5390,7 +5390,6 @@ TEST(Sync_LogCompaction_EraseObject_LinkList) // Log comapction is true by default, but we emphasize it. config.disable_upload_compaction = false; - config.disable_download_compaction = false; ClientServerFixture fixture(dir, test_context, std::move(config)); fixture.start(); diff --git a/test/test_transform.cpp b/test/test_transform.cpp index 9cb07fa8fdc..1551e708f60 100644 --- a/test/test_transform.cpp +++ b/test/test_transform.cpp @@ -1562,12 +1562,6 @@ TEST(Transform_CreateEraseCreateSequencePreservesObject) auto client_1 = &*it.clients[0]; auto client_2 = &*it.clients[1]; - // Disable history compaction to be certain that create-erase-create - // cycles are not eliminated. 
- server->history.set_disable_compaction(true); - client_1->history.set_disable_compaction(true); - client_2->history.set_disable_compaction(true); - // Create baseline client_1->transaction([&](Peer& c) { auto table = c.group->add_table_with_primary_key("class_table", type_Int, "pk"); From 337a9c05c7aeb7d55941a3b6b22ce1ddf1eff476 Mon Sep 17 00:00:00 2001 From: Michael Wilkerson-Barker Date: Wed, 24 Jul 2024 09:12:17 -0400 Subject: [PATCH 03/15] Fix intermittent failure of the role change client reset tests (#7919) --- test/object-store/sync/flx_role_change.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/test/object-store/sync/flx_role_change.cpp b/test/object-store/sync/flx_role_change.cpp index f8321ed3c81..c6e7a8b3d69 100644 --- a/test/object-store/sync/flx_role_change.cpp +++ b/test/object-store/sync/flx_role_change.cpp @@ -957,7 +957,10 @@ TEST_CASE("flx: role changes during client resets complete successfully", const SyncClientHookData& data) { bool is_fresh_path; auto session = weak_session.lock(); - REQUIRE(session); // Should always be valid + if (!session) { + // Session is being closed while this function was called + return SyncClientHookAction::NoAction; + } is_fresh_path = _impl::client_reset::is_fresh_path(session->path()); client_reset_state.transition_with( From 3339bbb8c84cc2ddcf66af7792f40bceeb8d13d2 Mon Sep 17 00:00:00 2001 From: Gagik Amaryan Date: Wed, 24 Jul 2024 21:32:18 +0200 Subject: [PATCH 04/15] RCORE-2200: Fix switch_user and get_profile during log_in_with_credentials (#7894) --- CHANGELOG.md | 2 +- src/realm/object-store/sync/app.cpp | 10 ++++++++-- test/object-store/sync/app.cpp | 15 +++++++++++++++ 3 files changed, 24 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 651aebfdc67..7d8e6d20a6d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,7 +6,7 @@ ### Fixed * ([#????](https://github.com/realm/realm-core/issues/????), since v?.?.?) -* None. +* App subscription callback was getting fired before the user profile was retrieved on login, leading to an empty user profile when using the callback. ([#7889](https://github.com/realm/realm-core/issues/7889), since v14.7.0) ### Breaking changes * The websocket error codes `websocket_client_too_old`, `websocket_client_too_new`, and `websocket_protocol_mismatch` along with their C API constants were removed. These corresponded to errors the legacy C++ server could have sent, but the baas sync server never did. Any platform networking implementations that surfaced these errors can report a `websocket_fatal_error` instead if an unknown error occurs during the websocket handshake. If a client connects that is too old or too new, it will finish the websocket handshake and then receive an in-band sync `ERROR` message that will be handled by the sync error handler. 
[PR #7917](https://github.com/realm/realm-core/pull/7917) diff --git a/src/realm/object-store/sync/app.cpp b/src/realm/object-store/sync/app.cpp index ebb121d2861..2b2a8b0fca0 100644 --- a/src/realm/object-store/sync/app.cpp +++ b/src/realm/object-store/sync/app.cpp @@ -835,8 +835,14 @@ void App::log_in_with_credentials(const AppCredentials& credentials, const std:: return completion(nullptr, AppError(ErrorCodes::BadToken, "Could not log in user: received malformed JWT")); } - switch_user(user); - get_profile(user, std::move(completion)); + + get_profile(user, [this, completion = std::move(completion)](const std::shared_ptr& user, + Optional error) { + if (!error) { + switch_user(user); + } + completion(user, error); + }); }, false); } diff --git a/test/object-store/sync/app.cpp b/test/object-store/sync/app.cpp index 680ae6742b8..36811ac317d 100644 --- a/test/object-store/sync/app.cpp +++ b/test/object-store/sync/app.cpp @@ -4172,6 +4172,17 @@ TEST_CASE("app: jwt login and metadata tests", "[sync][app][user][metadata][func SECTION("jwt happy path") { bool processed = false; + bool logged_in_once = false; + + auto token = app->subscribe([&logged_in_once, &app](auto&) { + REQUIRE(!logged_in_once); + auto user = app->current_user(); + auto metadata = user->user_profile(); + + // Ensure that the JWT metadata fields are available when the callback is fired on login. + CHECK(metadata["name"] == "Foo Bar"); + logged_in_once = true; + }); std::shared_ptr user = log_in(app, AppCredentials::custom(jwt)); @@ -4192,6 +4203,10 @@ TEST_CASE("app: jwt login and metadata tests", "[sync][app][user][metadata][func auto custom_data = *user->custom_data(); CHECK(custom_data["name"] == "Not Foo Bar"); CHECK(metadata["name"] == "Foo Bar"); + + REQUIRE(logged_in_once); + + app->unsubscribe(token); } } From dea51e1e9b2219689cf27b44a0206135b984f9cf Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Thu, 25 Jul 2024 11:27:24 +0200 Subject: [PATCH 05/15] Prepare for release 14.11.1 (#7924) Co-authored-by: nicola-cab <1497069+nicola-cab@users.noreply.github.com> --- CHANGELOG.md | 4 +--- Package.swift | 2 +- dependencies.yml | 2 +- 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7d8e6d20a6d..64c23c5c904 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,11 +1,9 @@ -# NEXT RELEASE +# 14.11.1 Release notes ### Enhancements -* (PR [#????](https://github.com/realm/realm-core/pull/????)) * None. ### Fixed -* ([#????](https://github.com/realm/realm-core/issues/????), since v?.?.?) * App subscription callback was getting fired before the user profile was retrieved on login, leading to an empty user profile when using the callback. ([#7889](https://github.com/realm/realm-core/issues/7889), since v14.7.0) ### Breaking changes diff --git a/Package.swift b/Package.swift index 0be83435842..13f36481e45 100644 --- a/Package.swift +++ b/Package.swift @@ -3,7 +3,7 @@ import PackageDescription import Foundation -let versionStr = "14.11.0" +let versionStr = "14.11.1" let versionPieces = versionStr.split(separator: "-") let versionCompontents = versionPieces[0].split(separator: ".") let versionExtra = versionPieces.count > 1 ? 
versionPieces[1] : "" diff --git a/dependencies.yml b/dependencies.yml index 8b064ac979a..e9f49bfa898 100644 --- a/dependencies.yml +++ b/dependencies.yml @@ -1,5 +1,5 @@ PACKAGE_NAME: realm-core -VERSION: 14.11.0 +VERSION: 14.11.1 OPENSSL_VERSION: 3.2.0 ZLIB_VERSION: 1.2.13 # https://github.com/10gen/baas/commits From 0546ccb5e9c41501b7f211afb6f849eef9360452 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Thu, 25 Jul 2024 11:28:58 +0200 Subject: [PATCH 06/15] New changelog section to prepare for vNext (#7925) Co-authored-by: nicola-cab <1497069+nicola-cab@users.noreply.github.com> --- CHANGELOG.md | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 64c23c5c904..c2824f7605a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,26 @@ +# NEXT RELEASE + +### Enhancements +* (PR [#????](https://github.com/realm/realm-core/pull/????)) +* None. + +### Fixed +* ([#????](https://github.com/realm/realm-core/issues/????), since v?.?.?) +* None. + +### Breaking changes +* None. + +### Compatibility +* Fileformat: Generates files with format v24. Reads and automatically upgrade from fileformat v10. If you want to upgrade from an earlier file format version you will have to use RealmCore v13.x.y or earlier. + +----------- + +### Internals +* None. + +---------------------------------------------- + # 14.11.1 Release notes ### Enhancements From 00208931f58ddd207c275ac0762b2dcf1a2ebf9b Mon Sep 17 00:00:00 2001 From: Jonathan Reams Date: Thu, 25 Jul 2024 11:27:19 -0400 Subject: [PATCH 07/15] RCORE-2187 Prefix sync connection log messages with co_id when possible (#7849) --- CHANGELOG.md | 3 +- src/realm/sync/client.cpp | 12 +++-- src/realm/sync/noinst/client_impl_base.cpp | 53 ++++++++++++---------- src/realm/sync/noinst/client_impl_base.hpp | 36 ++++++++++----- src/realm/util/logger.hpp | 5 ++ test/object-store/sync/app.cpp | 45 ++++++++++++++++++ test/object-store/util/test_file.cpp | 8 +++- test/object-store/util/test_file.hpp | 3 +- 8 files changed, 120 insertions(+), 45 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c2824f7605a..816292cb152 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,8 +1,7 @@ # NEXT RELEASE ### Enhancements -* (PR [#????](https://github.com/realm/realm-core/pull/????)) -* None. +* Sync log statements now include the app services connection id in their prefix (e.g `Connection[1:] Session[1]: log message`) to make correlating sync activity to server logs easier during troubleshooting ((PR #7849)[https://github.com/realm/realm-core/pull/7849]). ### Fixed * ([#????](https://github.com/realm/realm-core/issues/????), since v?.?.?) 
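
[Editor's note] For readers skimming the patch, the prefixing scheme introduced by this commit amounts to rebuilding a connection's log prefix once the app services request id ("co_id") is known, so that lines previously tagged "Connection[1]" become "Connection[1:<request id>]". Below is a minimal, self-contained sketch of that idea; it deliberately avoids the realm-core logger types, and ConnectionLogger, set_appservices_coid(), and the sample id are assumptions made for illustration, not the API added by this patch.

// Illustrative sketch only: a logger whose prefix is upgraded in place once the
// app services request id is known. Plain standard C++, not the realm-core types;
// ConnectionLogger, set_appservices_coid() and the sample id are invented here.
#include <functional>
#include <iostream>
#include <optional>
#include <sstream>
#include <string>
#include <utility>

class ConnectionLogger {
public:
    ConnectionLogger(int connection_ident, std::function<void(const std::string&)> sink)
        : m_ident(connection_ident)
        , m_sink(std::move(sink))
    {
    }

    // Called when the server reports the app services request id ("co_id").
    // Subsequent lines read "Connection[<ident>:<coid>]" instead of "Connection[<ident>]".
    void set_appservices_coid(std::string coid)
    {
        m_coid = std::move(coid);
    }

    void log(const std::string& msg) const
    {
        std::ostringstream out;
        out << "Connection[" << m_ident;
        if (m_coid)
            out << ':' << *m_coid;
        out << "] " << msg;
        m_sink(out.str()); // hand the fully prefixed line to the underlying sink
    }

private:
    int m_ident;
    std::optional<std::string> m_coid;
    std::function<void(const std::string&)> m_sink;
};

int main()
{
    ConnectionLogger logger(1, [](const std::string& line) {
        std::cout << line << '\n';
    });
    logger.log("Sending: BIND");             // -> Connection[1] Sending: BIND
    logger.set_appservices_coid("66a2c1d4"); // request id value is made up for the example
    logger.log("Sending: IDENT");            // -> Connection[1:66a2c1d4] Sending: IDENT
}

In the diffs that follow, the equivalent swap happens on logger.base_logger when receive_appservices_request_id() fires, and the per-session loggers are rebuilt on top of the new connection logger.
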
diff --git a/src/realm/sync/client.cpp b/src/realm/sync/client.cpp index 52da5919151..bde98970c35 100644 --- a/src/realm/sync/client.cpp +++ b/src/realm/sync/client.cpp @@ -1832,9 +1832,7 @@ ClientImpl::Connection::Connection(ClientImpl& client, connection_ident_type ide Optional ssl_trust_certificate_path, std::function ssl_verify_callback, Optional proxy_config, ReconnectInfo reconnect_info) - : logger_ptr{std::make_shared(util::LogCategory::session, make_logger_prefix(ident), - client.logger_ptr)} // Throws - , logger{*logger_ptr} + : logger{make_logger(ident, std::nullopt, client.logger.base_logger)} // Throws , m_client{client} , m_verify_servers_ssl_certificate{verify_servers_ssl_certificate} // DEPRECATED , m_ssl_trust_certificate_path{std::move(ssl_trust_certificate_path)} // DEPRECATED @@ -1911,9 +1909,13 @@ std::string ClientImpl::Connection::get_http_request_path() const } -std::string ClientImpl::Connection::make_logger_prefix(connection_ident_type ident) +std::shared_ptr ClientImpl::Connection::make_logger(connection_ident_type ident, + std::optional coid, + std::shared_ptr base_logger) { - return util::format("Connection[%1] ", ident); + std::string prefix = + coid ? util::format("Connection[%1:%2] ", ident, *coid) : util::format("Connection[%1] ", ident); + return std::make_shared(util::LogCategory::session, std::move(prefix), base_logger); } diff --git a/src/realm/sync/noinst/client_impl_base.cpp b/src/realm/sync/noinst/client_impl_base.cpp index 89031d3ccdd..eab65ec8710 100644 --- a/src/realm/sync/noinst/client_impl_base.cpp +++ b/src/realm/sync/noinst/client_impl_base.cpp @@ -137,8 +137,7 @@ bool ClientImpl::decompose_server_url(const std::string& url, ProtocolEnvelope& } ClientImpl::ClientImpl(ClientConfig config) - : logger_ptr{std::make_shared(util::LogCategory::session, std::move(config.logger))} - , logger{*logger_ptr} + : logger(std::make_shared(util::LogCategory::session, std::move(config.logger))) , m_reconnect_mode{config.reconnect_mode} , m_connect_timeout{config.connect_timeout} , m_connection_linger_time{config.one_connection_per_session ? 
0 : config.connection_linger_time} @@ -1225,6 +1224,14 @@ void Connection::disconnect(const SessionErrorInfo& info) m_sessions_enlisted_to_send.clear(); m_sending = false; + if (!m_appservices_coid.empty()) { + m_appservices_coid.clear(); + logger.base_logger = make_logger(m_ident, std::nullopt, get_client().logger.base_logger); + for (auto& [ident, sess] : m_sessions) { + sess->logger.base_logger = Session::make_logger(ident, logger.base_logger); + } + } + report_connection_state_change(ConnectionState::disconnected, info); // Throws initiate_reconnect_wait(); // Throws } @@ -1438,36 +1445,35 @@ void Connection::receive_test_command_response(session_ident_type session_ident, void Connection::receive_server_log_message(session_ident_type session_ident, util::Logger::Level level, std::string_view message) { - std::string prefix; - if (REALM_LIKELY(!m_appservices_coid.empty())) { - prefix = util::format("Server[%1]", m_appservices_coid); - } - else { - prefix = "Server"; - } - if (session_ident != 0) { if (auto sess = get_session(session_ident)) { - sess->logger.log(LogCategory::session, level, "%1 log: %2", prefix, message); + sess->logger.log(LogCategory::session, level, "Server log: %1", message); return; } - logger.log(util::LogCategory::session, level, "%1 log for unknown session %2: %3", prefix, session_ident, + logger.log(util::LogCategory::session, level, "Server log for unknown session %1: %2", session_ident, message); return; } - logger.log(level, "%1 log: %2", prefix, message); + logger.log(level, "Server log: %1", message); } void Connection::receive_appservices_request_id(std::string_view coid) { - // Only set once per connection - if (!coid.empty() && m_appservices_coid.empty()) { - m_appservices_coid = coid; - logger.log(util::LogCategory::session, util::LogCategory::Level::info, - "Connected to app services with request id: \"%1\"", m_appservices_coid); + if (coid.empty() || !m_appservices_coid.empty()) { + return; + } + m_appservices_coid = coid; + logger.log(util::LogCategory::session, util::LogCategory::Level::info, + "Connected to app services with request id: \"%1\". 
Further log entries for this connection will be " + "prefixed with \"Connection[%2:%1]\" instead of \"Connection[%2]\"", + m_appservices_coid, m_ident); + logger.base_logger = make_logger(m_ident, m_appservices_coid, get_client().logger.base_logger); + + for (auto& [ident, sess] : m_sessions) { + sess->logger.base_logger = Session::make_logger(ident, logger.base_logger); } } @@ -1667,15 +1673,14 @@ Session::~Session() } -std::string Session::make_logger_prefix(session_ident_type ident) +std::shared_ptr Session::make_logger(session_ident_type ident, + std::shared_ptr base_logger) { - std::ostringstream out; - out.imbue(std::locale::classic()); - out << "Session[" << ident << "]: "; // Throws - return out.str(); // Throws + auto prefix = util::format("Session[%1]: ", ident); + return std::make_shared(util::LogCategory::session, std::move(prefix), + std::move(base_logger)); } - void Session::activate() { REALM_ASSERT_EX(m_state == Unactivated, m_state); diff --git a/src/realm/sync/noinst/client_impl_base.hpp b/src/realm/sync/noinst/client_impl_base.hpp index c7e9717f302..ab839856ef7 100644 --- a/src/realm/sync/noinst/client_impl_base.hpp +++ b/src/realm/sync/noinst/client_impl_base.hpp @@ -195,8 +195,23 @@ class ClientImpl { static constexpr milliseconds_type default_pong_keepalive_timeout = 120000; // 2 minutes static constexpr milliseconds_type default_fast_reconnect_limit = 60000; // 1 minute - const std::shared_ptr logger_ptr; - util::Logger& logger; + class ForwardingLogger : public util::Logger { + public: + ForwardingLogger(std::shared_ptr logger) + : Logger(logger->get_category(), *logger) + , base_logger(std::move(logger)) + { + } + + void do_log(const util::LogCategory& cat, Level level, const std::string& msg) final + { + Logger::do_log(*base_logger, cat, level, msg); + } + + std::shared_ptr base_logger; + }; + + ForwardingLogger logger; ClientImpl(ClientConfig); ~ClientImpl(); @@ -391,8 +406,7 @@ class ClientImpl::Connection { using ReconnectInfo = ClientImpl::ReconnectInfo; using DownloadMessage = ClientProtocol::DownloadMessage; - std::shared_ptr logger_ptr; - util::Logger& logger; + ForwardingLogger logger; ClientImpl& get_client() noexcept; ReconnectInfo get_reconnect_info() const noexcept; @@ -573,7 +587,9 @@ class ClientImpl::Connection { Session* find_and_validate_session(session_ident_type session_ident, std::string_view message) noexcept; static bool was_voluntary(ConnectionTerminationReason) noexcept; - static std::string make_logger_prefix(connection_ident_type); + static std::shared_ptr make_logger(connection_ident_type ident, + std::optional coid, + std::shared_ptr base_logger); void report_connection_state_change(ConnectionState, util::Optional error_info = util::none); @@ -693,7 +709,6 @@ class ClientImpl::Connection { std::string m_signed_access_token; }; - /// A synchronization session between a local and a remote Realm file. 
/// /// All use of session objects, including construction and destruction, must @@ -703,8 +718,7 @@ class ClientImpl::Session { using ReceivedChangesets = ClientProtocol::ReceivedChangesets; using DownloadMessage = ClientProtocol::DownloadMessage; - std::shared_ptr logger_ptr; - util::Logger& logger; + ForwardingLogger logger; ClientImpl& get_client() noexcept; Connection& get_connection() noexcept; @@ -1058,7 +1072,7 @@ class ClientImpl::Session { request_ident_type m_last_pending_test_command_ident = 0; std::list m_pending_test_commands; - static std::string make_logger_prefix(session_ident_type); + static std::shared_ptr make_logger(session_ident_type, std::shared_ptr base_logger); Session(SessionWrapper& wrapper, Connection&, session_ident_type); @@ -1341,9 +1355,7 @@ inline ClientImpl::Session::Session(SessionWrapper& wrapper, Connection& conn) } inline ClientImpl::Session::Session(SessionWrapper& wrapper, Connection& conn, session_ident_type ident) - : logger_ptr{std::make_shared(util::LogCategory::session, make_logger_prefix(ident), - conn.logger_ptr)} // Throws - , logger{*logger_ptr} + : logger{make_logger(ident, conn.logger.base_logger)} // Throws , m_conn{conn} , m_ident{ident} , m_try_again_delay_info(conn.get_client().m_reconnect_backoff_info, conn.get_client().get_random()) diff --git a/src/realm/util/logger.hpp b/src/realm/util/logger.hpp index 16bd79e6fff..b8a82065ce4 100644 --- a/src/realm/util/logger.hpp +++ b/src/realm/util/logger.hpp @@ -245,6 +245,11 @@ class Logger { set_level_threshold(m_category, level); } + const LogCategory& get_category() const noexcept + { + return m_category; + } + // Set threshold level for the specific category void set_level_threshold(std::string_view cat_name, Level level) noexcept { diff --git a/test/object-store/sync/app.cpp b/test/object-store/sync/app.cpp index 36811ac317d..a1e04c733c0 100644 --- a/test/object-store/sync/app.cpp +++ b/test/object-store/sync/app.cpp @@ -3282,6 +3282,51 @@ TEST_CASE("app: sync integration", "[sync][pbs][app][baas]") { } } +TEST_CASE("app: sync logs contain baas coid", "[sync][app][baas]") { + class InMemoryLogger : public util::Logger { + public: + void do_log(const util::LogCategory& cat, Level level, const std::string& msg) final + { + auto formatted_line = util::format("%1 %2 %3", cat.get_name(), level, msg); + std::lock_guard lk(mtx); + log_messages.emplace_back(std::move(formatted_line)); + } + + std::vector get_log_messages() + { + std::lock_guard lk(mtx); + std::vector ret; + std::swap(ret, log_messages); + return ret; + } + + std::mutex mtx; + std::vector log_messages; + }; + + auto in_mem_logger = std::make_shared(); + TestAppSession app_session(get_runtime_app_session(), nullptr, DeleteApp{false}, ReconnectMode::normal, nullptr, + in_mem_logger); + + const auto partition = random_string(100); + SyncTestFile config(app_session.app()->current_user(), partition, util::none); + auto realm = successfully_async_open_realm(config); + auto sync_session = realm->sync_session(); + auto coid = SyncSession::OnlyForTesting::get_appservices_connection_id(*sync_session); + + auto transition_log_msg = + util::format("Connection[1] Connected to app services with request id: \"%1\". 
Further log entries for this " + "connection will be prefixed with \"Connection[1:%1]\" instead of \"Connection[1]\"", + coid); + auto bind_send_msg = util::format("Connection[1:%1] Session[1]: Sending: BIND", coid); + auto ping_send_msg = util::format("Connection[1:%1] Will emit a ping in", coid); + + auto log_messages = in_mem_logger->get_log_messages(); + REQUIRE_THAT(log_messages, AnyMatch(ContainsSubstring(transition_log_msg))); + REQUIRE_THAT(log_messages, AnyMatch(ContainsSubstring(bind_send_msg))); + REQUIRE_THAT(log_messages, AnyMatch(ContainsSubstring(ping_send_msg))); +} + TEST_CASE("app: trailing slash in base url", "[sync][app]") { auto logger = util::Logger::get_default_logger(); diff --git a/test/object-store/util/test_file.cpp b/test/object-store/util/test_file.cpp index ef2496be9e5..e4f3c372e5a 100644 --- a/test/object-store/util/test_file.cpp +++ b/test/object-store/util/test_file.cpp @@ -336,7 +336,8 @@ TestAppSession::TestAppSession() TestAppSession::TestAppSession(AppSession session, std::shared_ptr custom_transport, DeleteApp delete_app, ReconnectMode reconnect_mode, - std::shared_ptr custom_socket_provider) + std::shared_ptr custom_socket_provider, + std::shared_ptr logger) : m_app_session(std::make_unique(session)) , m_base_file_path(util::make_temp_dir() + random_string(10)) , m_delete_app(delete_app) @@ -356,6 +357,11 @@ TestAppSession::TestAppSession(AppSession session, // connection is kept open for reuse. In tests, we want to shut // down sync clients immediately. app_config.sync_client_config.timeouts.connection_linger_time = 0; + if (logger) { + app_config.sync_client_config.logger_factory = [logger](util::Logger::Level) { + return logger; + }; + } m_app = app::App::get_app(app::App::CacheMode::Disabled, app_config); diff --git a/test/object-store/util/test_file.hpp b/test/object-store/util/test_file.hpp index 93c80b13d91..97d4286dfb4 100644 --- a/test/object-store/util/test_file.hpp +++ b/test/object-store/util/test_file.hpp @@ -326,7 +326,8 @@ class TestAppSession { TestAppSession(); TestAppSession(realm::AppSession, std::shared_ptr = nullptr, DeleteApp = true, realm::ReconnectMode reconnect_mode = realm::ReconnectMode::normal, - std::shared_ptr custom_socket_provider = nullptr); + std::shared_ptr custom_socket_provider = nullptr, + std::shared_ptr logger = nullptr); ~TestAppSession(); std::shared_ptr app() const noexcept From bdf50c1e15d340dbb51429e87ddb1ce4993a9e58 Mon Sep 17 00:00:00 2001 From: Jonathan Reams Date: Fri, 26 Jul 2024 16:18:37 -0400 Subject: [PATCH 08/15] RCORE-2171 Add testing mode where baas integration tests always start out talking to the wrong base url (#7813) --- evergreen/config.yml | 30 ++ test/object-store/sync/app.cpp | 2 +- .../object-store/util/sync/baas_admin_api.cpp | 48 ++- .../object-store/util/sync/baas_admin_api.hpp | 8 + .../util/sync/redirect_server.hpp | 282 ++++++++++++++++++ 5 files changed, 367 insertions(+), 3 deletions(-) create mode 100644 test/object-store/util/sync/redirect_server.hpp diff --git a/evergreen/config.yml b/evergreen/config.yml index bc727f6ebd2..abdc37706ff 100644 --- a/evergreen/config.yml +++ b/evergreen/config.yml @@ -320,6 +320,10 @@ functions: export TSAN_OPTIONS="suppressions=$(pwd)/test/tsan.suppress history_size=4 $TSAN_OPTIONS" export UBSAN_OPTIONS="print_stacktrace=1" + if [[ -n "${enable_baas_redirector}" ]]; then + export ENABLE_BAAS_REDIRECTOR=On + fi + cd build if ! 
"$CTEST" -C ${cmake_build_type|Debug} $TEST_FLAGS; then BAAS_PID=$(pgrep baas_server) @@ -1263,6 +1267,20 @@ task_groups: - compile - ".test_suite !.requires_baas" +# Runs only the baas integration tests +- name: only_baas_integration_tests + max_hosts: 1 + setup_group_can_fail_task: true + setup_group: + - func: "fetch source" + - func: "fetch binaries" + teardown_task: + - func: "upload test results" + timeout: + - func: "run hang analyzer" + tasks: + - ".requires_baas" + # Runs object-store-tests against baas running on remote host - name: compile_test max_hosts: 1 @@ -1418,6 +1436,17 @@ buildvariants: tasks: - name: compile_test +- name: ubuntu-with-redirects + display_name: "Ubuntu (Baas Redirector Enabled)" + run_on: ubuntu2204-arm64-large + expansions: + fetch_missing_dependencies: On + c_compiler: "/opt/clang+llvm/bin/clang" + cxx_compiler: "/opt/clang+llvm/bin/clang++" + enable_baas_redirector: On + tasks: + - name: only_baas_integration_tests + - name: ubuntu-no-geospatial display_name: "Ubuntu (Geospatial Disabled)" run_on: ubuntu2204-arm64-large @@ -1850,6 +1879,7 @@ buildvariants: cxx_compiler: "./clang_binaries/bin/clang++" enable_llvm_coverage: On coveralls_flag_name: "ubuntu-x86_64" + enable_baas_redirector: On grcov_url: "https://s3.amazonaws.com/static.realm.io/evergreen-assets/grcov-x86_64-unknown-linux-gnu.tar.bz2" tasks: - name: compile_test_coverage diff --git a/test/object-store/sync/app.cpp b/test/object-store/sync/app.cpp index a1e04c733c0..9820c626e26 100644 --- a/test/object-store/sync/app.cpp +++ b/test/object-store/sync/app.cpp @@ -737,7 +737,7 @@ TEST_CASE("app: login_with_credentials integration", "[sync][app][user][baas]") // MARK: - UsernamePasswordProviderClient Tests TEST_CASE("app: UsernamePasswordProviderClient integration", "[sync][app][user][baas]") { - const std::string base_url = get_base_url(); + const std::string base_url = get_real_base_url(); AutoVerifiedEmailCredentials creds; auto email = creds.email; auto password = creds.password; diff --git a/test/object-store/util/sync/baas_admin_api.cpp b/test/object-store/util/sync/baas_admin_api.cpp index 8026f1ef944..6306f279f12 100644 --- a/test/object-store/util/sync/baas_admin_api.cpp +++ b/test/object-store/util/sync/baas_admin_api.cpp @@ -17,6 +17,7 @@ //////////////////////////////////////////////////////////////////////////// #include +#include #include @@ -515,7 +516,13 @@ class Baasaas { util::File::remove(lock_file.get_path()); } - const std::string& http_endpoint() + const std::string admin_endpoint() + { + poll(); + return m_http_endpoint; + } + + std::string http_endpoint() { poll(); return m_http_endpoint; @@ -584,6 +591,24 @@ class Baasaas { std::string m_mongo_endpoint; }; +static std::optional& get_redirector(const std::string& base_url) +{ + static std::optional redirector; + auto redirector_enabled = [&] { + const static auto enabled_values = {"On", "on", "1"}; + auto enable_redirector = getenv_sv("ENABLE_BAAS_REDIRECTOR"); + return std::any_of(enabled_values.begin(), enabled_values.end(), [&](const auto val) { + return val == enable_redirector; + }); + }; + + if (redirector_enabled() && !redirector && !base_url.empty()) { + redirector.emplace(base_url, util::Logger::get_default_logger()); + } + + return redirector; +} + class BaasaasLauncher : public Catch::EventListenerBase { public: static std::optional& get_baasaas_holder() @@ -653,6 +678,10 @@ class BaasaasLauncher : public Catch::EventListenerBase { void testRunEnded(Catch::TestRunStats const&) override { + if (auto& redirector 
= get_redirector({})) { + redirector = std::nullopt; + } + if (auto& baasaas_holder = get_baasaas_holder()) { baasaas_holder->stop(); } @@ -1256,6 +1285,17 @@ realm::Schema get_default_schema() } std::string get_base_url() +{ + auto base_url = get_real_base_url(); + + auto& redirector = get_redirector(base_url); + if (redirector) { + return redirector->base_url(); + } + return base_url; +} + +std::string get_real_base_url() { if (auto baas_url = getenv_sv("BAAS_BASE_URL"); !baas_url.empty()) { return std::string{baas_url}; @@ -1267,6 +1307,7 @@ std::string get_base_url() return get_compile_time_base_url(); } + std::string get_admin_url() { if (auto baas_admin_url = getenv_sv("BAAS_ADMIN_URL"); !baas_admin_url.empty()) { @@ -1275,8 +1316,11 @@ std::string get_admin_url() if (auto compile_url = get_compile_time_admin_url(); !compile_url.empty()) { return compile_url; } + if (auto& baasaas_holder = BaasaasLauncher::get_baasaas_holder(); baasaas_holder.has_value()) { + return baasaas_holder->admin_endpoint(); + } - return get_base_url(); + return get_real_base_url(); } std::string get_mongodb_server() diff --git a/test/object-store/util/sync/baas_admin_api.hpp b/test/object-store/util/sync/baas_admin_api.hpp index 554e37d0c29..f1d075323ea 100644 --- a/test/object-store/util/sync/baas_admin_api.hpp +++ b/test/object-store/util/sync/baas_admin_api.hpp @@ -271,7 +271,15 @@ AppSession create_app(const AppCreateConfig& config); AppSession get_runtime_app_session(); std::string get_mongodb_server(); +// Returns the base url tests should connect to via realm to test functionality. +// This endpoint may redirect you or may inject errors. std::string get_base_url(); +// Returns the base url of a real baas server after any redirects that may have been +// injected by the test harness. Use this to validate you're talking to the right +// URL in tests. +std::string get_real_base_url(); +// Returns the base url of the admin API. Use this to create/administer apps that +// your test is talking to. std::string get_admin_url(); template diff --git a/test/object-store/util/sync/redirect_server.hpp b/test/object-store/util/sync/redirect_server.hpp new file mode 100644 index 00000000000..6c3adbccf60 --- /dev/null +++ b/test/object-store/util/sync/redirect_server.hpp @@ -0,0 +1,282 @@ +//////////////////////////////////////////////////////////////////////////// +// +// Copyright 2024 Realm Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +//////////////////////////////////////////////////////////////////////////// + +#pragma once + +#if defined(REALM_ENABLE_SYNC) && defined(REALM_ENABLE_AUTH_TESTS) + +#include +#include +#include +#include +#include +#include + +#include + +#include + +namespace realm::sync { + +class RedirectingHttpServer { +public: + RedirectingHttpServer(std::string redirect_to_base_url, std::shared_ptr logger) + : m_redirect_to_base_url(std::move(redirect_to_base_url)) + , m_logger(std::make_shared("HTTP Redirector ", std::move(logger))) + , m_acceptor(m_service) + , m_server_thread([this] { + m_service.run_until_stopped(); + }) + { + m_acceptor.open(m_endpoint.protocol()); + m_acceptor.bind(m_endpoint); + m_endpoint = m_acceptor.local_endpoint(); + m_acceptor.listen(); + m_service.post([this](Status status) { + REALM_ASSERT(status.is_ok()); + do_accept(); + }); + } + + ~RedirectingHttpServer() + { + m_acceptor.cancel(); + m_service.stop(); + m_server_thread.join(); + } + + std::string base_url() const + { + return util::format("http://localhost:%1", m_endpoint.port()); + } + +private: + struct BufferedSocket : network::Socket { + BufferedSocket(network::Service& service) + : network::Socket(service) + { + } + + BufferedSocket(network::Service& service, const network::StreamProtocol& protocol, + native_handle_type native_handle) + : network::Socket(service, protocol, native_handle) + { + } + + + template + void async_read_until(char* buffer, std::size_t size, char delim, H handler) + { + network::Socket::async_read_until(buffer, size, delim, m_read_buffer, std::move(handler)); + } + + template + void async_read(char* buffer, std::size_t size, H handler) + { + network::Socket::async_read(buffer, size, m_read_buffer, std::move(handler)); + } + + private: + network::ReadAheadBuffer m_read_buffer; + }; + + struct Conn : public util::RefCountBase, websocket::Config { + Conn(network::Service& service, const std::shared_ptr& logger) + : random(Catch::getSeed()) + , logger(logger) + , socket(service) + , http_server(socket, logger) + { + } + + // Implement the websocket::Config interface + const std::shared_ptr& websocket_get_logger() noexcept override + { + return logger; + } + + std::mt19937_64& websocket_get_random() noexcept override + { + return random; + } + + void async_write(const char* data, size_t size, websocket::WriteCompletionHandler handler) override + { + socket.async_write(data, size, std::move(handler)); + } + + void async_read(char* buffer, size_t size, websocket::ReadCompletionHandler handler) override + { + socket.async_read(buffer, size, std::move(handler)); + } + + void async_read_until(char* buffer, size_t size, char delim, + websocket::ReadCompletionHandler handler) override + { + socket.async_read_until(buffer, size, delim, std::move(handler)); + } + + void websocket_handshake_completion_handler(const HTTPHeaders&) override {} + + void websocket_read_error_handler(std::error_code) override {} + + void websocket_write_error_handler(std::error_code) override {} + + void websocket_handshake_error_handler(std::error_code, const HTTPHeaders*, std::string_view) override {} + + void websocket_protocol_error_handler(std::error_code) override {} + + bool websocket_text_message_received(const char*, size_t) override + { + return false; + } + + bool websocket_binary_message_received(const char*, size_t) override + { + return false; + } + + bool websocket_close_message_received(websocket::WebSocketError, std::string_view) override + { + return false; + } + + bool 
websocket_ping_message_received(const char*, size_t) override + { + return false; + } + + bool websocket_pong_message_received(const char*, size_t) override + { + return false; + } + + std::mt19937_64 random; + const std::shared_ptr logger; + BufferedSocket socket; + HTTPServer http_server; + std::optional websocket; + }; + + void send_simple_response(util::bind_ptr conn, HTTPStatus status, std::string reason, std::string body) + { + m_logger->debug("sending http response %1: %2 \"%3\"", status, reason, body); + HTTPResponse resp; + resp.status = status; + resp.reason = std::move(reason); + resp.body = std::move(body); + conn->http_server.async_send_response(resp, [this, conn](std::error_code ec) { + if (ec && ec != util::error::operation_aborted) { + m_logger->warn("Error sending response: %1", ec); + } + }); + } + + void do_websocket_redirect(util::bind_ptr conn, const HTTPRequest& req) + { + auto protocol_it = req.headers.find("Sec-WebSocket-Protocol"); + REALM_ASSERT(protocol_it != req.headers.end()); + auto protocols = protocol_it->second; + + auto first_comma = protocols.find(','); + std::string protocol; + if (first_comma == std::string::npos) { + protocol = protocols; + } + else { + protocol = protocols.substr(0, first_comma); + } + std::error_code ec; + auto maybe_resp = websocket::make_http_response(req, protocol, ec); + REALM_ASSERT(maybe_resp); + REALM_ASSERT(!ec); + conn->http_server.async_send_response(*maybe_resp, [this, conn](std::error_code ec) { + if (ec) { + if (ec != util::error::operation_aborted) { + m_logger->warn("Error sending websocket HTTP upgrade response: %1", ec); + } + return; + } + + conn->websocket.emplace(*conn); + conn->websocket->initiate_server_websocket_after_handshake(); + + static const std::string_view msg("\x0f\xa3Permanently moved"); + conn->websocket->async_write_close(msg.data(), msg.size(), [conn](std::error_code, size_t) { + conn->logger->debug("Sent close frame with move code"); + conn->websocket.reset(); + }); + }); + } + + void do_accept() + { + auto conn = util::make_bind(m_service, m_logger); + m_acceptor.async_accept(conn->socket, [this, conn](std::error_code ec) { + if (ec == util::error::operation_aborted) { + return; + } + do_accept(); + if (ec) { + m_logger->error("Error accepting new connection in: %1", ec); + return; + } + + conn->http_server.async_receive_request([this, conn](HTTPRequest req, std::error_code ec) { + if (ec) { + if (ec != util::error::operation_aborted) { + m_logger->error("Error receiving HTTP request to redirect: %1", ec); + } + return; + } + + if (req.path.find("/location") != std::string::npos) { + std::string_view base_url(m_redirect_to_base_url); + auto scheme = base_url.find("://"); + auto ws_url = util::format("ws%1", base_url.substr(scheme)); + nlohmann::json body{{"deployment_model", "GLOBAL"}, + {"location", "US-VA"}, + {"hostname", m_redirect_to_base_url}, + {"ws_hostname", ws_url}}; + auto body_str = body.dump(); + + send_simple_response(conn, HTTPStatus::Ok, "Okay", std::move(body_str)); + return; + } + + if (req.path.find("/realm-sync") != std::string::npos) { + do_websocket_redirect(conn, req); + return; + } + + send_simple_response(conn, HTTPStatus::NotFound, "Not found", {}); + }); + }); + } + + const std::string m_redirect_to_base_url; + const std::shared_ptr m_logger; + network::Service m_service; + network::Acceptor m_acceptor; + network::Endpoint m_endpoint; + std::thread m_server_thread; +}; + +} // namespace realm::sync + +#endif From 8ea5c77350e108b106ae8d4fe925ba07f8527568 Mon Sep 17 
00:00:00 2001 From: Daniel Tabacaru <96778637+danieltabacaru@users.noreply.github.com> Date: Sat, 27 Jul 2024 10:08:32 +0300 Subject: [PATCH 09/15] Fix asan failure in schema migration tests (#7927) --- .../sync/flx_schema_migration.cpp | 56 +++++++++++-------- 1 file changed, 32 insertions(+), 24 deletions(-) diff --git a/test/object-store/sync/flx_schema_migration.cpp b/test/object-store/sync/flx_schema_migration.cpp index c1fa090e6e6..1058cadf01c 100644 --- a/test/object-store/sync/flx_schema_migration.cpp +++ b/test/object-store/sync/flx_schema_migration.cpp @@ -272,8 +272,8 @@ TEST_CASE("Sync schema migrations don't work with sync open", "[sync][flx][flx s config.schema = schema_v1; create_schema(app_session, *config.schema, config.schema_version); - config.sync_config->on_sync_client_event_hook = [&](std::weak_ptr, - const SyncClientHookData& data) mutable { + config.sync_config->on_sync_client_event_hook = [](std::weak_ptr, + const SyncClientHookData& data) { if (data.event != SyncClientHookEvent::ErrorMessageReceived) { return SyncClientHookAction::NoAction; } @@ -411,18 +411,18 @@ TEST_CASE("Schema version mismatch between client and server", "[sync][flx][flx auto schema_migration_required = false; config.sync_config->subscription_initializer = get_subscription_initializer_callback_for_schema_v0(); config.sync_config->error_handler = nullptr; - config.sync_config->on_sync_client_event_hook = - [&schema_migration_required](std::weak_ptr, const SyncClientHookData& data) mutable { - if (data.event != SyncClientHookEvent::ErrorMessageReceived) { - return SyncClientHookAction::NoAction; - } - auto error_code = sync::ProtocolError(data.error_info->raw_error_code); - if (error_code != sync::ProtocolError::schema_version_changed) { - return SyncClientHookAction::NoAction; - } - schema_migration_required = true; + config.sync_config->on_sync_client_event_hook = [&schema_migration_required](std::weak_ptr, + const SyncClientHookData& data) { + if (data.event != SyncClientHookEvent::ErrorMessageReceived) { return SyncClientHookAction::NoAction; - }; + } + auto error_code = sync::ProtocolError(data.error_info->raw_error_code); + if (error_code != sync::ProtocolError::schema_version_changed) { + return SyncClientHookAction::NoAction; + } + schema_migration_required = true; + return SyncClientHookAction::NoAction; + }; auto status = async_open_realm(config); REQUIRE_FALSE(status.is_ok()); @@ -450,8 +450,7 @@ TEST_CASE("Fresh realm does not require schema migration", "[sync][flx][flx sche config.schema_version = 1; config.schema = schema_v1; config.sync_config->subscription_initializer = get_subscription_initializer_callback_for_schema_v1(); - config.sync_config->on_sync_client_event_hook = [](std::weak_ptr, - const SyncClientHookData& data) mutable { + config.sync_config->on_sync_client_event_hook = [](std::weak_ptr, const SyncClientHookData& data) { if (data.event != SyncClientHookEvent::ErrorMessageReceived) { return SyncClientHookAction::NoAction; } @@ -660,8 +659,12 @@ TEST_CASE("An interrupted schema migration can recover on the next session", auto pf = util::make_promise_future(); config.sync_config->subscription_initializer = get_subscription_initializer_callback_for_schema_v1(); config.sync_config->on_sync_client_event_hook = [&schema_version_changed_count, &task, - &pf](std::weak_ptr, - const SyncClientHookData& data) mutable { + &pf](std::weak_ptr weak_session, + const SyncClientHookData& data) { + auto session = weak_session.lock(); + if (!session) { + return 
SyncClientHookAction::NoAction; + } if (data.event != SyncClientHookEvent::ErrorMessageReceived) { return SyncClientHookAction::NoAction; } @@ -768,7 +771,11 @@ TEST_CASE("Client reset during schema migration", "[sync][flx][flx schema migrat config.sync_config->client_resync_mode = ClientResyncMode::Recover; config.sync_config->on_sync_client_event_hook = [&harness, &schema_version_changed_count, &once](std::weak_ptr weak_session, - const SyncClientHookData& data) mutable { + const SyncClientHookData& data) { + auto session = weak_session.lock(); + if (!session) { + return SyncClientHookAction::NoAction; + } if (schema_version_changed_count == 1 && data.event == SyncClientHookEvent::DownloadMessageReceived && !once) { once = true; @@ -777,8 +784,6 @@ TEST_CASE("Client reset during schema migration", "[sync][flx][flx schema migrat if (data.event != SyncClientHookEvent::ErrorMessageReceived) { return SyncClientHookAction::NoAction; } - auto session = weak_session.lock(); - REQUIRE(session); auto error_code = sync::ProtocolError(data.error_info->raw_error_code); if (error_code == sync::ProtocolError::initial_sync_not_completed) { @@ -857,8 +862,12 @@ TEST_CASE("Migrate to new schema version after migration to intermediate version auto pf = util::make_promise_future(); config.sync_config->subscription_initializer = get_subscription_initializer_callback_for_schema_v1(); config.sync_config->on_sync_client_event_hook = [&schema_version_changed_count, &task, - &pf](std::weak_ptr, - const SyncClientHookData& data) mutable { + &pf](std::weak_ptr weak_session, + const SyncClientHookData& data) { + auto session = weak_session.lock(); + if (!session) { + return SyncClientHookAction::NoAction; + } if (data.event != SyncClientHookEvent::ErrorMessageReceived) { return SyncClientHookAction::NoAction; } @@ -999,8 +1008,7 @@ TEST_CASE("Client reset and schema migration", "[sync][flx][flx schema migration config.schema = schema_v1; config.sync_config->subscription_initializer = get_subscription_initializer_callback_for_schema_v1(); config.sync_config->client_resync_mode = ClientResyncMode::Recover; - config.sync_config->on_sync_client_event_hook = [](std::weak_ptr, - const SyncClientHookData& data) mutable { + config.sync_config->on_sync_client_event_hook = [](std::weak_ptr, const SyncClientHookData& data) { if (data.event != SyncClientHookEvent::ErrorMessageReceived) { return SyncClientHookAction::NoAction; } From 14bbdf1fb80b950d758da215cfcc6263e01d3f57 Mon Sep 17 00:00:00 2001 From: Daniel Tabacaru <96778637+danieltabacaru@users.noreply.github.com> Date: Sat, 27 Jul 2024 10:10:25 +0300 Subject: [PATCH 10/15] Do not report compensating write errors multiple times (#7922) --- CHANGELOG.md | 2 +- src/realm/sync/noinst/client_impl_base.hpp | 1 + test/object-store/sync/flx_sync.cpp | 159 +++++++++++++++++++++ 3 files changed, 161 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 816292cb152..6bf2e98e1d2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,7 +5,7 @@ ### Fixed * ([#????](https://github.com/realm/realm-core/issues/????), since v?.?.?) -* None. +* Sync client may report duplicate compensating write errors ([#7708](https://github.com/realm/realm-core/issues/7708), since v14.8.0). ### Breaking changes * None. 
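
[Editor's note] The fix for the duplicate reports is a one-liner further down: clear m_pending_compensating_write_errors when the session's protocol state is reset, so reports queued before a session restart are not surfaced again once the server re-sends them on resume. As a rough illustration of why that matters, here is a much-simplified standalone sketch; PendingErrorQueue and its members are invented for this example and are not realm-core API.

// Much-simplified illustration of the failure mode: queued error reports that
// survive a session restart get delivered again alongside the server's re-sent
// copies. Names here are hypothetical, not realm-core's.
#include <iostream>
#include <string>
#include <utility>
#include <vector>

struct PendingErrorQueue {
    std::vector<std::string> pending;

    // An ERROR message arrived; the report is queued until it can be delivered.
    void on_error_message(std::string description)
    {
        pending.push_back(std::move(description));
    }

    // Deliver everything currently queued to the application's error handler.
    void flush_to_handler()
    {
        for (auto& e : pending)
            std::cout << "compensating write: " << e << '\n';
        pending.clear();
    }

    // Runs whenever the session's protocol state is reset (e.g. on resume).
    // Without this clear(), reports queued by the previous incarnation of the
    // session would later be delivered twice.
    void reset_protocol_state()
    {
        pending.clear();
    }
};

int main()
{
    PendingErrorQueue q;
    q.on_error_message("write to object X not allowed"); // queued, session drops before delivery
    q.reset_protocol_state();                             // session resumes: discard the stale copy
    q.on_error_message("write to object X not allowed"); // server re-sends the same error
    q.flush_to_handler();                                 // reported exactly once
}

The test added later in this commit exercises the same sequence end to end: two out-of-view writes, a paused session, a transient error to force a resume, and a final check that exactly two compensating-write infos reach the error handler.
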
diff --git a/src/realm/sync/noinst/client_impl_base.hpp b/src/realm/sync/noinst/client_impl_base.hpp index ab839856ef7..f9f386d52f5 100644 --- a/src/realm/sync/noinst/client_impl_base.hpp +++ b/src/realm/sync/noinst/client_impl_base.hpp @@ -1502,6 +1502,7 @@ inline void ClientImpl::Session::reset_protocol_state() noexcept m_error_message_received = false; m_unbound_message_received = false; m_client_error = util::none; + m_pending_compensating_write_errors.clear(); m_upload_progress = m_progress.upload; m_last_download_mark_sent = m_last_download_mark_received; diff --git a/test/object-store/sync/flx_sync.cpp b/test/object-store/sync/flx_sync.cpp index c377dc77e22..b271b32b577 100644 --- a/test/object-store/sync/flx_sync.cpp +++ b/test/object-store/sync/flx_sync.cpp @@ -4202,6 +4202,165 @@ TEST_CASE("flx: compensating write errors get re-sent across sessions", "[sync][ REQUIRE(top_level_table->is_empty()); } +TEST_CASE("flx: compensating write errors are not duplicated", "[sync][flx][compensating write][baas]") { + FLXSyncTestHarness harness("flx_compensating_writes"); + auto config = harness.make_test_file(); + + auto test_obj_id_1 = ObjectId::gen(); + auto test_obj_id_2 = ObjectId::gen(); + + enum class TestState { Start, FirstError, SecondError, Resume, ThirdError, FourthError }; + TestingStateMachine state(TestState::Start); + + std::mutex errors_mutex; + std::vector> error_to_download_version; + std::vector compensating_writes; + sync::version_type download_version; + + config.sync_config->on_sync_client_event_hook = [&](std::weak_ptr weak_session, + const SyncClientHookData& data) { + if (auto session = weak_session.lock(); !session) { + return SyncClientHookAction::NoAction; + } + SyncClientHookAction action = SyncClientHookAction::NoAction; + state.transition_with([&](TestState cur_state) -> std::optional { + if (data.event != SyncClientHookEvent::ErrorMessageReceived) { + // Before the session is resumed, ignore the download messages received + // to undo the out-of-view writes. + if (data.event == SyncClientHookEvent::DownloadMessageReceived && + (cur_state == TestState::FirstError || cur_state == TestState::SecondError)) { + action = SyncClientHookAction::EarlyReturn; + } + else if (data.event == SyncClientHookEvent::DownloadMessageReceived && + (cur_state == TestState::Resume || cur_state == TestState::ThirdError)) { + download_version = data.progress.download.server_version; + } + else if (data.event == SyncClientHookEvent::BindMessageSent && cur_state == TestState::SecondError) { + return TestState::Resume; + } + return std::nullopt; + } + + auto error_code = sync::ProtocolError(data.error_info->raw_error_code); + if (error_code == sync::ProtocolError::initial_sync_not_completed) { + return std::nullopt; + } + + REQUIRE(error_code == sync::ProtocolError::compensating_write); + REQUIRE_FALSE(data.error_info->compensating_writes.empty()); + + if (cur_state == TestState::Start) { + return TestState::FirstError; + } + else if (cur_state == TestState::FirstError) { + // Return early so the second compensating write error is not saved + // by the sync client. + // This is so server versions received to undo the out-of-view writes are + // [x, x, y] instead of [x, y, x, y] (server versions don't increase + // monotonically as the client expects). + action = SyncClientHookAction::EarlyReturn; + return TestState::SecondError; + } + // Save third and fourth compensating write errors after resume. 
+ std::lock_guard lk(errors_mutex); + for (const auto& compensating_write : data.error_info->compensating_writes) { + error_to_download_version.emplace_back(compensating_write.primary_key.get_object_id(), + *data.error_info->compensating_write_server_version); + } + return std::nullopt; + }); + return action; + }; + + config.sync_config->error_handler = [&](std::shared_ptr, SyncError error) { + std::unique_lock lk(errors_mutex); + REQUIRE(error.status == ErrorCodes::SyncCompensatingWrite); + for (const auto& compensating_write : error.compensating_writes_info) { + auto tracked_error = std::find_if(error_to_download_version.begin(), error_to_download_version.end(), + [&](const auto& pair) { + return pair.first == compensating_write.primary_key.get_object_id(); + }); + REQUIRE(tracked_error != error_to_download_version.end()); + CHECK(tracked_error->second <= download_version); + compensating_writes.push_back(compensating_write); + } + lk.unlock(); + + state.transition_with([&](TestState cur_state) -> std::optional { + if (cur_state == TestState::Resume) { + return TestState::ThirdError; + } + else if (cur_state == TestState::ThirdError) { + return TestState::FourthError; + } + return std::nullopt; + }); + }; + + auto realm = Realm::get_shared_realm(config); + auto table = realm->read_group().get_table("class_TopLevel"); + auto queryable_str_field = table->get_column_key("queryable_str_field"); + auto new_query = realm->get_latest_subscription_set().make_mutable_copy(); + new_query.insert_or_assign(Query(table).equal(queryable_str_field, "bizz")); + std::move(new_query).commit(); + + wait_for_upload(*realm); + wait_for_download(*realm); + + CppContext c(realm); + realm->begin_transaction(); + Object::create(c, realm, "TopLevel", + util::Any(AnyDict{ + {"_id", test_obj_id_1}, + {"queryable_str_field", std::string{"foo"}}, + })); + realm->commit_transaction(); + + realm->begin_transaction(); + Object::create(c, realm, "TopLevel", + util::Any(AnyDict{ + {"_id", test_obj_id_2}, + {"queryable_str_field", std::string{"baz"}}, + })); + realm->commit_transaction(); + state.wait_for(TestState::SecondError); + + nlohmann::json error_body = { + {"tryAgain", true}, {"message", "fake error"}, + {"shouldClientReset", false}, {"isRecoveryModeDisabled", false}, + {"action", "Transient"}, + }; + nlohmann::json test_command = {{"command", "ECHO_ERROR"}, + {"args", nlohmann::json{{"errorCode", 229}, {"errorBody", error_body}}}}; + + // Trigger a retryable transient error to resume the session. 
+ auto test_cmd_res = + wait_for_future(SyncSession::OnlyForTesting::send_test_command(*realm->sync_session(), test_command.dump())) + .get(); + CHECK(test_cmd_res == "{}"); + state.wait_for(TestState::FourthError); + + REQUIRE(compensating_writes.size() == 2); + auto& write_info = compensating_writes[0]; + CHECK(write_info.primary_key.is_type(type_ObjectId)); + CHECK(write_info.primary_key.get_object_id() == test_obj_id_1); + CHECK(write_info.object_name == "TopLevel"); + CHECK(write_info.reason == util::format("write to ObjectID(\"%1\") in table \"TopLevel\" not allowed; object is " + "outside of the current query view", + test_obj_id_1)); + + write_info = compensating_writes[1]; + CHECK(write_info.primary_key.is_type(type_ObjectId)); + CHECK(write_info.primary_key.get_object_id() == test_obj_id_2); + CHECK(write_info.object_name == "TopLevel"); + CHECK(write_info.reason == util::format("write to ObjectID(\"%1\") in table \"TopLevel\" not allowed; object is " + "outside of the current query view", + test_obj_id_2)); + realm->refresh(); + auto top_level_table = realm->read_group().get_table("class_TopLevel"); + CHECK(top_level_table->is_empty()); +} + TEST_CASE("flx: bootstrap changesets are applied continuously", "[sync][flx][bootstrap][baas]") { FLXSyncTestHarness harness("flx_bootstrap_ordering", {g_large_array_schema, {"queryable_int_field"}}); fill_large_array_schema(harness); From f93e7585193ffc420b86987bc4df43fc211afd94 Mon Sep 17 00:00:00 2001 From: Michael Wilkerson-Barker Date: Tue, 30 Jul 2024 08:47:59 -0400 Subject: [PATCH 11/15] RCORE-1992 Add emscripten debug/release compile tasks to evergreen (#7916) * Fixed emscripten compilation errors; added emscripten to cross_compile.sh * Fixed emscripten warning * Updates for build; added debug and release builds * Updated emscripten variant names; Disabled warnings in cross_compile.sh * Updated emscripten errors when emcmake was not found * Updates to emscripten build * Build using emscripten SDK instead of docker * Reverted/removed REALM_COMBINED_TESTS cmake flag * Updated cross_compile to not build tests and moved emscripten build steps to config.yml --- CHANGELOG.md | 2 +- evergreen/config.yml | 78 +++++++++++++++ src/realm/alloc_slab.hpp | 6 +- src/realm/object-store/sync/app.cpp | 22 +++-- src/realm/object-store/sync/app.hpp | 2 + src/realm/sync/instruction_applier.cpp | 4 +- src/realm/util/file.cpp | 2 +- test/util/CMakeLists.txt | 4 +- tools/cross_compile.sh | 132 ++++++++++++++++--------- 9 files changed, 193 insertions(+), 59 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6bf2e98e1d2..9eb8ada9b1d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,7 +16,7 @@ ----------- ### Internals -* None. +* Fix emscripten build and add emscripten debug/release compile tasks to evergreen. 
([PR #7916](https://github.com/realm/realm-core/pull/7916)) ---------------------------------------------- diff --git a/evergreen/config.yml b/evergreen/config.yml index abdc37706ff..a1bb6ccc141 100644 --- a/evergreen/config.yml +++ b/evergreen/config.yml @@ -197,6 +197,48 @@ functions: -j ${max_jobs|$(grep -c proc /proc/cpuinfo)} \ $target + "compile emscripten": + - command: shell.exec + params: + working_dir: realm-core + shell: bash + script: |- + set -o errexit + set -o pipefail + set -o verbose + + if [ -n "${cmake_bindir|}" ]; then + export CMAKE="$(./evergreen/abspath.sh "${cmake_bindir}/cmake")" + else + export CMAKE="cmake" + fi + + # Check out the Emscripten SDK and activate the specified version + echo "Activating emscripten SDK version: ${emsdk_version|latest}" + git clone https://github.com/emscripten-core/emsdk.git + + pushd emsdk/ > /dev/null # realm-core/emsdk + ./emsdk install ${emsdk_version|latest} + ./emsdk activate ${emsdk_version|latest} + source ./emsdk_env.sh + popd > /dev/null # realm-core + + if [[ "$(uname -s)" =~ Darwin* ]]; then + NPROC="$(sysctl -n hw.ncpu)" + else + NPROC="$(nproc)" + fi + + mkdir -p build-emscripten + pushd build-emscripten > /dev/null # realm-core/build-emscripten + + # shellcheck disable=SC2086,SC2090 + emcmake $CMAKE -D CMAKE_BUILD_TYPE="${cmake_build_type|Debug}" \ + .. + + make "-j$NPROC" 2>&1 + popd > /dev/null # realm-core + "run benchmark": - command: shell.exec params: @@ -749,6 +791,12 @@ tasks: commands: - func: "compile" +- name: compile-emscripten + exec_timeout_secs: 1800 + tags: [ "for_pull_requests" ] + commands: + - func: "compile emscripten" + - name: package exec_timeout_secs: 1800 commands: @@ -1297,6 +1345,15 @@ task_groups: - compile - .test_suite +- name: compile_emscripten + max_hosts: 1 + setup_group_can_fail_task: true + setup_group: + - func: "fetch source" + - func: "fetch binaries" + tasks: + - compile-emscripten + # Runs object-store-tests against baas running on remote host and runs # the network simulation tests as a separate task for nightly builds - name: network_tests @@ -1648,6 +1705,27 @@ buildvariants: tasks: - name: fuzzer-tests +- name: ubuntu-emscripten + display_name: "Ubuntu (Emscripten x86_64)" + run_on: ubuntu2204-large + expansions: + cmake_url: "https://s3.amazonaws.com/static.realm.io/evergreen-assets/cmake-3.26.3-linux-x86_64.tar.gz" + cmake_bindir: "./cmake_binaries/bin" + emsdk_version: "3.1.37" + tasks: + - name: compile_emscripten + +- name: ubuntu-emscripten-release + display_name: "Ubuntu (Emscripten x86_64 Release build)" + run_on: ubuntu2204-large + expansions: + cmake_url: "https://s3.amazonaws.com/static.realm.io/evergreen-assets/cmake-3.26.3-linux-x86_64.tar.gz" + cmake_bindir: "./cmake_binaries/bin" + cmake_build_type: RelWithDebInfo + emsdk_version: "3.1.37" + tasks: + - name: compile_emscripten + # disable these builders since there are constantly failing and not yet ready for nightly builds # - name: ubuntu2004-network-nonideal # display_name: "Ubuntu 20.04 x86_64 (Utunbu2004 - nonideal transfer)" diff --git a/src/realm/alloc_slab.hpp b/src/realm/alloc_slab.hpp index df7cf413b9e..d3392cc3d6e 100644 --- a/src/realm/alloc_slab.hpp +++ b/src/realm/alloc_slab.hpp @@ -147,8 +147,8 @@ class SlabAlloc : public Allocator { /// This can happen if the conflicting thread (or process) terminates or /// crashes before the next retry. 
/// - /// \throw FileAccessError - /// \throw SlabAlloc::Retry + /// \throw FileAccessError if unable to access the file + /// \throw SlabAlloc::Retry if the request cannot be completed right now ref_type attach_file(const std::string& file_path, Config& cfg, util::WriteObserver* write_observer = nullptr); /// @brief Expand or contract file @@ -179,7 +179,7 @@ class SlabAlloc : public Allocator { /// /// \sa own_buffer() /// - /// \throw InvalidDatabase + /// \throw InvalidDatabase if an error occurs while attaching the allocator ref_type attach_buffer(const char* data, size_t size); void init_in_memory_buffer(); diff --git a/src/realm/object-store/sync/app.cpp b/src/realm/object-store/sync/app.cpp index 2b2a8b0fca0..9306807912a 100644 --- a/src/realm/object-store/sync/app.cpp +++ b/src/realm/object-store/sync/app.cpp @@ -210,12 +210,27 @@ SharedApp App::get_app(CacheMode mode, const AppConfig& config) NO_THREAD_SAFETY std::lock_guard lock(s_apps_mutex); auto& app = s_apps_cache[config.app_id][base_url_from_app_config(config)]; if (!app) { - app = std::make_shared(Private(), config); + app = App::make_app(config); } return app; } REALM_ASSERT(mode == CacheMode::Disabled); + return App::make_app(config); +} + +SharedApp App::make_app(const AppConfig& config) +{ +#ifdef __EMSCRIPTEN__ + if (!config.transport) { + // Make a copy and provide a default transport if not provided + AppConfig config_copy = config; + config_copy.transport = std::make_shared<_impl::EmscriptenNetworkTransport>(); + return std::make_shared(Private(), config_copy); + } return std::make_shared(Private(), config); +#else + return std::make_shared(Private(), config); +#endif } SharedApp App::get_cached_app(const std::string& app_id, const std::optional& base_url) @@ -257,11 +272,6 @@ App::App(Private, const AppConfig& config) , m_metadata_store(create_metadata_store(config, *m_file_manager)) , m_sync_manager(SyncManager::create(config.sync_client_config)) { -#ifdef __EMSCRIPTEN__ - if (!m_config.transport) { - m_config.transport = std::make_shared<_impl::EmscriptenNetworkTransport>(); - } -#endif REALM_ASSERT(m_config.transport); // if a base url is provided, then verify the value diff --git a/src/realm/object-store/sync/app.hpp b/src/realm/object-store/sync/app.hpp index 40508984381..3fe735ca648 100644 --- a/src/realm/object-store/sync/app.hpp +++ b/src/realm/object-store/sync/app.hpp @@ -449,6 +449,8 @@ class App : public std::enable_shared_from_this, private: const AppConfig m_config; + static SharedApp make_app(const AppConfig& config); + util::CheckedMutex m_route_mutex; // The following variables hold the different paths to Atlas, depending on the // request being performed diff --git a/src/realm/sync/instruction_applier.cpp b/src/realm/sync/instruction_applier.cpp index 3106858f890..60dfa4ae0c9 100644 --- a/src/realm/sync/instruction_applier.cpp +++ b/src/realm/sync/instruction_applier.cpp @@ -373,12 +373,12 @@ void InstructionApplier::operator()(const Instruction::Update& instr) field_name, table_name, col.get_type(), mixed_ptr->get_type()); } } - else if (const auto obj_val_ptr = mpark::get_if(&arg)) { + else if (mpark::get_if(&arg)) { if (obj.is_null(col)) { obj.create_and_set_linked_object(col); } } - else if (const auto erase_ptr = mpark::get_if(&arg)) { + else if (mpark::get_if(&arg)) { m_applier->bad_transaction_log("Update: Dictionary erase at object field"); } else if (mpark::get_if(&arg)) { diff --git a/src/realm/util/file.cpp b/src/realm/util/file.cpp index 3f059a0a9dd..3e32d530084 100644 --- 
a/src/realm/util/file.cpp +++ b/src/realm/util/file.cpp @@ -1774,7 +1774,7 @@ bool File::MapBase::try_extend_to(size_t size) noexcept #ifndef _WIN32 char* extension_start_addr = (char*)m_addr + m_size; size_t extension_size = size - m_size; - size_t extension_start_offset = m_offset + m_size; + size_t extension_start_offset = static_cast(m_offset) + m_size; #if REALM_ENABLE_ENCRYPTION if (m_encrypted_mapping) { void* got_addr = ::mmap(extension_start_addr, extension_size, PROT_READ | PROT_WRITE, diff --git a/test/util/CMakeLists.txt b/test/util/CMakeLists.txt index 4f652f1d6a8..8f04c9454be 100644 --- a/test/util/CMakeLists.txt +++ b/test/util/CMakeLists.txt @@ -58,8 +58,8 @@ if(UNIX AND NOT APPLE) find_library(LIBRT rt) if(LIBRT) target_link_libraries(TestUtil ${LIBRT}) - # Android has librt included in libc - elseif(NOT ANDROID) + # Android and Emscripten have librt included in libc + elseif(NOT ANDROID AND NOT EMSCRIPTEN) message(WARNING "librt was not found. This means that the benchmarks will not be able to link properly.") endif() endif() diff --git a/tools/cross_compile.sh b/tools/cross_compile.sh index 2ef59406733..0bdd966e99e 100755 --- a/tools/cross_compile.sh +++ b/tools/cross_compile.sh @@ -6,40 +6,50 @@ set -e SCRIPT=$(basename "${BASH_SOURCE[0]}") function usage { - echo "Usage: ${SCRIPT} -t -o -v [-a ] [-f ]" + echo "Usage: ${SCRIPT} -t -o [-v ] [-a ] [-f ]" echo "" echo "Arguments:" - echo " build_type=" - echo " target_os=" - echo " android_abi=" + echo " build_type Release|Debug|MinSizeDebug|RelWithDebInfo" + echo " target_os android|iphoneos|iphonesimulator|watchos|" + echo " watchsimulator|appletvos|appletvsimulator|" + echo " emscripten" + echo " android_abi armeabi-v7a|x86|x86_64|arm64-v8a" exit 1; } +# Variables +VERSION= +CMAKE_FLAGS= +EMCMAKE=emcmake +CMAKE=${CMAKE:-cmake} + # Parse the options while getopts ":o:a:t:v:f:" opt; do case "${opt}" in o) OS=${OPTARG} - [ "${OS}" == "android" ] || - [ "${OS}" == "iphoneos" ] || - [ "${OS}" == "iphonesimulator" ] || - [ "${OS}" == "watchos" ] || - [ "${OS}" == "watchsimulator" ] || - [ "${OS}" == "appletvos" ] || - [ "${OS}" == "appletvsimulator" ] || usage + [[ "${OS}" == "android" ]] || + [[ "${OS}" == "iphoneos" ]] || + [[ "${OS}" == "iphonesimulator" ]] || + [[ "${OS}" == "watchos" ]] || + [[ "${OS}" == "watchsimulator" ]] || + [[ "${OS}" == "appletvos" ]] || + [[ "${OS}" == "appletvsimulator" ]] || + [[ "${OS}" == "emscripten" ]] || usage ;; a) ARCH=${OPTARG} - [ "${ARCH}" == "armeabi-v7a" ] || - [ "${ARCH}" == "x86" ] || - [ "${ARCH}" == "x86_64" ] || - [ "${ARCH}" == "arm64-v8a" ] || usage + [[ "${ARCH}" == "armeabi-v7a" ]] || + [[ "${ARCH}" == "x86" ]] || + [[ "${ARCH}" == "x86_64" ]] || + [[ "${ARCH}" == "arm64-v8a" ]] || usage ;; t) BUILD_TYPE=${OPTARG} - [ "${BUILD_TYPE}" == "Debug" ] || - [ "${BUILD_TYPE}" == "MinSizeDebug" ] || - [ "${BUILD_TYPE}" == "Release" ] || usage + [[ "${BUILD_TYPE}" == "Debug" ]] || + [[ "${BUILD_TYPE}" == "MinSizeDebug" ]] || + [[ "${BUILD_TYPE}" == "Release" ]] || + [[ "${BUILD_TYPE}" == "RelWithDebInfo" ]] || usage ;; v) VERSION=${OPTARG};; f) CMAKE_FLAGS=${OPTARG};; @@ -50,52 +60,86 @@ done shift $((OPTIND-1)) # Check for obligatory fields -if [ -z "${OS}" ] || [ -z "${BUILD_TYPE}" ]; then - echo "ERROR: options -o, -t and -v are always needed"; +if [[ -z "${OS}" ]] || [[ -z "${BUILD_TYPE}" ]]; then + echo "ERROR: options -o and -t are required"; usage fi -# Check for android-related obligatory fields +if [[ -n "${VERSION}" ]]; then + # shellcheck disable=SC2089 + CMAKE_FLAGS="-D 
REALM_VERSION='${VERSION}' ${CMAKE_FLAGS}" +fi + if [[ "${OS}" == "android" ]]; then + # Check for android-related obligatory fields if [[ -z "${ARCH}" ]]; then - echo "ERROR: option -a is needed for android builds"; + echo "ERROR: option -a is required for android builds"; usage elif [[ -z "${ANDROID_NDK}" ]]; then echo "ERROR: set ANDROID_NDK to the top level path for the Android NDK"; usage fi -fi -if [ "${OS}" == "android" ]; then mkdir -p "build-android-${ARCH}-${BUILD_TYPE}" cd "build-android-${ARCH}-${BUILD_TYPE}" || exit 1 - cmake -D CMAKE_SYSTEM_NAME=Android \ - -D CMAKE_ANDROID_NDK="${ANDROID_NDK}" \ - -D CMAKE_INSTALL_PREFIX=install \ - -D CMAKE_BUILD_TYPE="${BUILD_TYPE}" \ - -D CMAKE_ANDROID_ARCH_ABI="${ARCH}" \ - -D CMAKE_TOOLCHAIN_FILE="./tools/cmake/android.toolchain.cmake" \ - -D REALM_ENABLE_ENCRYPTION=1 \ - -D REALM_VERSION="${VERSION}" \ - -D CPACK_SYSTEM_NAME="Android-${ARCH}" \ - -D CMAKE_MAKE_PROGRAM=ninja \ - -G Ninja \ - ${CMAKE_FLAGS} \ - .. + + # shellcheck disable=SC2086,SC2090 + ${CMAKE} -D CMAKE_SYSTEM_NAME=Android \ + -D CMAKE_ANDROID_NDK="${ANDROID_NDK}" \ + -D CMAKE_INSTALL_PREFIX=install \ + -D CMAKE_BUILD_TYPE="${BUILD_TYPE}" \ + -D CMAKE_ANDROID_ARCH_ABI="${ARCH}" \ + -D CMAKE_TOOLCHAIN_FILE="./tools/cmake/android.toolchain.cmake" \ + -D REALM_ENABLE_ENCRYPTION=On \ + -D CPACK_SYSTEM_NAME="Android-${ARCH}" \ + -D CMAKE_MAKE_PROGRAM=ninja \ + -G Ninja \ + ${CMAKE_FLAGS} \ + .. ninja -v ninja package +elif [[ "${OS}" == "emscripten" ]]; then + if [[ -n "${EMSDK}" ]]; then + EMCMAKE="${EMSDK}/upstream/emscripten/emcmake" + if ! [[ -e "${EMCMAKE}" ]]; then + echo "ERROR: emcmake not found: ${EMCMAKE}" + usage + fi + elif ! which emcmake > /dev/null; then + echo "ERROR: emcmake not found in path or set EMSDK to the" + echo " top level emscripten emsdk directory" + usage + fi + + if [[ "$(uname -s)" =~ Darwin* ]]; then + NPROC="$(sysctl -n hw.ncpu)" + else + NPROC="$(nproc)" + fi + + mkdir -p build-emscripten + cd build-emscripten || exit 1 + + # shellcheck disable=SC2086,SC2090 + ${EMCMAKE} ${CMAKE} -D CMAKE_BUILD_TYPE="${BUILD_TYPE}" \ + -D REALM_NO_TESTS=On \ + -D REALM_BUILD_LIB_ONLY=On \ + ${CMAKE_FLAGS} \ + .. + + make "-j${NPROC}" 2>&1 else mkdir -p build-xcode-platforms cd build-xcode-platforms || exit 1 - cmake -D CMAKE_TOOLCHAIN_FILE="../tools/cmake/xcode.toolchain.cmake" \ - -D CMAKE_BUILD_TYPE="${BUILD_TYPE}" \ - -D REALM_NO_TESTS=1 \ - -D REALM_BUILD_LIB_ONLY=1 \ - -D REALM_VERSION="${VERSION}" \ - ${CMAKE_FLAGS} \ - -G Xcode .. + # shellcheck disable=SC2086,SC2090 + ${CMAKE} -D CMAKE_TOOLCHAIN_FILE="../tools/cmake/xcode.toolchain.cmake" \ + -D CMAKE_BUILD_TYPE="${BUILD_TYPE}" \ + -D REALM_NO_TESTS=On \ + -D REALM_BUILD_LIB_ONLY=On \ + ${CMAKE_FLAGS} \ + -G Xcode .. xcodebuild -scheme ALL_BUILD -configuration "${BUILD_TYPE}" -destination "generic/platform=${OS}" PLATFORM_NAME="${OS}" EFFECTIVE_PLATFORM_NAME="-${OS}" cpack -C "${BUILD_TYPE}" fi From 819bb98c7d424bce0fca715b1df34d66ea45a9fc Mon Sep 17 00:00:00 2001 From: Thomas Goyne Date: Tue, 30 Jul 2024 12:11:00 -0700 Subject: [PATCH 12/15] Add support for multi-process subscription state change notifications (#7862) As with the other multi-process notifications, the core idea here is to eliminate the in-memory state and produce notifications based entirely on the current state of the Realm file. SubscriptionStore::update_state() has been replaced with separate functions for the specific legal state transitions, which also take a write transaction as a parameter. 
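For illustration, here is a minimal sketch of how those transition functions fit together, assuming the signatures added to subscriptions.hpp further down in this patch. The driver function and its locals are hypothetical; only the SubscriptionStore calls come from the patch, and in practice they are invoked internally by the sync client rather than by application code.

    #include <realm/db.hpp>
    #include <realm/sync/subscriptions.hpp>

    using namespace realm;
    using namespace realm::sync;

    // Hypothetical driver walking one query version through the new state machine.
    // Each transition is recorded inside the caller's write transaction.
    void bootstrap_lifecycle_sketch(SubscriptionStore& store, DBRef db, int64_t query_version)
    {
        {
            auto wt = db->start_write();
            store.begin_bootstrap(*wt, query_version);    // Pending -> Bootstrapping
            wt->commit();                                 // same write that stores the first batch
        }
        {
            auto wt = db->start_write();
            store.complete_bootstrap(*wt, query_version); // Bootstrapping -> AwaitingMark;
            wt->commit();                                 // older subscription sets are superseded here
        }
        store.download_complete();                        // AwaitingMark -> Complete once the MARK arrives

        // Consumers keep using the existing future-based API; it now resolves from the
        // persisted state, so it behaves the same regardless of which process applied
        // the bootstrap.
        auto state = store.get_latest()
                         .get_state_change_notification(SubscriptionSet::State::Complete)
                         .get();
        static_cast<void>(state);
    }
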
These functions are called by PendingBootstrapStore inside the same write transaction as the bootstrap updates which changed the subscription state. This is both a minor performance optimization (due to fewer writes) and eliminates a brief window between the two writes where the Realm file was in an inconsistent state. There's a minor functional change here: previously old subscription sets were superseded when the new one reached the Completed state, and now they are superseded on AwaitingMark. This aligns it with when the new subscription set becomes the one which is returned by get_active(). --- CHANGELOG.md | 1 + src/realm/sync/client.cpp | 182 +++------- src/realm/sync/noinst/client_history_impl.cpp | 17 +- src/realm/sync/noinst/client_history_impl.hpp | 7 +- src/realm/sync/noinst/client_impl_base.cpp | 30 +- src/realm/sync/noinst/client_impl_base.hpp | 9 +- src/realm/sync/noinst/client_reset.cpp | 9 +- src/realm/sync/noinst/client_reset.hpp | 3 +- .../sync/noinst/client_reset_operation.cpp | 5 +- .../sync/noinst/client_reset_operation.hpp | 3 +- .../sync/noinst/pending_bootstrap_store.cpp | 59 ++-- .../sync/noinst/pending_bootstrap_store.hpp | 22 +- src/realm/sync/subscriptions.cpp | 310 +++++++++++------- src/realm/sync/subscriptions.hpp | 56 +++- test/object-store/sync/flx_sync.cpp | 27 +- .../util/sync/sync_test_utils.cpp | 2 +- test/test_client_reset.cpp | 22 +- test/test_sync_pending_bootstraps.cpp | 51 +-- test/test_sync_subscriptions.cpp | 306 ++++++++++++----- test/util/unit_test.hpp | 14 + 20 files changed, 630 insertions(+), 505 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9eb8ada9b1d..192c37e1a17 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,7 @@ ### Internals * Fix emscripten build and add emscripten debug/release compile tasks to evergreen. ([PR #7916](https://github.com/realm/realm-core/pull/7916)) +* Subscription set state change notifications now work in a multiprocess-compatible manner ([PR #7862](https://github.com/realm/realm-core/pull/7862)). ---------------------------------------------- diff --git a/src/realm/sync/client.cpp b/src/realm/sync/client.cpp index bde98970c35..7e6f7afb45f 100644 --- a/src/realm/sync/client.cpp +++ b/src/realm/sync/client.cpp @@ -104,8 +104,6 @@ class SessionWrapper final : public util::AtomicRefCountBase, DB::CommitListener void handle_pending_client_reset_acknowledgement(); - void update_subscription_version_info(); - // Can be called from any thread. std::string get_appservices_connection_id(); @@ -149,7 +147,7 @@ class SessionWrapper final : public util::AtomicRefCountBase, DB::CommitListener uint64_t uploadable; uint64_t downloaded; uint64_t downloadable; - int64_t query_version; + int64_t query_version = 0; double download_estimate; // Does not check snapshot @@ -179,11 +177,7 @@ class SessionWrapper final : public util::AtomicRefCountBase, DB::CommitListener const uint64_t m_schema_version; std::shared_ptr m_flx_subscription_store; - int64_t m_flx_active_version = 0; - int64_t m_flx_last_seen_version = 0; - int64_t m_flx_pending_mark_version = 0; std::unique_ptr m_flx_pending_bootstrap_store; - std::shared_ptr m_migration_store; // Set to true when this session wrapper is actualized (i.e. 
the wrapped @@ -242,8 +236,6 @@ class SessionWrapper final : public util::AtomicRefCountBase, DB::CommitListener void on_resumed(); void on_connection_state_changed(ConnectionState, const std::optional&); void on_flx_sync_progress(int64_t new_version, DownloadBatchState batch_state); - void on_flx_sync_error(int64_t version, std::string_view err_msg); - void on_flx_sync_version_complete(int64_t version); void init_progress_handler(); void check_progress(); @@ -795,14 +787,6 @@ void SessionImpl::handle_pending_client_reset_acknowledgement() } } -void SessionImpl::update_subscription_version_info() -{ - // Ignore the call if the session is not active - if (m_state == State::Active) { - m_wrapper.update_subscription_version_info(); - } -} - bool SessionImpl::process_flx_bootstrap_message(const DownloadMessage& message) { // Ignore the message if the session is not active or a steady state message @@ -818,10 +802,8 @@ bool SessionImpl::process_flx_bootstrap_message(const DownloadMessage& message) maybe_progress = message.progress; } - bool new_batch = false; try { - bootstrap_store->add_batch(*message.query_version, std::move(maybe_progress), message.downloadable, - message.changesets, &new_batch); + bootstrap_store->add_batch(*message.query_version, maybe_progress, message.downloadable, message.changesets); } catch (const LogicError& ex) { if (ex.code() == ErrorCodes::LimitExceeded) { @@ -834,12 +816,6 @@ bool SessionImpl::process_flx_bootstrap_message(const DownloadMessage& message) throw; } - // If we've started a new batch and there is more to come, call on_flx_sync_progress to mark the subscription as - // bootstrapping. - if (new_batch && message.batch_state == DownloadBatchState::MoreToCome) { - on_flx_sync_progress(*message.query_version, DownloadBatchState::MoreToCome); - } - auto hook_action = call_debug_hook(SyncClientHookEvent::BootstrapMessageProcessed, message.progress, *message.query_version, message.batch_state, message.changesets.size()); if (hook_action == SyncClientHookAction::EarlyReturn) { @@ -891,13 +867,11 @@ void SessionImpl::process_pending_flx_bootstrap() TransactionRef transact = get_db()->start_write(); while (bootstrap_store->has_pending()) { auto start_time = std::chrono::steady_clock::now(); - auto pending_batch = bootstrap_store->peek_pending(m_wrapper.m_flx_bootstrap_batch_size_bytes); + auto pending_batch = bootstrap_store->peek_pending(*transact, m_wrapper.m_flx_bootstrap_batch_size_bytes); if (!pending_batch.progress) { logger.info("Incomplete pending bootstrap found for query version %1", pending_batch.query_version); - // Close the write transaction before clearing the bootstrap store to avoid a deadlock because the - // bootstrap store requires a write transaction itself. 
- transact->close(); - bootstrap_store->clear(); + bootstrap_store->clear(*transact, pending_batch.query_version); + transact->commit(); return; } @@ -915,7 +889,7 @@ void SessionImpl::process_pending_flx_bootstrap() history.integrate_server_changesets( *pending_batch.progress, 1.0, pending_batch.changesets, new_version, batch_state, logger, transact, - [&](const TransactionRef& tr, util::Span changesets_applied) { + [&](const Transaction& tr, util::Span changesets_applied) { REALM_ASSERT_3(changesets_applied.size(), <=, pending_batch.changesets.size()); bootstrap_store->pop_front_pending(tr, changesets_applied.size()); }); @@ -935,7 +909,6 @@ void SessionImpl::process_pending_flx_bootstrap() } REALM_ASSERT_3(query_version, !=, -1); - on_flx_sync_progress(query_version, DownloadBatchState::LastInBatch); on_changesets_integrated(new_version.realm_version, progress); auto action = call_debug_hook(SyncClientHookEvent::BootstrapProcessed, progress, query_version, @@ -948,15 +921,7 @@ void SessionImpl::on_flx_sync_error(int64_t version, std::string_view err_msg) { // Ignore the call if the session is not active if (m_state == State::Active) { - m_wrapper.on_flx_sync_error(version, err_msg); - } -} - -void SessionImpl::on_flx_sync_progress(int64_t version, DownloadBatchState batch_state) -{ - // Ignore the call if the session is not active - if (m_state == State::Active) { - m_wrapper.on_flx_sync_progress(version, batch_state); + get_flx_subscription_store()->set_error(version, err_msg); } } @@ -974,14 +939,6 @@ MigrationStore* SessionImpl::get_migration_store() return m_wrapper.get_migration_store(); } -void SessionImpl::on_flx_sync_version_complete(int64_t version) -{ - // Ignore the call if the session is not active - if (m_state == State::Active) { - m_wrapper.on_flx_sync_version_complete(version); - } -} - SyncClientHookAction SessionImpl::call_debug_hook(const SyncClientHookData& data) { // Should never be called if session is not active @@ -1182,75 +1139,6 @@ bool SessionWrapper::has_flx_subscription_store() const return static_cast(m_flx_subscription_store); } -void SessionWrapper::on_flx_sync_error(int64_t version, std::string_view err_msg) -{ - REALM_ASSERT(!m_finalized); - get_flx_subscription_store()->update_state(version, SubscriptionSet::State::Error, err_msg); -} - -void SessionWrapper::on_flx_sync_version_complete(int64_t version) -{ - REALM_ASSERT(!m_finalized); - m_flx_last_seen_version = version; - m_flx_active_version = version; -} - -void SessionWrapper::on_flx_sync_progress(int64_t new_version, DownloadBatchState batch_state) -{ - if (!has_flx_subscription_store()) { - return; - } - - REALM_ASSERT(!m_finalized); - if (batch_state == DownloadBatchState::SteadyState) { - throw IntegrationException(ErrorCodes::SyncProtocolInvariantFailed, - "Unexpected batch state of SteadyState while downloading bootstrap"); - } - // Is this a server-initiated bootstrap? 
Skip notifying the subscription store - if (new_version == m_flx_active_version) { - return; - } - if (new_version < m_flx_active_version) { - throw IntegrationException(ErrorCodes::SyncProtocolInvariantFailed, - util::format("Bootstrap query version %1 is less than active version %2", - new_version, m_flx_active_version)); - } - if (new_version < m_flx_last_seen_version) { - throw IntegrationException( - ErrorCodes::SyncProtocolInvariantFailed, - util::format("Download message query version %1 is less than current bootstrap version %2", new_version, - m_flx_last_seen_version)); - } - - SubscriptionSet::State new_state = SubscriptionSet::State::Uncommitted; // Initialize to make compiler happy - - switch (batch_state) { - case DownloadBatchState::SteadyState: - // Cannot be called with this value. - REALM_UNREACHABLE(); - case DownloadBatchState::LastInBatch: - on_flx_sync_version_complete(new_version); - if (new_version == 0) { - new_state = SubscriptionSet::State::Complete; - } - else { - new_state = SubscriptionSet::State::AwaitingMark; - m_flx_pending_mark_version = new_version; - } - break; - case DownloadBatchState::MoreToCome: - if (m_flx_last_seen_version == new_version) { - return; - } - - m_flx_last_seen_version = new_version; - new_state = SubscriptionSet::State::Bootstrapping; - break; - } - - get_flx_subscription_store()->update_state(new_version, new_state); -} - SubscriptionStore* SessionWrapper::get_flx_subscription_store() { REALM_ASSERT(!m_finalized); @@ -1443,7 +1331,8 @@ void SessionWrapper::actualize() conn.update_connect_info(m_http_request_path_prefix, m_signed_access_token); // Throws std::unique_ptr sess = std::make_unique(*this, conn); // Throws if (m_sync_mode == SyncServerMode::FLX) { - m_flx_pending_bootstrap_store = std::make_unique(m_db, sess->logger); + m_flx_pending_bootstrap_store = + std::make_unique(m_db, sess->logger, m_flx_subscription_store); } sess->logger.info("Binding '%1' to '%2'", m_db->get_path(), m_virt_path); // Throws @@ -1453,10 +1342,6 @@ void SessionWrapper::actualize() }); conn.activate_session(std::move(sess)); // Throws - // Initialize the variables relying on the bootstrap store from the event loop to guarantee that a previous - // session cannot change the state of the bootstrap store at the same time. - update_subscription_version_info(); - if (was_created) conn.activate(); // Throws @@ -1563,6 +1448,10 @@ void SessionWrapper::on_download_completion() // completion without this. 
check_progress(); + if (m_flx_subscription_store) { + m_flx_subscription_store->download_complete(); + } + while (!m_download_completion_handlers.empty()) { auto handler = std::move(m_download_completion_handlers.back()); m_download_completion_handlers.pop_back(); @@ -1573,13 +1462,6 @@ void SessionWrapper::on_download_completion() m_upload_completion_handlers.push_back(std::move(handler)); // Throws m_sync_completion_handlers.pop_back(); } - - if (m_flx_subscription_store && m_flx_pending_mark_version != SubscriptionSet::EmptyVersion) { - m_sess->logger.debug("Marking query version %1 as complete after receiving MARK message", - m_flx_pending_mark_version); - m_flx_subscription_store->update_state(m_flx_pending_mark_version, SubscriptionSet::State::Complete); - m_flx_pending_mark_version = SubscriptionSet::EmptyVersion; - } } @@ -1626,15 +1508,34 @@ void SessionWrapper::check_progress() REALM_ASSERT(!m_finalized); REALM_ASSERT(m_sess); - if (!m_progress_handler && m_upload_completion_handlers.empty() && m_sync_completion_handlers.empty()) + // Check if there's anything which even wants progress or completion information + bool has_progress_handler = m_progress_handler && m_reliable_download_progress; + bool has_completion_handler = !m_upload_completion_handlers.empty() || !m_sync_completion_handlers.empty(); + if (!m_flx_subscription_store && !has_progress_handler && !has_completion_handler) return; - version_type uploaded_version; + // The order in which we report each type of completion or progress is important, + // and changing it needs to be avoided as it'd be a breaking change to the APIs + + TransactionRef tr; ReportedProgress p; + if (m_flx_subscription_store) { + m_flx_subscription_store->report_progress(tr); + } + + if (!has_progress_handler && !has_completion_handler) + return; + // The subscription store may have started a read transaction that we'll + // reuse, but it may not have needed to or may not exist + if (!tr) + tr = m_db->start_read(); + + version_type uploaded_version; DownloadableProgress downloadable; - ClientHistory::get_upload_download_state(*m_db, p.downloaded, downloadable, p.uploaded, p.uploadable, p.snapshot, - uploaded_version); - p.query_version = m_flx_last_seen_version; + ClientHistory::get_upload_download_state(*tr, m_db->get_alloc(), p.downloaded, downloadable, p.uploaded, + p.uploadable, p.snapshot, uploaded_version); + if (m_flx_subscription_store && has_progress_handler) + p.query_version = m_flx_subscription_store->get_downloading_query_version(*tr); report_progress(p, downloadable); report_upload_completion(uploaded_version); @@ -1793,15 +1694,6 @@ void SessionWrapper::handle_pending_client_reset_acknowledgement() }); } -void SessionWrapper::update_subscription_version_info() -{ - if (!m_flx_subscription_store) - return; - auto versions_info = m_flx_subscription_store->get_version_info(); - m_flx_active_version = versions_info.active; - m_flx_pending_mark_version = versions_info.pending_mark; -} - std::string SessionWrapper::get_appservices_connection_id() { auto pf = util::make_promise_future(); diff --git a/src/realm/sync/noinst/client_history_impl.cpp b/src/realm/sync/noinst/client_history_impl.cpp index 174a186dee8..022e381fa04 100644 --- a/src/realm/sync/noinst/client_history_impl.cpp +++ b/src/realm/sync/noinst/client_history_impl.cpp @@ -423,7 +423,7 @@ void ClientHistory::integrate_server_changesets( const SyncProgress& progress, DownloadableProgress downloadable_bytes, util::Span incoming_changesets, VersionInfo& version_info, 
DownloadBatchState batch_state, util::Logger& logger, const TransactionRef& transact, - util::UniqueFunction)> run_in_write_tr) + util::UniqueFunction)> run_in_write_tr) { REALM_ASSERT(incoming_changesets.size() != 0); REALM_ASSERT( @@ -502,7 +502,7 @@ void ClientHistory::integrate_server_changesets( update_sync_progress(progress, downloadable_bytes); // Throws } if (run_in_write_tr) { - run_in_write_tr(transact, changesets_for_cb); + run_in_write_tr(*transact, changesets_for_cb); } // The reason we can use the `origin_timestamp`, and the `origin_file_ident` @@ -610,14 +610,13 @@ size_t ClientHistory::transform_and_apply_server_changesets(util::Spanget_version(); + version_type current_client_version = rt.get_version(); downloaded_bytes = 0; downloadable_bytes = uint64_t(0); @@ -627,11 +626,11 @@ void ClientHistory::get_upload_download_state(DB& db, std::uint_fast64_t& downlo uploaded_version = 0; using gf = _impl::GroupFriend; - ref_type ref = gf::get_history_ref(*rt); + ref_type ref = gf::get_history_ref(rt); if (!ref) return; - Array root(db.get_alloc()); + Array root(alloc); root.init_from_ref(ref); downloaded_bytes = root.get_as_ref_or_tagged(s_progress_downloaded_bytes_iip).get_as_int(); downloadable_bytes = root.get_as_ref_or_tagged(s_progress_downloadable_bytes_iip).get_as_int(); @@ -642,9 +641,9 @@ void ClientHistory::get_upload_download_state(DB& db, std::uint_fast64_t& downlo if (uploaded_version == current_client_version) return; - BinaryColumn changesets(db.get_alloc()); + BinaryColumn changesets(alloc); changesets.init_from_ref(root.get_as_ref(s_changesets_iip)); - IntegerBpTree origin_file_idents(db.get_alloc()); + IntegerBpTree origin_file_idents(alloc); origin_file_idents.init_from_ref(root.get_as_ref(s_origin_file_idents_iip)); // `base_version` is the oldest version we have history for. If this is diff --git a/src/realm/sync/noinst/client_history_impl.hpp b/src/realm/sync/noinst/client_history_impl.hpp index 7d5b6b4dcc1..b1f74a1641b 100644 --- a/src/realm/sync/noinst/client_history_impl.hpp +++ b/src/realm/sync/noinst/client_history_impl.hpp @@ -252,10 +252,11 @@ class ClientHistory final : public _impl::History, public TransformHistory { const SyncProgress& progress, DownloadableProgress downloadable_bytes, util::Span changesets, VersionInfo& new_version, DownloadBatchState download_type, util::Logger&, const TransactionRef& transact, - util::UniqueFunction)> run_in_write_tr = nullptr); + util::UniqueFunction)> run_in_write_tr = nullptr); - static void get_upload_download_state(DB&, std::uint_fast64_t&, DownloadableProgress&, std::uint_fast64_t&, - std::uint_fast64_t&, std::uint_fast64_t&, version_type&); + static void get_upload_download_state(Transaction&, Allocator& alloc, std::uint_fast64_t&, DownloadableProgress&, + std::uint_fast64_t&, std::uint_fast64_t&, std::uint_fast64_t&, + version_type&); static void get_upload_download_state(DB*, std::uint_fast64_t&, std::uint_fast64_t&); /// Record the current download progress. 
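As a rough sketch of the reworked progress path, the subscription store and the history metadata now share a single read transaction; the helper below is hypothetical, but the individual calls mirror the ones made by SessionWrapper::check_progress() above.

    #include <cstdint>

    #include <realm/db.hpp>
    #include <realm/sync/noinst/client_history_impl.hpp>
    #include <realm/sync/subscriptions.hpp>

    using namespace realm;
    using namespace realm::sync;

    // Hypothetical helper: gather progress for one session with at most one read transaction.
    void progress_snapshot_sketch(DBRef db, SubscriptionStore& subs)
    {
        TransactionRef tr;
        subs.report_progress(tr);   // fulfills pending notifications; may hand back a read transaction
        if (!tr)
            tr = db->start_read();  // only start one if the store did not need to

        std::uint_fast64_t downloaded = 0, uploaded = 0, uploadable = 0, snapshot = 0;
        DownloadableProgress downloadable;
        version_type uploaded_version = 0;
        ClientHistory::get_upload_download_state(*tr, db->get_alloc(), downloaded, downloadable,
                                                 uploaded, uploadable, snapshot, uploaded_version);

        // The query version currently being downloaded, derived from the persisted state.
        int64_t query_version = subs.get_downloading_query_version(*tr);
        static_cast<void>(query_version);
    }
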
diff --git a/src/realm/sync/noinst/client_impl_base.cpp b/src/realm/sync/noinst/client_impl_base.cpp index eab65ec8710..eb57eafd45c 100644 --- a/src/realm/sync/noinst/client_impl_base.cpp +++ b/src/realm/sync/noinst/client_impl_base.cpp @@ -20,8 +20,6 @@ #include #include -#include // Only for websocket::Error TODO remove - #include #include @@ -1364,13 +1362,8 @@ void Connection::receive_query_error_message(int raw_error_code, std::string_vie "Received a FLX query error message on a non-FLX sync connection"}); } - Session* sess = find_and_validate_session(session_ident, "QUERY_ERROR"); - if (REALM_UNLIKELY(!sess)) { - return; - } - - if (auto status = sess->receive_query_error_message(raw_error_code, message, query_version); !status.is_ok()) { - close_due_to_protocol_error(std::move(status)); + if (Session* sess = find_and_validate_session(session_ident, "QUERY_ERROR")) { + sess->receive_query_error_message(raw_error_code, message, query_version); } } @@ -1586,7 +1579,7 @@ void Session::integrate_changesets(const SyncProgress& progress, std::uint_fast6 auto transact = get_db()->start_read(); history.integrate_server_changesets( progress, downloadable_bytes, received_changesets, version_info, download_batch_state, logger, transact, - [&](const TransactionRef&, util::Span changesets) { + [&](const Transaction&, util::Span changesets) { gather_pending_compensating_writes(changesets, &pending_compensating_write_errors); }); // Throws if (received_changesets.size() == 1) { @@ -2238,13 +2231,10 @@ bool Session::client_reset_if_needed() Status cr_status = client_reset_config->error; ProtocolErrorInfo::Action cr_action = client_reset_config->action; - auto on_flx_version_complete = [this](int64_t version) { - this->on_flx_sync_version_complete(version); - }; try { // The file ident from the fresh realm will be copied over to the local realm bool did_reset = client_reset::perform_client_reset(logger, *get_db(), std::move(*client_reset_config), - get_flx_subscription_store(), on_flx_version_complete); + get_flx_subscription_store()); call_debug_hook(SyncClientHookEvent::ClientResetMergeComplete); if (!did_reset) { @@ -2291,8 +2281,6 @@ bool Session::client_reset_if_needed() // Checks if there is a pending client reset handle_pending_client_reset_acknowledgement(); - update_subscription_version_info(); - // If a migration or rollback is in progress, mark it complete when client reset is completed. if (auto migration_store = get_migration_store()) { migration_store->complete_migration_or_rollback(); @@ -2526,16 +2514,10 @@ Status Session::receive_unbound_message() } -Status Session::receive_query_error_message(int error_code, std::string_view message, int64_t query_version) +void Session::receive_query_error_message(int error_code, std::string_view message, int64_t query_version) { logger.info("Received QUERY_ERROR \"%1\" (error_code=%2, query_version=%3)", message, error_code, query_version); - // Ignore the message if the deactivation process has been initiated, - // because in that case, the associated Realm and SessionWrapper must - // not be accessed any longer. 
- if (m_state == Active) { - on_flx_sync_error(query_version, message); // throws - } - return Status::OK(); + on_flx_sync_error(query_version, message); // throws } // The caller (Connection) must discard the session if the session has become diff --git a/src/realm/sync/noinst/client_impl_base.hpp b/src/realm/sync/noinst/client_impl_base.hpp index f9f386d52f5..c6dd8f97ca3 100644 --- a/src/realm/sync/noinst/client_impl_base.hpp +++ b/src/realm/sync/noinst/client_impl_base.hpp @@ -784,10 +784,6 @@ class ClientImpl::Session { /// \brief Gets the migration store associated with this Session. MigrationStore* get_migration_store(); - /// Update internal client state when a flx subscription becomes complete outside - /// of the normal sync process. This can happen during client reset. - void on_flx_sync_version_complete(int64_t version); - /// If this session is currently suspended, resume it immediately. /// /// It is an error to call this function before activation of the session, @@ -928,7 +924,6 @@ class ClientImpl::Session { //@} void on_flx_sync_error(int64_t version, std::string_view err_msg); - void on_flx_sync_progress(int64_t version, DownloadBatchState batch_state); // Processes an FLX download message, if it's a bootstrap message. If it's not a bootstrap // message then this is a noop and will return false. Otherwise this will return true @@ -941,8 +936,6 @@ class ClientImpl::Session { bool client_reset_if_needed(); void handle_pending_client_reset_acknowledgement(); - void update_subscription_version_info(); - void gather_pending_compensating_writes(util::Span changesets, std::vector* out); void begin_resumption_delay(const ProtocolErrorInfo& error_info); @@ -1115,7 +1108,7 @@ class ClientImpl::Session { Status receive_mark_message(request_ident_type); Status receive_unbound_message(); Status receive_error_message(const ProtocolErrorInfo& info); - Status receive_query_error_message(int error_code, std::string_view message, int64_t query_version); + void receive_query_error_message(int error_code, std::string_view message, int64_t query_version); Status receive_test_command_response(request_ident_type, std::string_view body); void initiate_rebind(); diff --git a/src/realm/sync/noinst/client_reset.cpp b/src/realm/sync/noinst/client_reset.cpp index 9b5463b05e0..8c00fed2d48 100644 --- a/src/realm/sync/noinst/client_reset.cpp +++ b/src/realm/sync/noinst/client_reset.cpp @@ -481,8 +481,7 @@ ClientResyncMode reset_precheck_guard(const TransactionRef& wt_local, ClientResy } bool perform_client_reset_diff(DB& db_local, sync::ClientReset& reset_config, util::Logger& logger, - sync::SubscriptionStore* sub_store, - util::FunctionRef on_flx_version_complete) + sync::SubscriptionStore* sub_store) { DB& db_remote = *reset_config.fresh_copy; auto wt_local = db_local.start_write(); @@ -538,18 +537,16 @@ bool perform_client_reset_diff(DB& db_local, sync::ClientReset& reset_config, ut history_local.set_history_adjustments(logger, wt_local->get_version(), fresh_file_ident, fresh_server_version, recovered); - int64_t subscription_version = 0; if (sub_store) { if (recover_local_changes) { - subscription_version = sub_store->mark_active_as_complete(*wt_local); + sub_store->mark_active_as_complete(*wt_local); } else { - subscription_version = sub_store->set_active_as_latest(*wt_local); + sub_store->set_active_as_latest(*wt_local); } } wt_local->commit_and_continue_as_read(); - on_flx_version_complete(subscription_version); VersionID new_version_local = wt_local->get_version_of_current_transaction(); 
logger.info(util::LogCategory::reset, diff --git a/src/realm/sync/noinst/client_reset.hpp b/src/realm/sync/noinst/client_reset.hpp index 66ef940af37..58cb0f9815d 100644 --- a/src/realm/sync/noinst/client_reset.hpp +++ b/src/realm/sync/noinst/client_reset.hpp @@ -74,8 +74,7 @@ ClientResyncMode reset_precheck_guard(const TransactionRef& wt_local, ClientResy // to the fresh Realm. Then the local Realm will have its client file ident set to // the file ident from the fresh realm bool perform_client_reset_diff(DB& db, sync::ClientReset& reset_config, util::Logger& logger, - sync::SubscriptionStore* sub_store, - util::FunctionRef on_flx_version_complete); + sync::SubscriptionStore* sub_store); } // namespace _impl::client_reset } // namespace realm diff --git a/src/realm/sync/noinst/client_reset_operation.cpp b/src/realm/sync/noinst/client_reset_operation.cpp index 59b0ee29052..98d752e64b7 100644 --- a/src/realm/sync/noinst/client_reset_operation.cpp +++ b/src/realm/sync/noinst/client_reset_operation.cpp @@ -52,7 +52,7 @@ bool is_fresh_path(const std::string& path) } bool perform_client_reset(util::Logger& logger, DB& db, sync::ClientReset&& reset_config, - sync::SubscriptionStore* sub_store, util::FunctionRef on_flx_version) + sync::SubscriptionStore* sub_store) { REALM_ASSERT(reset_config.mode != ClientResyncMode::Manual); REALM_ASSERT(reset_config.fresh_copy); @@ -97,8 +97,7 @@ bool perform_client_reset(util::Logger& logger, DB& db, sync::ClientReset&& rese if (notify_after) { previous_state = db.start_frozen(frozen_before_state_version); } - bool did_recover = - client_reset::perform_client_reset_diff(db, reset_config, logger, sub_store, on_flx_version); // throws + bool did_recover = client_reset::perform_client_reset_diff(db, reset_config, logger, sub_store); // throws if (notify_after) { notify_after(previous_state->get_version_of_current_transaction(), did_recover); diff --git a/src/realm/sync/noinst/client_reset_operation.hpp b/src/realm/sync/noinst/client_reset_operation.hpp index 748bea0a611..5ed44f0e325 100644 --- a/src/realm/sync/noinst/client_reset_operation.hpp +++ b/src/realm/sync/noinst/client_reset_operation.hpp @@ -21,7 +21,6 @@ #include #include -#include #include #include #include @@ -39,7 +38,7 @@ std::string get_fresh_path_for(const std::string& realm_path); bool is_fresh_path(const std::string& realm_path); bool perform_client_reset(util::Logger& logger, DB& db, sync::ClientReset&& reset_config, - sync::SubscriptionStore* sub_store, util::FunctionRef on_flx_version); + sync::SubscriptionStore* sub_store); } // namespace realm::_impl::client_reset diff --git a/src/realm/sync/noinst/pending_bootstrap_store.cpp b/src/realm/sync/noinst/pending_bootstrap_store.cpp index 229d35c431c..10fcfed55d2 100644 --- a/src/realm/sync/noinst/pending_bootstrap_store.cpp +++ b/src/realm/sync/noinst/pending_bootstrap_store.cpp @@ -29,6 +29,7 @@ #include "realm/sync/noinst/protocol_codec.hpp" #include "realm/sync/noinst/sync_metadata_schema.hpp" #include "realm/sync/protocol.hpp" +#include "realm/sync/subscriptions.hpp" #include "realm/sync/transform.hpp" #include "realm/util/assert.hpp" #include "realm/util/buffer.hpp" @@ -61,9 +62,11 @@ constexpr static std::string_view c_progress_latest_server_version_salt("latest_ } // namespace -PendingBootstrapStore::PendingBootstrapStore(DBRef db, util::Logger& logger) +PendingBootstrapStore::PendingBootstrapStore(DBRef db, util::Logger& logger, + std::shared_ptr subscription_store) : m_db(std::move(db)) , m_logger(logger) + , 
m_subscription_store(std::move(subscription_store)) { std::vector internal_tables{ {&m_table, @@ -129,8 +132,7 @@ PendingBootstrapStore::PendingBootstrapStore(DBRef db, util::Logger& logger) void PendingBootstrapStore::add_batch(int64_t query_version, util::Optional progress, DownloadableProgress download_progress, - const _impl::ClientProtocol::ReceivedChangesets& changesets, - bool* created_new_batch_out) + const _impl::ClientProtocol::ReceivedChangesets& changesets) { std::vector> compressed_changesets; compressed_changesets.reserve(changesets.size()); @@ -181,12 +183,12 @@ void PendingBootstrapStore::add_batch(int64_t query_version, util::Optionalcommit(); - - if (created_new_batch_out) { - *created_new_batch_out = did_create; + if (did_create) { + m_subscription_store->begin_bootstrap(*tr, query_version); } + tr->commit(); + if (did_create) { m_logger.debug(util::LogCategory::changeset, "Created new pending bootstrap object with %1 changesets for query version %2", @@ -202,6 +204,7 @@ void PendingBootstrapStore::add_batch(int64_t query_version, util::Optionalstart_write(); - clear(*tr); - tr->commit(); -} - -void PendingBootstrapStore::clear(Transaction& wt) +void PendingBootstrapStore::clear(Transaction& wt, int64_t query_version) { auto bootstrap_table = wt.get_table(m_table); bootstrap_table->clear(); + if (m_has_pending) { + m_subscription_store->cancel_bootstrap(wt, query_version); + } m_has_pending = false; } -PendingBootstrapStore::PendingBatch PendingBootstrapStore::peek_pending(size_t limit_in_bytes) +PendingBootstrapStore::PendingBatch PendingBootstrapStore::peek_pending(Transaction& tr, size_t limit_in_bytes) { - auto tr = m_db->start_read(); - auto bootstrap_table = tr->get_table(m_table); - if (bootstrap_table->is_empty()) { - return {}; - } + auto bootstrap_table = tr.get_table(m_table); // We should only have one pending bootstrap at a time. REALM_ASSERT(bootstrap_table->size() == 1); @@ -249,7 +244,7 @@ PendingBootstrapStore::PendingBatch PendingBootstrapStore::peek_pending(size_t l progress_obj.get(m_progress_download_client_version); progress.upload.last_integrated_server_version = progress_obj.get(m_progress_upload_server_version); progress.upload.client_version = progress_obj.get(m_progress_upload_client_version); - ret.progress = std::move(progress); + ret.progress = progress; } auto changeset_list = bootstrap_obj.get_linklist(m_changesets); @@ -310,14 +305,10 @@ PendingBootstrapStore::PendingBatchStats PendingBootstrapStore::pending_stats() return stats; } -void PendingBootstrapStore::pop_front_pending(const TransactionRef& tr, size_t count) +void PendingBootstrapStore::pop_front_pending(const Transaction& tr, size_t count) { - REALM_ASSERT_3(tr->get_transact_stage(), ==, DB::transact_Writing); - auto bootstrap_table = tr->get_table(m_table); - if (bootstrap_table->is_empty()) { - return; - } - + REALM_ASSERT_3(tr.get_transact_stage(), ==, DB::transact_Writing); + auto bootstrap_table = tr.get_table(m_table); // We should only have one pending bootstrap at a time. 
REALM_ASSERT(bootstrap_table->size() == 1); @@ -333,18 +324,22 @@ void PendingBootstrapStore::pop_front_pending(const TransactionRef& tr, size_t c } } + int64_t query_version = bootstrap_obj.get(m_query_version); if (changeset_list.is_empty()) { m_logger.debug(util::LogCategory::changeset, "Removing pending bootstrap obj for query version %1", - bootstrap_obj.get(m_query_version)); + query_version); bootstrap_obj.remove(); } else { m_logger.debug(util::LogCategory::changeset, - "Removing pending bootstrap batch for query version %1. %2 changeset remaining", - bootstrap_obj.get(m_query_version), changeset_list.size()); + "Removing pending bootstrap batch for query version %1. %2 changeset remaining", query_version, + changeset_list.size()); } m_has_pending = !bootstrap_table->is_empty(); + if (!m_has_pending) { + m_subscription_store->complete_bootstrap(tr, query_version); + } } } // namespace realm::sync diff --git a/src/realm/sync/noinst/pending_bootstrap_store.hpp b/src/realm/sync/noinst/pending_bootstrap_store.hpp index 52db9377946..e9e4079694b 100644 --- a/src/realm/sync/noinst/pending_bootstrap_store.hpp +++ b/src/realm/sync/noinst/pending_bootstrap_store.hpp @@ -22,8 +22,6 @@ #include "realm/list.hpp" #include "realm/obj.hpp" #include "realm/object-store/c_api/util.hpp" -#include "realm/sync/protocol.hpp" -#include "realm/sync/noinst/protocol_codec.hpp" #include "realm/sync/transform.hpp" #include "realm/util/buffer.hpp" #include "realm/util/logger.hpp" @@ -33,6 +31,8 @@ namespace realm::sync { +class SubscriptionStore; + class PendingBootstrapException : public std::runtime_error { public: using std::runtime_error::runtime_error; @@ -42,11 +42,10 @@ class PendingBootstrapException : public std::runtime_error { // that are sent across multiple download messages. class PendingBootstrapStore { public: - // Constructs from a DBRef. Constructing is destructive - since pending bootstraps are only valid for the - // session they occurred in, this will drop/clear all data when the bootstrap store is constructed. + // Constructs from a DBRef. // // Underneath this creates a table which stores each download message's changesets. - explicit PendingBootstrapStore(DBRef db, util::Logger& logger); + PendingBootstrapStore(DBRef db, util::Logger& logger, std::shared_ptr subscription_store); PendingBootstrapStore(const PendingBootstrapStore&) = delete; PendingBootstrapStore& operator=(const PendingBootstrapStore&) = delete; @@ -64,7 +63,7 @@ class PendingBootstrapStore { // Returns the next batch (download message) of changesets if it exists. The transaction must be in the reading // state. - PendingBatch peek_pending(size_t limit_in_bytes); + PendingBatch peek_pending(Transaction& tr, size_t limit_in_bytes); struct PendingBatchStats { int64_t query_version = 0; @@ -75,22 +74,19 @@ class PendingBootstrapStore { // Removes the first set of changesets from the current pending bootstrap batch. The transaction must be in the // writing state. - void pop_front_pending(const TransactionRef& tr, size_t count); + void pop_front_pending(const Transaction& tr, size_t count); // Adds a set of changesets to the store. 
void add_batch(int64_t query_version, util::Optional progress, - DownloadableProgress download_progress, const std::vector& changesets, - bool* created_new_batch); - - void clear(); - void clear(Transaction& wt); + DownloadableProgress download_progress, const std::vector& changesets); + void clear(Transaction& wt, int64_t query_version); private: DBRef m_db; // The pending_bootstrap_store is tied to the lifetime of a session, so a shared_ptr is not needed util::Logger& m_logger; - _impl::ClientProtocol m_client_protocol; + std::shared_ptr m_subscription_store; TableKey m_cursor_table; diff --git a/src/realm/sync/subscriptions.cpp b/src/realm/sync/subscriptions.cpp index 8205d0370fa..47682f288a5 100644 --- a/src/realm/sync/subscriptions.cpp +++ b/src/realm/sync/subscriptions.cpp @@ -91,7 +91,7 @@ SubscriptionSet::State state_from_storage(int64_t value) } } -int64_t state_to_storage(SubscriptionSet::State state) +constexpr int64_t state_to_storage(SubscriptionSet::State state) { switch (state) { case SubscriptionSet::State::Pending: @@ -109,7 +109,7 @@ int64_t state_to_storage(SubscriptionSet::State state) } } -size_t state_to_order(SubscriptionSet::State needle) +constexpr size_t state_to_order(SubscriptionSet::State needle) { using State = SubscriptionSet::State; switch (needle) { @@ -442,12 +442,6 @@ util::Future SubscriptionSet::get_state_change_notificat auto mgr = get_flx_subscription_store(); // Throws util::CheckedLockGuard lk(mgr->m_pending_notifications_mutex); - // If we've already been superseded by another version getting completed, then we should skip registering - // a notification because it may never fire. - if (mgr->m_min_outstanding_version > version()) { - return util::Future::make_ready(State::Superseded); - } - State cur_state = state(); std::string err_str = error_str(); @@ -486,35 +480,71 @@ void SubscriptionSet::get_state_change_notification( }); } -void SubscriptionStore::process_notifications(State new_state, int64_t version, std::string_view error_str) +void SubscriptionStore::report_progress() { - std::list to_finish; - { - util::CheckedLockGuard lk(m_pending_notifications_mutex); - splice_if(m_pending_notifications, to_finish, [&](auto& req) { - return (req.version == version && - (new_state == State::Error || state_to_order(new_state) >= state_to_order(req.notify_when))) || - (new_state == State::Complete && req.version < version); - }); + TransactionRef tr; + report_progress(tr); +} - if (new_state == State::Complete) { - m_min_outstanding_version = version; +void SubscriptionStore::report_progress(TransactionRef& tr) +{ + util::CheckedUniqueLock lk(m_pending_notifications_mutex); + if (m_pending_notifications.empty()) + return; + + if (!tr) + tr = m_db->start_read(); + auto sub_sets = tr->get_table(m_sub_set_table); + + struct NotificationCompletion { + util::Promise promise; + std::string_view error_str; + State state; + }; + std::vector to_finish; + m_pending_notifications.remove_if([&](NotificationRequest& req) { + Obj obj = sub_sets->get_object_with_primary_key(req.version); + if (!obj) { + to_finish.push_back({std::move(req.promise), {}, State::Superseded}); + return true; } - } - for (auto& req : to_finish) { - if (new_state == State::Error && req.version == version) { - req.promise.set_error({ErrorCodes::SubscriptionFailed, error_str}); + auto state = state_from_storage(obj.get(m_sub_set_state)); + if (state_to_order(state) < state_to_order(req.notify_when)) + return false; + + std::string_view error_str; + if (state == State::Error) { + error_str = 
std::string_view(obj.get(m_sub_set_error_str)); } - else if (req.version < version) { - req.promise.emplace_value(State::Superseded); + to_finish.push_back({std::move(req.promise), error_str, state}); + return true; + }); + lk.unlock(); + + for (auto& [promise, error_str, state] : to_finish) { + if (state == State::Error) { + promise.set_error({ErrorCodes::SubscriptionFailed, error_str}); } else { - req.promise.emplace_value(new_state); + promise.emplace_value(state); } } } +int64_t SubscriptionStore::get_downloading_query_version(Transaction& tr) const +{ + auto sub_sets = tr.get_table(m_sub_set_table); + ObjKey key; + const Mixed states[] = { + state_to_storage(SubscriptionSet::State::Complete), + state_to_storage(SubscriptionSet::State::AwaitingMark), + state_to_storage(SubscriptionSet::State::Bootstrapping), + }; + sub_sets->where().in(m_sub_set_state, std::begin(states), std::end(states)).max(m_sub_set_version_num, &key); + return key ? sub_sets->get_object(key).get(m_sub_set_version_num) : 0; +} + SubscriptionSet MutableSubscriptionSet::commit() { if (m_tr->get_transact_stage() != DB::transact_Writing) { @@ -541,14 +571,11 @@ SubscriptionSet MutableSubscriptionSet::commit() new_sub.set(mgr->m_sub_query_str, StringData(sub.query_string)); } m_obj.set(mgr->m_sub_set_state, state_to_storage(m_state)); - if (!m_error_str.empty()) { - m_obj.set(mgr->m_sub_set_error_str, StringData(m_error_str)); - } const auto flx_version = version(); m_tr->commit_and_continue_as_read(); - mgr->process_notifications(m_state, flx_version, std::string_view(error_str())); + mgr->report_progress(m_tr); return mgr->get_refreshed(m_obj.get_key(), flx_version, m_tr->get_version_of_current_transaction()); } @@ -701,20 +728,15 @@ Obj SubscriptionStore::get_active(const Transaction& tr) // There should always be at least one SubscriptionSet - the zeroth subscription set for schema instructions. REALM_ASSERT(!sub_sets->is_empty()); - DescriptorOrdering descriptor_ordering; - descriptor_ordering.append_sort(SortDescriptor{{{sub_sets->get_primary_key_column()}}, {false}}); - descriptor_ordering.append_limit(LimitDescriptor{1}); - auto res = sub_sets->where() - .equal(m_sub_set_state, state_to_storage(SubscriptionSet::State::Complete)) - .Or() - .equal(m_sub_set_state, state_to_storage(SubscriptionSet::State::AwaitingMark)) - .find_all(descriptor_ordering); + ObjKey key; + sub_sets->where() + .equal(m_sub_set_state, state_to_storage(SubscriptionSet::State::Complete)) + .Or() + .equal(m_sub_set_state, state_to_storage(SubscriptionSet::State::AwaitingMark)) + .max(m_sub_set_version_num, &key); // If there is no active subscription yet, return the zeroth subscription. - if (res.is_empty()) { - return sub_sets->get_object_with_primary_key(0); - } - return res.get_object(0); + return key ? sub_sets->get_object(key) : sub_sets->get_object_with_primary_key(0); } SubscriptionStore::VersionInfo SubscriptionStore::get_version_info() const @@ -724,24 +746,20 @@ SubscriptionStore::VersionInfo SubscriptionStore::get_version_info() const // There should always be at least one SubscriptionSet - the zeroth subscription set for schema instructions. REALM_ASSERT(!sub_sets->is_empty()); + auto get = [](Mixed m) { + return m.is_type(type_Int) ? 
m.get_int() : SubscriptionSet::EmptyVersion; + }; + VersionInfo ret; ret.latest = sub_sets->max(sub_sets->get_primary_key_column())->get_int(); - DescriptorOrdering descriptor_ordering; - descriptor_ordering.append_sort(SortDescriptor{{{sub_sets->get_primary_key_column()}}, {false}}); - descriptor_ordering.append_limit(LimitDescriptor{1}); - - auto res = sub_sets->where() - .equal(m_sub_set_state, state_to_storage(SubscriptionSet::State::Complete)) - .Or() - .equal(m_sub_set_state, state_to_storage(SubscriptionSet::State::AwaitingMark)) - .find_all(descriptor_ordering); - ret.active = res.is_empty() ? SubscriptionSet::EmptyVersion : res.get_object(0).get_primary_key().get_int(); - - res = sub_sets->where() - .equal(m_sub_set_state, state_to_storage(SubscriptionSet::State::AwaitingMark)) - .find_all(descriptor_ordering); - ret.pending_mark = res.is_empty() ? SubscriptionSet::EmptyVersion : res.get_object(0).get_primary_key().get_int(); - + ret.active = get(*sub_sets->where() + .equal(m_sub_set_state, state_to_storage(SubscriptionSet::State::Complete)) + .Or() + .equal(m_sub_set_state, state_to_storage(SubscriptionSet::State::AwaitingMark)) + .max(m_sub_set_version_num)); + ret.pending_mark = get(*sub_sets->where() + .equal(m_sub_set_state, state_to_storage(SubscriptionSet::State::AwaitingMark)) + .max(m_sub_set_version_num)); return ret; } @@ -753,22 +771,21 @@ SubscriptionStore::get_next_pending_version(int64_t last_query_version) const // There should always be at least one SubscriptionSet - the zeroth subscription set for schema instructions. REALM_ASSERT(!sub_sets->is_empty()); - DescriptorOrdering descriptor_ordering; - descriptor_ordering.append_sort(SortDescriptor{{{sub_sets->get_primary_key_column()}}, {true}}); - auto res = sub_sets->where() - .greater(sub_sets->get_primary_key_column(), last_query_version) - .group() - .equal(m_sub_set_state, state_to_storage(SubscriptionSet::State::Pending)) - .Or() - .equal(m_sub_set_state, state_to_storage(SubscriptionSet::State::Bootstrapping)) - .end_group() - .find_all(descriptor_ordering); - - if (res.is_empty()) { + ObjKey key; + sub_sets->where() + .greater(sub_sets->get_primary_key_column(), last_query_version) + .group() + .equal(m_sub_set_state, state_to_storage(SubscriptionSet::State::Pending)) + .Or() + .equal(m_sub_set_state, state_to_storage(SubscriptionSet::State::Bootstrapping)) + .end_group() + .min(m_sub_set_version_num, &key); + + if (!key) { return util::none; } - auto obj = res.get_object(0); + auto obj = sub_sets->get_object(key); auto query_version = obj.get_primary_key().get_int(); auto snapshot_version = obj.get(m_sub_set_snapshot_version); return PendingSubscription{query_version, static_cast(snapshot_version)}; @@ -807,7 +824,6 @@ void SubscriptionStore::reset(Transaction& wt) util::CheckedUniqueLock lk(m_pending_notifications_mutex); auto to_finish = std::move(m_pending_notifications); - m_min_outstanding_version = 0; lk.unlock(); for (auto& req : to_finish) { @@ -815,57 +831,124 @@ void SubscriptionStore::reset(Transaction& wt) } } -void SubscriptionStore::update_state(int64_t version, State new_state, std::optional error_str) +void SubscriptionStore::begin_bootstrap(const Transaction& tr, int64_t query_version) { - REALM_ASSERT(error_str.has_value() == (new_state == State::Error)); - REALM_ASSERT(new_state != State::Pending); - REALM_ASSERT(new_state != State::Superseded); - - auto tr = m_db->start_write(); - auto sub_sets = tr->get_table(m_sub_set_table); - auto obj = sub_sets->get_object_with_primary_key(version); 
+ auto sub_sets = tr.get_table(m_sub_set_table); + REALM_ASSERT(!sub_sets->is_empty()); + Obj obj = sub_sets->get_object_with_primary_key(query_version); if (!obj) { - // This can happen either due to a bug in the sync client or due to the - // server sending us an error message for an invalid query version. We - // assume it is the latter here. throw RuntimeError(ErrorCodes::SyncProtocolInvariantFailed, - util::format("Invalid state update for nonexistent query version %1", version)); + util::format("Received bootstrap for nonexistent query version %1", query_version)); } - auto old_state = state_from_storage(obj.get(m_sub_set_state)); - switch (new_state) { - case State::Error: - if (old_state == State::Complete) { - throw RuntimeError(ErrorCodes::SyncProtocolInvariantFailed, - util::format("Received error '%1' for already-completed query version %2. This " - "may be due to a queryable field being removed in the server-side " - "configuration making the previous subscription set no longer valid.", - *error_str, version)); - } - break; - - case State::Bootstrapping: + switch (auto old_state = state_from_storage(obj.get(m_sub_set_state))) { + case State::Complete: case State::AwaitingMark: - REALM_ASSERT(old_state != State::Complete); - REALM_ASSERT(old_state != State::Error); + // Once bootstrapping has completed it remains complete even if the + // server decides to send us more bootstrap messages + return; + + case State::Pending: + obj.set(m_sub_set_state, state_to_storage(State::Bootstrapping)); break; + case State::Error: { + auto error = obj.get(m_sub_set_error_str); + throw RuntimeError(ErrorCodes::SyncProtocolInvariantFailed, + util::format("Received bootstrap for query version %1 after receiving the error '%2'", + query_version, error)); + } + + default: + // Any other state is an internal bug of some sort + REALM_ASSERT_EX(false, old_state); + static_cast(old_state); + } +} + +void SubscriptionStore::do_complete_bootstrap(const Transaction& tr, int64_t query_version, State new_state) +{ + auto sub_sets = tr.get_table(m_sub_set_table); + REALM_ASSERT(!sub_sets->is_empty()); + Obj obj = sub_sets->get_object_with_primary_key(query_version); + // The sub set object being deleted while we're in the middle of applying + // a bootstrap would be an internal bug + REALM_ASSERT(obj); + + switch (auto old_state = state_from_storage(obj.get(m_sub_set_state))) { case State::Complete: - supercede_prior_to(tr, version); + case State::AwaitingMark: + // We were applying a bootstrap for a subscription which had already + // completed applying a bootstrap, which means something like a + // permission change occurred server-side which triggered a rebootstrap. + return; + + case State::Bootstrapping: + obj.set(m_sub_set_state, state_to_storage(new_state)); break; - case State::Uncommitted: - case State::Superseded: - case State::Pending: - REALM_TERMINATE("Illegal new state for subscription set"); + default: + // Any other state is an internal bug of some sort + REALM_ASSERT_EX(false, old_state); + static_cast(old_state); } - obj.set(m_sub_set_state, state_to_storage(new_state)); - obj.set(m_sub_set_error_str, error_str ? 
StringData(*error_str) : StringData()); + // Supersede all older subscription sets + if (new_state == State::AwaitingMark) { + sub_sets->where().less(m_sub_set_version_num, query_version).remove(); + } +} + +void SubscriptionStore::complete_bootstrap(const Transaction& tr, int64_t query_version) +{ + do_complete_bootstrap(tr, query_version, State::AwaitingMark); +} + +void SubscriptionStore::cancel_bootstrap(const Transaction& tr, int64_t query_version) +{ + do_complete_bootstrap(tr, query_version, State::Pending); +} +void SubscriptionStore::set_error(int64_t query_version, std::string_view error_str) +{ + auto tr = m_db->start_write(); + auto sub_sets = tr->get_table(m_sub_set_table); + auto obj = sub_sets->get_object_with_primary_key(query_version); + if (!obj) { + // This can happen either due to a bug in the sync client or due to the + // server sending us an error message for an invalid query version. We + // assume it is the latter here. + throw RuntimeError(ErrorCodes::SyncProtocolInvariantFailed, + util::format("Invalid state update for nonexistent query version %1", query_version)); + } + + auto old_state = state_from_storage(obj.get(m_sub_set_state)); + if (old_state == State::Complete) { + throw RuntimeError(ErrorCodes::SyncProtocolInvariantFailed, + util::format("Received error '%1' for already-completed query version %2. This " + "may be due to a queryable field being removed in the server-side " + "configuration making the previous subscription set no longer valid.", + error_str, query_version)); + } + + obj.set(m_sub_set_state, state_to_storage(State::Error)); + obj.set(m_sub_set_error_str, error_str); tr->commit(); +} - process_notifications(new_state, version, error_str.value_or(std::string_view{})); +void SubscriptionStore::download_complete() +{ + auto tr = m_db->start_read(); + auto obj = get_active(*tr); + if (state_from_storage(obj.get(m_sub_set_state)) != State::AwaitingMark) + return; + + // Although subscription sets can be created from any thread or process, + // they're only *modified* on the sync client thread, so we don't have to + // recheck that things have changed after the promote to write + tr->promote_to_write(); + obj.set(m_sub_set_state, state_to_storage(State::Complete)); + tr->commit(); } SubscriptionSet SubscriptionStore::get_by_version(int64_t version_id) @@ -875,9 +958,8 @@ SubscriptionSet SubscriptionStore::get_by_version(int64_t version_id) if (auto obj = sub_sets->get_object_with_primary_key(version_id)) { return SubscriptionSet(weak_from_this(), *tr, obj); } - - util::CheckedLockGuard lk(m_pending_notifications_mutex); - if (version_id < m_min_outstanding_version) { + REALM_ASSERT(!sub_sets->is_empty()); + if (version_id < sub_sets->min(m_sub_set_version_num)->get_int()) { return SubscriptionSet(weak_from_this(), version_id, SubscriptionSet::SupersededTag{}); } throw KeyNotFound(util::format("Subscription set with version %1 not found", version_id)); @@ -912,14 +994,6 @@ SubscriptionStore::TableSet SubscriptionStore::get_tables_for_latest(const Trans return ret; } -void SubscriptionStore::supercede_prior_to(TransactionRef tr, int64_t version_id) const -{ - auto sub_sets = tr->get_table(m_sub_set_table); - Query remove_query(sub_sets); - remove_query.less(sub_sets->get_primary_key_column(), version_id); - remove_query.remove(); -} - MutableSubscriptionSet SubscriptionStore::make_mutable_copy(const SubscriptionSet& set) { auto new_tr = m_db->start_write(); diff --git a/src/realm/sync/subscriptions.hpp b/src/realm/sync/subscriptions.hpp index 
8b735d53ac4..a4f8ae4ed1c 100644 --- a/src/realm/sync/subscriptions.hpp +++ b/src/realm/sync/subscriptions.hpp @@ -339,17 +339,51 @@ class SubscriptionStore : public std::enable_shared_from_this util::Optional get_next_pending_version(int64_t last_query_version) const; std::vector get_pending_subscriptions() REQUIRES(!m_pending_notifications_mutex); - // Update the state of an existing subscription set. `error_str` must be set - // if the new state is Error, and must not be set otherwise. Many state - // transitions are not legal; see the state diagram on SubscriptionSet. + // Mark query_version as having received an error from the server. Will + // throw an exception if the version is not in a state where an error is + // expected (i.e. if it's already completed or superseded). // - // If set to State::Complete, this will erase all subscription sets with a - // version less than the given one. + // This should only be called internally within the sync client. + void set_error(int64_t query_version, std::string_view error_str); + // Mark query_version as having begun bootstrapping. This should be called + // inside the write transaction used to store the first set of changesets. + // Has no effect if the version is already complete. Throws if the version + // is superseded or errored. + // + // This should only be called internally within the sync client. + void begin_bootstrap(const Transaction&, int64_t query_version); + // Mark query_version as having completed bootstrapping. This should be + // called inside the write transaction which removes the final pending changeset. + // Has no effect if the version is already complete. Throws if the version + // is superseded or errored. // // This should only be called internally within the sync client. - void update_state(int64_t version_id, SubscriptionSet::State state, - std::optional error_str = util::none) - REQUIRES(!m_pending_notifications_mutex); + void complete_bootstrap(const Transaction&, int64_t query_version); + // Roll query_version back to the Pending state if it is currently Bootstrapping. + // Has no effect if the bootstrap in progress is not the first boostrap for + // this subscription set. + // + // This should only be called internally within the sync client. + void cancel_bootstrap(const Transaction&, int64_t query_version); + // Report that a download has completed, meaning that the active subscription + // set should advance to the Completed state if it is currently in the + // AwaitingMark state. Has no effect if it is in any other state. + // + // This should only be called internally within the sync client. + void download_complete(); + + // If there are any notifications registered, check if they have been completed + // and fulfill them if so. + void report_progress() REQUIRES(!m_pending_notifications_mutex); + void report_progress(TransactionRef& tr) REQUIRES(!m_pending_notifications_mutex); + + // Get the query version which we most recently received a DOWNLOAD message + // for (which may be distinct from both the latest and active versions). + int64_t get_downloading_query_version(Transaction& rt) const; + + // Mark the currently active subscription set as being complete without going + // through the normal bootstrapping flow. Used for client resets where we + // copy the data for the subscription over from the fresh Realm. 
int64_t mark_active_as_complete(Transaction& wt) REQUIRES(!m_pending_notifications_mutex); // Notify all subscription state change notification handlers on this subscription store with the @@ -385,10 +419,6 @@ class SubscriptionStore : public std::enable_shared_from_this SubscriptionSet::State notify_when; }; - void process_notifications(State new_state, int64_t version, std::string_view error_str) - REQUIRES(!m_pending_notifications_mutex); - void supercede_prior_to(TransactionRef tr, int64_t version_id) const; - Obj get_active(const Transaction& tr); SubscriptionSet get_refreshed(ObjKey, int64_t flx_version, std::optional version = util::none); MutableSubscriptionSet make_mutable_copy(const SubscriptionSet& set); @@ -397,6 +427,7 @@ class SubscriptionStore : public std::enable_shared_from_this void initialize_subscriptions_table(TransactionRef&& tr); // Clear the table and reinitialize it. void clear(Transaction& wt); + void do_complete_bootstrap(const Transaction&, int64_t query_version, SubscriptionSet::State new_state); friend class MutableSubscriptionSet; friend class Subscription; @@ -418,7 +449,6 @@ class SubscriptionStore : public std::enable_shared_from_this ColKey m_sub_set_subscriptions; util::CheckedMutex m_pending_notifications_mutex; - int64_t m_min_outstanding_version GUARDED_BY(m_pending_notifications_mutex) = 0; std::list m_pending_notifications GUARDED_BY(m_pending_notifications_mutex); }; diff --git a/test/object-store/sync/flx_sync.cpp b/test/object-store/sync/flx_sync.cpp index b271b32b577..9f648c44d7a 100644 --- a/test/object-store/sync/flx_sync.cpp +++ b/test/object-store/sync/flx_sync.cpp @@ -2793,6 +2793,13 @@ TEST_CASE("flx: bootstrap batching prevents orphan documents", "[sync][flx][boot REQUIRE(latest_subs.at(0).object_class_name == "TopLevel"); }; + auto peek_pending_state = [](const DBRef& db) { + auto logger = util::Logger::get_default_logger(); + sync::PendingBootstrapStore bootstrap_store(db, *logger, nullptr); + REQUIRE(bootstrap_store.has_pending()); + return bootstrap_store.peek_pending(*db->start_read(), 1024 * 1024 * 16); + }; + auto mutate_realm = [&] { harness.load_initial_data([&](SharedRealm realm) { auto table = realm->read_group().get_table("class_TopLevel"); @@ -2847,10 +2854,7 @@ TEST_CASE("flx: bootstrap batching prevents orphan documents", "[sync][flx][boot DBOptions options; options.encryption_key = test_util::crypt_key(); auto realm = DB::create(sync::make_client_replication(), interrupted_realm_config.path, options); - auto logger = util::Logger::get_default_logger(); - sync::PendingBootstrapStore bootstrap_store(realm, *logger); - REQUIRE(bootstrap_store.has_pending()); - auto pending_batch = bootstrap_store.peek_pending(1024 * 1024 * 16); + auto pending_batch = peek_pending_state(realm); REQUIRE(pending_batch.query_version == 1); REQUIRE(pending_batch.progress); @@ -2940,10 +2944,7 @@ TEST_CASE("flx: bootstrap batching prevents orphan documents", "[sync][flx][boot DBOptions options; options.encryption_key = test_util::crypt_key(); auto realm = DB::create(sync::make_client_replication(), interrupted_realm_config.path, options); - util::StderrLogger logger; - sync::PendingBootstrapStore bootstrap_store(realm, logger); - REQUIRE(bootstrap_store.has_pending()); - auto pending_batch = bootstrap_store.peek_pending(1024 * 1024 * 16); + auto pending_batch = peek_pending_state(realm); REQUIRE(pending_batch.query_version == 1); REQUIRE(pending_batch.progress); @@ -3009,10 +3010,7 @@ TEST_CASE("flx: bootstrap batching prevents orphan documents", 
"[sync][flx][boot DBOptions options; options.encryption_key = test_util::crypt_key(); auto realm = DB::create(sync::make_client_replication(), interrupted_realm_config.path, options); - auto logger = util::Logger::get_default_logger(); - sync::PendingBootstrapStore bootstrap_store(realm, *logger); - REQUIRE(bootstrap_store.has_pending()); - auto pending_batch = bootstrap_store.peek_pending(1024 * 1024 * 16); + auto pending_batch = peek_pending_state(realm); REQUIRE(pending_batch.query_version == 1); REQUIRE(!pending_batch.progress); REQUIRE(pending_batch.remaining_changesets == 0); @@ -3086,10 +3084,7 @@ TEST_CASE("flx: bootstrap batching prevents orphan documents", "[sync][flx][boot DBOptions options; options.encryption_key = test_util::crypt_key(); auto realm = DB::create(sync::make_client_replication(), interrupted_realm_config.path, options); - auto logger = util::Logger::get_default_logger(); - sync::PendingBootstrapStore bootstrap_store(realm, *logger); - REQUIRE(bootstrap_store.has_pending()); - auto pending_batch = bootstrap_store.peek_pending(1024 * 1024 * 16); + auto pending_batch = peek_pending_state(realm); REQUIRE(pending_batch.query_version == 1); REQUIRE(static_cast(pending_batch.progress)); REQUIRE(pending_batch.remaining_changesets == 0); diff --git a/test/object-store/util/sync/sync_test_utils.cpp b/test/object-store/util/sync/sync_test_utils.cpp index 412caba3a5f..80191b355b1 100644 --- a/test/object-store/util/sync/sync_test_utils.cpp +++ b/test/object-store/util/sync/sync_test_utils.cpp @@ -436,7 +436,7 @@ struct FakeLocalClientReset : public TestClientReset { {ErrorCodes::SyncClientResetRequired, "Bad client file ident"}}; using _impl::client_reset::perform_client_reset_diff; - perform_client_reset_diff(*local_db, reset_config, *logger, nullptr, [](int64_t) {}); + perform_client_reset_diff(*local_db, reset_config, *logger, nullptr); remote_realm->close(); if (m_on_post_reset) { diff --git a/test/test_client_reset.cpp b/test/test_client_reset.cpp index ca67da47876..d99c365c028 100644 --- a/test/test_client_reset.cpp +++ b/test/test_client_reset.cpp @@ -916,8 +916,8 @@ void expect_reset(unit_test::TestContext& test_context, DBRef& target, DBRef& fr sync::ClientReset cr_config{mode, fresh, error, action}; - bool did_reset = _impl::client_reset::perform_client_reset(*test_context.logger, *target, std::move(cr_config), - sub_store, [](int64_t) {}); + bool did_reset = + _impl::client_reset::perform_client_reset(*test_context.logger, *target, std::move(cr_config), sub_store); CHECK(did_reset); // Should have closed and deleted the fresh realm @@ -1121,8 +1121,8 @@ TEST(ClientReset_UninitializedFile) // Should not perform a client reset because the target file has never been // written to - bool did_reset = _impl::client_reset::perform_client_reset(*test_context.logger, *db_empty, std::move(cr_config), - nullptr, [](int64_t) {}); + bool did_reset = + _impl::client_reset::perform_client_reset(*test_context.logger, *db_empty, std::move(cr_config), nullptr); CHECK_NOT(did_reset); auto rd_tr = db_empty->start_frozen(); CHECK_NOT(PendingResetStore::has_pending_reset(rd_tr)); @@ -1293,9 +1293,9 @@ TEST(ClientReset_Recover_RecoveryDisabled) {ErrorCodes::SyncClientResetRequired, "Bad client file identifier (IDENT)"}, sync::ProtocolErrorInfo::Action::ClientResetNoRecovery}; - CHECK_THROW((_impl::client_reset::perform_client_reset(*test_context.logger, *dbs.first, std::move(cr_config), - nullptr, [](int64_t) {})), - sync::ClientResetFailed); + CHECK_THROW( + 
_impl::client_reset::perform_client_reset(*test_context.logger, *dbs.first, std::move(cr_config), nullptr), + sync::ClientResetFailed); auto rd_tr = dbs.first->start_frozen(); CHECK_NOT(PendingResetStore::has_pending_reset(rd_tr)); } @@ -1542,15 +1542,15 @@ TEST(ClientReset_Recover_UploadableBytes) uint_fast64_t unused, pre_reset_uploadable_bytes; DownloadableProgress unused_progress; version_type unused_version; - history.get_upload_download_state(*db, unused, unused_progress, unused, pre_reset_uploadable_bytes, unused, - unused_version); + history.get_upload_download_state(*db->start_read(), db->get_alloc(), unused, unused_progress, unused, + pre_reset_uploadable_bytes, unused, unused_version); CHECK_GREATER(pre_reset_uploadable_bytes, 0); expect_reset(test_context, db, db_fresh, ClientResyncMode::Recover, nullptr); uint_fast64_t post_reset_uploadable_bytes; - history.get_upload_download_state(*db, unused, unused_progress, unused, post_reset_uploadable_bytes, unused, - unused_version); + history.get_upload_download_state(*db->start_read(), db->get_alloc(), unused, unused_progress, unused, + post_reset_uploadable_bytes, unused, unused_version); CHECK_GREATER(post_reset_uploadable_bytes, 0); CHECK_GREATER(pre_reset_uploadable_bytes, post_reset_uploadable_bytes); } diff --git a/test/test_sync_pending_bootstraps.cpp b/test/test_sync_pending_bootstraps.cpp index 25ef5b28cd0..25a49931dd5 100644 --- a/test/test_sync_pending_bootstraps.cpp +++ b/test/test_sync_pending_bootstraps.cpp @@ -1,6 +1,7 @@ #include "realm/db.hpp" #include "realm/sync/noinst/client_history_impl.hpp" #include "realm/sync/noinst/pending_bootstrap_store.hpp" +#include "realm/sync/subscriptions.hpp" #include "test.hpp" #include "util/test_path.hpp" @@ -14,9 +15,13 @@ TEST(Sync_PendingBootstrapStoreBatching) progress.download = {5, 5}; progress.latest_server_version = {5, 123456789}; progress.upload = {5, 5}; + { auto db = DB::create(make_client_replication(), db_path); - sync::PendingBootstrapStore store(db, *test_context.logger); + auto sub_store = sync::SubscriptionStore::create(db); + sync::PendingBootstrapStore store(db, *test_context.logger, sub_store); + int64_t query_version = sub_store->get_latest().make_mutable_copy().commit().version(); + CHECK_EQUAL(sub_store->get_by_version(query_version).state(), SubscriptionSet::State::Pending); CHECK(!store.has_pending()); std::vector changesets; @@ -32,11 +37,10 @@ TEST(Sync_PendingBootstrapStoreBatching) changesets.emplace_back(3, 8, BinaryData(changeset_data.back()), 3, 1); changesets.back().original_changeset_size = 1024; - bool created_new_batch = false; - store.add_batch(1, util::none, 0, changesets, &created_new_batch); + store.add_batch(query_version, util::none, 0, changesets); - CHECK(created_new_batch); CHECK(store.has_pending()); + CHECK_EQUAL(sub_store->get_by_version(query_version).state(), SubscriptionSet::State::Bootstrapping); changesets.clear(); changeset_data.clear(); @@ -47,13 +51,14 @@ TEST(Sync_PendingBootstrapStoreBatching) changesets.emplace_back(5, 10, BinaryData(changeset_data.back()), 5, 3); changesets.back().original_changeset_size = 1024; - store.add_batch(1, progress, 1, changesets, &created_new_batch); - CHECK(!created_new_batch); + store.add_batch(query_version, progress, 1, changesets); + CHECK_EQUAL(sub_store->get_by_version(query_version).state(), SubscriptionSet::State::Bootstrapping); } { auto db = DB::create(make_client_replication(), db_path); - sync::PendingBootstrapStore store(db, *test_context.logger); + auto sub_store = 
sync::SubscriptionStore::create(db); + sync::PendingBootstrapStore store(db, *test_context.logger, sub_store); CHECK(store.has_pending()); auto stats = store.pending_stats(); @@ -61,7 +66,7 @@ TEST(Sync_PendingBootstrapStoreBatching) CHECK_EQUAL(stats.pending_changesets, 5); CHECK_EQUAL(stats.query_version, 1); - auto pending_batch = store.peek_pending((1024 * 3) - 1); + auto pending_batch = store.peek_pending(*db->start_read(), (1024 * 3) - 1); CHECK_EQUAL(pending_batch.changesets.size(), 3); CHECK_EQUAL(pending_batch.remaining_changesets, 2); CHECK_EQUAL(pending_batch.query_version, 1); @@ -87,11 +92,11 @@ TEST(Sync_PendingBootstrapStoreBatching) validate_changeset(2, 3, 8, 'c', 3, 1); auto tr = db->start_write(); - store.pop_front_pending(tr, pending_batch.changesets.size()); + store.pop_front_pending(*tr, pending_batch.changesets.size()); tr->commit(); CHECK(store.has_pending()); - pending_batch = store.peek_pending(1024 * 2); + pending_batch = store.peek_pending(*db->start_read(), 1024 * 2); CHECK_EQUAL(pending_batch.changesets.size(), 2); CHECK_EQUAL(pending_batch.remaining_changesets, 0); CHECK_EQUAL(pending_batch.query_version, 1); @@ -100,9 +105,10 @@ TEST(Sync_PendingBootstrapStoreBatching) validate_changeset(1, 5, 10, 'e', 5, 3); tr = db->start_write(); - store.pop_front_pending(tr, pending_batch.changesets.size()); + store.pop_front_pending(*tr, pending_batch.changesets.size()); tr->commit(); CHECK(!store.has_pending()); + CHECK_EQUAL(sub_store->get_latest().state(), SubscriptionSet::State::AwaitingMark); } } @@ -114,7 +120,8 @@ TEST(Sync_PendingBootstrapStoreClear) progress.latest_server_version = {5, 123456789}; progress.upload = {5, 5}; auto db = DB::create(make_client_replication(), db_path); - sync::PendingBootstrapStore store(db, *test_context.logger); + auto sub_store = sync::SubscriptionStore::create(db); + sync::PendingBootstrapStore store(db, *test_context.logger, sub_store); CHECK(!store.has_pending()); std::vector changesets; @@ -127,25 +134,23 @@ TEST(Sync_PendingBootstrapStoreClear) changesets.emplace_back(2, 7, BinaryData(changeset_data.back()), 2, 1); changesets.back().original_changeset_size = 1024; - bool created_new_batch = false; - store.add_batch(2, progress, 1, changesets, &created_new_batch); - CHECK(created_new_batch); + int64_t query_version = sub_store->get_latest().make_mutable_copy().commit().version(); + store.add_batch(query_version, progress, 1, changesets); CHECK(store.has_pending()); + CHECK_EQUAL(SubscriptionSet::State::Bootstrapping, sub_store->get_latest().state()); - auto pending_batch = store.peek_pending(1025); + auto pending_batch = store.peek_pending(*db->start_read(), 1025); CHECK_EQUAL(pending_batch.remaining_changesets, 0); - CHECK_EQUAL(pending_batch.query_version, 2); + CHECK_EQUAL(pending_batch.query_version, query_version); CHECK(pending_batch.progress); CHECK_EQUAL(pending_batch.changesets.size(), 2); - store.clear(); + auto tr = db->start_write(); + store.clear(*tr, query_version); + tr->commit(); + CHECK_EQUAL(SubscriptionSet::State::Pending, sub_store->get_latest().state()); CHECK_NOT(store.has_pending()); - pending_batch = store.peek_pending(1024); - CHECK_EQUAL(pending_batch.changesets.size(), 0); - CHECK_EQUAL(pending_batch.query_version, 0); - CHECK_EQUAL(pending_batch.remaining_changesets, 0); - CHECK_NOT(pending_batch.progress); } } // namespace realm::sync diff --git a/test/test_sync_subscriptions.cpp b/test/test_sync_subscriptions.cpp index 0732d9ea1e5..99d9d831cdc 100644 --- a/test/test_sync_subscriptions.cpp +++ 
b/test/test_sync_subscriptions.cpp @@ -165,7 +165,13 @@ TEST(Sync_SubscriptionStoreStateUpdates) } // Mark the version 2 set as complete. - store->update_state(2, SubscriptionSet::State::Complete); + { + auto tr = fixture.db->start_write(); + store->begin_bootstrap(*tr, 2); + store->complete_bootstrap(*tr, 2); + tr->commit(); + store->download_complete(); + } // There should now only be one set, version 2, that is complete. Trying to // get version 1 should report that it was superseded @@ -179,6 +185,18 @@ TEST(Sync_SubscriptionStoreStateUpdates) CHECK_EQUAL(store->get_by_version(1).state(), SubscriptionSet::State::Superseded); } + // Trying to begin a bootstrap on the superseded version should report that + // the server did something invalid + { + auto tr = fixture.db->start_write(); + CHECK_THROW_ERROR(store->begin_bootstrap(*tr, 1), SyncProtocolInvariantFailed, "nonexistent query version 1"); + CHECK_THROW_ERROR(store->begin_bootstrap(*tr, 4), SyncProtocolInvariantFailed, "nonexistent query version 4"); + } + CHECK_THROW_ERROR(store->set_error(2, "err1"), SyncProtocolInvariantFailed, + "Received error 'err1' for already-completed query version 2"); + CHECK_THROW_ERROR(store->set_error(4, "err2"), SyncProtocolInvariantFailed, + "Invalid state update for nonexistent query version 4"); + { auto set = store->get_latest().make_mutable_copy(); CHECK_EQUAL(set.size(), 1); @@ -277,111 +295,239 @@ TEST(Sync_SubscriptionStoreAssignAnonAndNamed) } } -TEST(Sync_SubscriptionStoreNotifications) +TEST(Sync_SubscriptionStoreNotifications_Basics) { SHARED_GROUP_TEST_PATH(sub_store_path); SubscriptionStoreFixture fixture(sub_store_path); auto store = SubscriptionStore::create(fixture.db); - std::vector> notification_futures; auto sub_set = store->get_latest().make_mutable_copy(); - notification_futures.push_back(sub_set.get_state_change_notification(SubscriptionSet::State::Pending)); - sub_set = sub_set.commit().make_mutable_copy(); - notification_futures.push_back(sub_set.get_state_change_notification(SubscriptionSet::State::Bootstrapping)); + auto v1_pending = sub_set.get_state_change_notification(SubscriptionSet::State::Pending); + auto v1_bootstrapping = sub_set.get_state_change_notification(SubscriptionSet::State::Bootstrapping); + auto v1_awaiting_mark = sub_set.get_state_change_notification(SubscriptionSet::State::AwaitingMark); + auto v1_complete = sub_set.get_state_change_notification(SubscriptionSet::State::Complete); + auto v1_error = sub_set.get_state_change_notification(SubscriptionSet::State::Error); + auto v1_superseded = sub_set.get_state_change_notification(SubscriptionSet::State::Superseded); sub_set = sub_set.commit().make_mutable_copy(); - notification_futures.push_back(sub_set.get_state_change_notification(SubscriptionSet::State::Bootstrapping)); - sub_set = sub_set.commit().make_mutable_copy(); - notification_futures.push_back(sub_set.get_state_change_notification(SubscriptionSet::State::Complete)); - sub_set = sub_set.commit().make_mutable_copy(); - notification_futures.push_back(sub_set.get_state_change_notification(SubscriptionSet::State::Complete)); - sub_set = sub_set.commit().make_mutable_copy(); - notification_futures.push_back(sub_set.get_state_change_notification(SubscriptionSet::State::Complete)); - sub_set.commit(); + auto v2_pending = sub_set.get_state_change_notification(SubscriptionSet::State::Pending); + auto v2_bootstrapping = sub_set.get_state_change_notification(SubscriptionSet::State::Bootstrapping); + auto v2_complete = 
sub_set.get_state_change_notification(SubscriptionSet::State::Complete); // This should complete immediately because transitioning to the Pending state happens when you commit. - CHECK_EQUAL(notification_futures[0].get(), SubscriptionSet::State::Pending); + CHECK(v1_pending.is_ready()); + CHECK_EQUAL(v1_pending.get(), SubscriptionSet::State::Pending); // This should also return immediately with a ready future because the subset is in the correct state. CHECK_EQUAL(store->get_by_version(1).get_state_change_notification(SubscriptionSet::State::Pending).get(), SubscriptionSet::State::Pending); - // This should not be ready yet because we haven't updated its state. - CHECK_NOT(notification_futures[1].is_ready()); + // v2 hasn't been committed yet + CHECK_NOT(v2_pending.is_ready()); + sub_set.commit(); + CHECK(v2_pending.is_ready()); + CHECK_EQUAL(v2_pending.get(), SubscriptionSet::State::Pending); - store->update_state(2, SubscriptionSet::State::Bootstrapping); + // v1 bootstrapping hasn't begun yet + CHECK_NOT(v1_bootstrapping.is_ready()); - // Now we should be able to get the future result because we updated the state. - CHECK_EQUAL(notification_futures[1].get(), SubscriptionSet::State::Bootstrapping); + { + auto tr = fixture.db->start_write(); + store->begin_bootstrap(*tr, 1); + tr->commit_and_continue_as_read(); + store->report_progress(tr); + } - // This should not be ready yet because we haven't updated its state. - CHECK_NOT(notification_futures[2].is_ready()); + // v1 is now in the bootstrapping state but v2 isn't + CHECK(v1_bootstrapping.is_ready()); + CHECK_EQUAL(v1_bootstrapping.get(), SubscriptionSet::State::Bootstrapping); + CHECK_NOT(v2_bootstrapping.is_ready()); - // Update the state to complete - skipping the bootstrapping phase entirely. - store->update_state(3, SubscriptionSet::State::Complete); + // Requesting a new notification for the bootstrapping state completes immediately + CHECK_EQUAL(store->get_by_version(1).get_state_change_notification(SubscriptionSet::State::Bootstrapping).get(), + SubscriptionSet::State::Bootstrapping); - // Now we should be able to get the future result because we updated the state and skipped the bootstrapping - // phase. - CHECK_EQUAL(notification_futures[2].get(), SubscriptionSet::State::Complete); + // Completing the bootstrap advances us to the awaiting mark state + { + auto tr = fixture.db->start_write(); + store->complete_bootstrap(*tr, 1); + tr->commit_and_continue_as_read(); + store->report_progress(tr); + } - // Update one of the subscription sets to have an error state along with an error message. - std::string error_msg = "foo bar bizz buzz. 
i'm an error string for this test!"; - CHECK_NOT(notification_futures[3].is_ready()); - auto old_sub_set = store->get_by_version(4); - store->update_state(4, SubscriptionSet::State::Error, std::string_view(error_msg)); + CHECK(v1_awaiting_mark.is_ready()); + CHECK_EQUAL(v1_awaiting_mark.get(), SubscriptionSet::State::AwaitingMark); + CHECK_NOT(v1_complete.is_ready()); + CHECK_NOT(v2_bootstrapping.is_ready()); - CHECK_EQUAL(old_sub_set.state(), SubscriptionSet::State::Pending); - CHECK(old_sub_set.error_str().is_null()); - old_sub_set.refresh(); - CHECK_EQUAL(old_sub_set.state(), SubscriptionSet::State::Error); - CHECK_EQUAL(old_sub_set.error_str(), error_msg); + // Receiving download completion after the bootstrap application adavances us to Complete + store->download_complete(); + store->report_progress(); - // This should return a non-OK Status with the error message we set on the subscription set. - auto err_res = notification_futures[3].get_no_throw(); - CHECK_NOT(err_res.is_ok()); - CHECK_EQUAL(err_res.get_status().code(), ErrorCodes::SubscriptionFailed); - CHECK_EQUAL(err_res.get_status().reason(), error_msg); + CHECK(v1_complete.is_ready()); + CHECK_EQUAL(v1_complete.get(), SubscriptionSet::State::Complete); + CHECK_NOT(v1_error.is_ready()); + CHECK_NOT(v1_superseded.is_ready()); + CHECK_NOT(v2_bootstrapping.is_ready()); - // Getting a ready future on a set that's already in the error state should also return immediately with an error. - err_res = store->get_by_version(4).get_state_change_notification(SubscriptionSet::State::Complete).get_no_throw(); - CHECK_NOT(err_res.is_ok()); - CHECK_EQUAL(err_res.get_status().code(), ErrorCodes::SubscriptionFailed); - CHECK_EQUAL(err_res.get_status().reason(), error_msg); + // Completing v2's bootstrap supersedes v1, completing both that and the error notification + { + auto tr = fixture.db->start_write(); + store->begin_bootstrap(*tr, 2); + store->complete_bootstrap(*tr, 2); + tr->commit_and_continue_as_read(); + store->report_progress(tr); + } - // When a higher version supercedes an older one - i.e. you send query sets for versions 5/6 and the server starts - // bootstrapping version 6 - we expect the notifications for both versions to be fulfilled when the latest one - // completes bootstrapping. 
- CHECK_NOT(notification_futures[4].is_ready()); - CHECK_NOT(notification_futures[5].is_ready()); + CHECK(v1_error.is_ready()); + CHECK(v1_superseded.is_ready()); + CHECK(v2_bootstrapping.is_ready()); + CHECK_EQUAL(v1_error.get(), SubscriptionSet::State::Superseded); + CHECK_EQUAL(v1_superseded.get(), SubscriptionSet::State::Superseded); + CHECK_EQUAL(v2_bootstrapping.get(), SubscriptionSet::State::AwaitingMark); + + // v2 is not actually complete until we get download completion + CHECK_NOT(v2_complete.is_ready()); + store->download_complete(); + store->report_progress(); + CHECK(v2_complete.is_ready()); + CHECK_EQUAL(v2_complete.get(), SubscriptionSet::State::Complete); +} - old_sub_set = store->get_by_version(5); +TEST(Sync_SubscriptionStoreNotifications_Errors) +{ + SHARED_GROUP_TEST_PATH(sub_store_path); + SubscriptionStoreFixture fixture(sub_store_path); + auto store = SubscriptionStore::create(fixture.db); + + const auto states = {SubscriptionSet::State::Bootstrapping, SubscriptionSet::State::AwaitingMark, + SubscriptionSet::State::Complete, SubscriptionSet::State::Error}; - store->update_state(6, SubscriptionSet::State::Complete); + auto sub_set = store->get_latest().make_mutable_copy(); + std::vector> v1_futures; + for (auto state : states) + v1_futures.push_back(sub_set.get_state_change_notification(state)); + auto v1_superseded = sub_set.get_state_change_notification(SubscriptionSet::State::Superseded); + auto v1_sub_set = sub_set.commit(); + sub_set = v1_sub_set.make_mutable_copy(); + std::vector> v2_futures; + for (auto state : states) + v2_futures.push_back(sub_set.get_state_change_notification(state)); + auto v2_sub_set = sub_set.commit(); + sub_set = v2_sub_set.make_mutable_copy(); + std::vector> v3_futures; + for (auto state : states) + v3_futures.push_back(sub_set.get_state_change_notification(state)); + auto v3_sub_set = sub_set.commit(); + + // Error state completes notifications for all states except superseded + store->set_error(1, "v1 error message"); + store->report_progress(); + for (auto& fut : v1_futures) { + CHECK(fut.is_ready()); + auto status = fut.get_no_throw(); + CHECK_NOT(status.is_ok()); + CHECK_EQUAL(status.get_status().code(), ErrorCodes::SubscriptionFailed); + CHECK_EQUAL(status.get_status().reason(), "v1 error message"); + } + CHECK_NOT(v1_superseded.is_ready()); + + // Sub set is not updated until refreshing + CHECK_EQUAL(v1_sub_set.state(), SubscriptionSet::State::Pending); + + v1_sub_set.refresh(); + CHECK_EQUAL(v1_sub_set.state(), SubscriptionSet::State::Error); + CHECK_EQUAL(v1_sub_set.error_str(), "v1 error message"); + + // Directly fetching while in an error state works too + v1_sub_set = store->get_by_version(1); + CHECK_EQUAL(v1_sub_set.state(), SubscriptionSet::State::Error); + CHECK_EQUAL(v1_sub_set.error_str(), "v1 error message"); + + // v1 failing does not update v2 or v3 + for (auto& fut : v2_futures) + CHECK_NOT(fut.is_ready()); + for (auto& fut : v3_futures) + CHECK_NOT(fut.is_ready()); + + // v3 failing does not update v2 + store->set_error(3, "v3 error message"); + store->report_progress(); + + CHECK_NOT(v1_superseded.is_ready()); + for (auto& fut : v2_futures) + CHECK_NOT(fut.is_ready()); + for (auto& fut : v3_futures) { + CHECK(fut.is_ready()); + auto status = fut.get_no_throw(); + CHECK_NOT(status.is_ok()); + CHECK_EQUAL(status.get_status().code(), ErrorCodes::SubscriptionFailed); + CHECK_EQUAL(status.get_status().reason(), "v3 error message"); + } - CHECK_EQUAL(notification_futures[4].get(), SubscriptionSet::State::Superseded); 
- CHECK_EQUAL(notification_futures[5].get(), SubscriptionSet::State::Complete); + // Trying to begin a bootstrap on the errored version should report that + // the server did something invalid + { + auto tr = fixture.db->start_write(); + CHECK_THROW_ERROR(store->begin_bootstrap(*tr, 3), SyncProtocolInvariantFailed, + "Received bootstrap for query version 3 after receiving the error 'v3 error message'"); + } +} - // Also check that new requests for the superseded sub set get filled immediately. - CHECK_EQUAL(old_sub_set.get_state_change_notification(SubscriptionSet::State::Complete).get(), - SubscriptionSet::State::Superseded); - old_sub_set.refresh(); - CHECK_EQUAL(old_sub_set.state(), SubscriptionSet::State::Superseded); +TEST(Sync_SubscriptionStoreNotifications_MultipleStores) +{ + SHARED_GROUP_TEST_PATH(sub_store_path); + SubscriptionStoreFixture fixture(sub_store_path); + auto store_1 = SubscriptionStore::create(fixture.db); + auto db_2 = DB::create(make_client_replication(), sub_store_path); + auto store_2 = SubscriptionStore::create(db_2); - // Check that asking for a state change that is less than the current state of the sub set gets filled - // immediately. - CHECK_EQUAL(sub_set.get_state_change_notification(SubscriptionSet::State::Bootstrapping).get(), - SubscriptionSet::State::Complete); + store_1->get_latest().make_mutable_copy().commit(); - // Check that if a subscription set gets updated to a new state and the SubscriptionSet returned by commit() is - // not explicitly refreshed (i.e. is reading from a snapshot from before the state change), that it can still - // return a ready future. - auto mut_set = store->get_latest().make_mutable_copy(); - auto waitable_set = mut_set.commit(); + auto sub_set = store_2->get_latest(); + auto future = sub_set.get_state_change_notification(SubscriptionSet::State::Pending); + CHECK(future.is_ready()); - store->update_state(waitable_set.version(), SubscriptionSet::State::Complete); + future = sub_set.get_state_change_notification(SubscriptionSet::State::Bootstrapping); + CHECK_NOT(future.is_ready()); + { + auto tr = fixture.db->start_write(); + store_1->begin_bootstrap(*tr, 1); + tr->commit(); + } + CHECK_NOT(future.is_ready()); + store_2->report_progress(); + CHECK(future.is_ready()); - auto fut = waitable_set.get_state_change_notification(SubscriptionSet::State::Complete); - CHECK(fut.is_ready()); - CHECK_EQUAL(std::move(fut).get(), SubscriptionSet::State::Complete); + future = sub_set.get_state_change_notification(SubscriptionSet::State::AwaitingMark); + CHECK_NOT(future.is_ready()); + { + auto tr = fixture.db->start_write(); + store_1->complete_bootstrap(*tr, 1); + tr->commit(); + } + CHECK_NOT(future.is_ready()); + store_2->report_progress(); + CHECK(future.is_ready()); + + future = sub_set.get_state_change_notification(SubscriptionSet::State::Complete); + CHECK_NOT(future.is_ready()); + store_1->download_complete(); + CHECK_NOT(future.is_ready()); + store_2->report_progress(); + CHECK(future.is_ready()); + + future = sub_set.get_state_change_notification(SubscriptionSet::State::Superseded); + CHECK_NOT(future.is_ready()); + store_1->get_active().make_mutable_copy().commit(); + { + auto tr = fixture.db->start_write(); + store_1->begin_bootstrap(*tr, 2); + store_1->complete_bootstrap(*tr, 2); + tr->commit(); + } + CHECK_NOT(future.is_ready()); + store_2->report_progress(); + CHECK(future.is_ready()); } TEST(Sync_SubscriptionStoreRefreshSubscriptionSetInvalid) @@ -455,8 +601,16 @@ TEST(Sync_SubscriptionStoreNextPendingVersion) sub_set = 
mut_sub_set.commit(); auto pending_set = sub_set.version(); - store->update_state(complete_set, SubscriptionSet::State::Complete); - store->update_state(bootstrapping_set, SubscriptionSet::State::Bootstrapping); + { + auto tr = fixture.db->start_write(); + store->begin_bootstrap(*tr, complete_set); + store->complete_bootstrap(*tr, complete_set); + tr->commit(); + store->download_complete(); + tr = fixture.db->start_write(); + store->begin_bootstrap(*tr, bootstrapping_set); + tr->commit(); + } auto pending_version = store->get_next_pending_version(0); CHECK(pending_version); diff --git a/test/util/unit_test.hpp b/test/util/unit_test.hpp index 9e542b386d5..d35749d1a95 100644 --- a/test/util/unit_test.hpp +++ b/test/util/unit_test.hpp @@ -163,6 +163,20 @@ } \ }()) +#define CHECK_THROW_ERROR(expr, ec, message) \ + ([&] { \ + try { \ + (expr); \ + test_context.throw_any_failed(__FILE__, __LINE__, #expr); \ + } \ + catch (const Exception& e) { \ + CHECK_EQUAL(e.code(), ErrorCodes::ec); \ + CHECK_STRING_CONTAINS(e.what(), message); \ + return true; \ + } \ + return false; \ + }()) + #define CHECK_NOTHROW(expr) \ ([&] { \ try { \ From f35c76e47bd1483711bbf3ffd3b6c1b638113552 Mon Sep 17 00:00:00 2001 From: Jonathan Reams Date: Wed, 31 Jul 2024 15:01:39 -0400 Subject: [PATCH 13/15] RCORE-1990 Get exact parity with Jenkins for UWP builders in evg (#7937) --- evergreen/config.yml | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/evergreen/config.yml b/evergreen/config.yml index a1bb6ccc141..0fc2ec19351 100644 --- a/evergreen/config.yml +++ b/evergreen/config.yml @@ -2060,6 +2060,22 @@ buildvariants: cmake_bindir: "/cygdrive/c/Program Files/CMake/bin/" cmake_generator: "Visual Studio 16 2019" cmake_generator_platform: "Win32" + cmake_build_type: "Release" + extra_flags: -DCMAKE_SYSTEM_NAME=WindowsStore -DCMAKE_SYSTEM_VERSION=10.0 + max_jobs: $(($(grep -c proc /proc/cpuinfo) / 2)) + fetch_missing_dependencies: On + python3: "/cygdrive/c/python/python37/python.exe" + no_tests: On + tasks: + - name: compile_only + +- name: windows-x64-uwp + display_name: "Windows X64 (UWP)" + run_on: windows-vsCurrent-large + expansions: + cmake_bindir: "/cygdrive/c/Program Files/CMake/bin/" + cmake_generator: "Visual Studio 16 2019" + extra_flags: "-A x64" cmake_build_type: "Debug" extra_flags: -DCMAKE_SYSTEM_NAME=WindowsStore -DCMAKE_SYSTEM_VERSION=10.0 max_jobs: $(($(grep -c proc /proc/cpuinfo) / 2)) From 117c1cbf2b96eaa1bf51562e4f45d3649b3bdad2 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Thu, 1 Aug 2024 21:02:15 +0200 Subject: [PATCH 14/15] Prepare for release 14.11.2 (#7939) Co-authored-by: nicola-cab <1497069+nicola-cab@users.noreply.github.com> --- CHANGELOG.md | 3 +-- Package.swift | 2 +- dependencies.yml | 2 +- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 192c37e1a17..6ac4872eadb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,10 +1,9 @@ -# NEXT RELEASE +# 14.11.2 Release notes ### Enhancements * Sync log statements now include the app services connection id in their prefix (e.g `Connection[1:] Session[1]: log message`) to make correlating sync activity to server logs easier during troubleshooting ((PR #7849)[https://github.com/realm/realm-core/pull/7849]). ### Fixed -* ([#????](https://github.com/realm/realm-core/issues/????), since v?.?.?) 
* Sync client may report duplicate compensating write errors ([#7708](https://github.com/realm/realm-core/issues/7708), since v14.8.0). ### Breaking changes diff --git a/Package.swift b/Package.swift index 13f36481e45..6bf7f751455 100644 --- a/Package.swift +++ b/Package.swift @@ -3,7 +3,7 @@ import PackageDescription import Foundation -let versionStr = "14.11.1" +let versionStr = "14.11.2" let versionPieces = versionStr.split(separator: "-") let versionCompontents = versionPieces[0].split(separator: ".") let versionExtra = versionPieces.count > 1 ? versionPieces[1] : "" diff --git a/dependencies.yml b/dependencies.yml index e9f49bfa898..45bf8cf0afc 100644 --- a/dependencies.yml +++ b/dependencies.yml @@ -1,5 +1,5 @@ PACKAGE_NAME: realm-core -VERSION: 14.11.1 +VERSION: 14.11.2 OPENSSL_VERSION: 3.2.0 ZLIB_VERSION: 1.2.13 # https://github.com/10gen/baas/commits From b10122e61472310997852e0b83d337a6d96feba6 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Thu, 1 Aug 2024 21:03:56 +0200 Subject: [PATCH 15/15] New changelog section to prepare for vNext (#7941) Co-authored-by: nicola-cab <1497069+nicola-cab@users.noreply.github.com> --- CHANGELOG.md | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6ac4872eadb..3403afcfa99 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,26 @@ +# NEXT RELEASE + +### Enhancements +* (PR [#????](https://github.com/realm/realm-core/pull/????)) +* None. + +### Fixed +* ([#????](https://github.com/realm/realm-core/issues/????), since v?.?.?) +* None. + +### Breaking changes +* None. + +### Compatibility +* Fileformat: Generates files with format v24. Reads and automatically upgrade from fileformat v10. If you want to upgrade from an earlier file format version you will have to use RealmCore v13.x.y or earlier. + +----------- + +### Internals +* None. + +---------------------------------------------- + # 14.11.2 Release notes ### Enhancements
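
Taken together, the subscription-store changes in this series replace the old update_state() entry point with an explicit bootstrap lifecycle (begin_bootstrap, complete_bootstrap, cancel_bootstrap, download_complete, set_error, report_progress). The short C++ sketch below strings those calls together in the same order the new tests do; it is only an illustration of the flow exercised in test_sync_subscriptions.cpp and test_sync_pending_bootstraps.cpp, and the realm path, the single query version, and the standalone write transactions are assumptions made for the example rather than anything added by the patches.

#include <string>

#include <realm/db.hpp>
#include <realm/sync/noinst/client_history_impl.hpp>
#include <realm/sync/subscriptions.hpp>

using namespace realm;
using namespace realm::sync;

// Walk one subscription set through Pending -> Bootstrapping -> AwaitingMark
// -> Complete, mirroring the calls made in the tests above.
void drive_bootstrap_lifecycle(const std::string& realm_path)
{
    auto db = DB::create(make_client_replication(), realm_path);
    auto store = SubscriptionStore::create(db);

    // Committing a mutable copy of the latest set creates a new query
    // version in the Pending state.
    int64_t query_version = store->get_latest().make_mutable_copy().commit().version();

    // The sync client calls begin_bootstrap() inside the write transaction
    // that stores the first batch of bootstrap changesets; a bare write
    // stands in for that here.
    {
        auto tr = db->start_write();
        store->begin_bootstrap(*tr, query_version);
        tr->commit();
    }

    // complete_bootstrap() is called from the write that applies the final
    // pending changeset; it moves the set to AwaitingMark and supersedes any
    // older subscription sets.
    {
        auto tr = db->start_write();
        store->complete_bootstrap(*tr, query_version);
        tr->commit();
    }

    // Download completion promotes AwaitingMark to Complete, and
    // report_progress() fulfills any registered state-change notification
    // futures.
    store->download_complete();
    store->report_progress();

    // Had the server rejected the query instead, the client would call
    // store->set_error(query_version, "message"), and after report_progress()
    // the registered futures resolve with ErrorCodes::SubscriptionFailed.
}

Note that in the updated PendingBootstrapStore tests, add_batch(), pop_front_pending(), and clear() drive these same transitions, so the recorded subscription state stays in step with the stored bootstrap changesets when an interrupted client reopens the Realm.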