diff --git a/Firestore/Example/Firestore.xcodeproj/project.pbxproj b/Firestore/Example/Firestore.xcodeproj/project.pbxproj index abf18be6369..37f065398bd 100644 --- a/Firestore/Example/Firestore.xcodeproj/project.pbxproj +++ b/Firestore/Example/Firestore.xcodeproj/project.pbxproj @@ -71,6 +71,7 @@ 07ADEF17BFBC07C0C2E306F6 /* FSTMockDatastore.mm in Sources */ = {isa = PBXBuildFile; fileRef = 5492E02D20213FFC00B64F25 /* FSTMockDatastore.mm */; }; 07B1E8C62772758BC82FEBEE /* field_mask_test.cc in Sources */ = {isa = PBXBuildFile; fileRef = 549CCA5320A36E1F00BCEB75 /* field_mask_test.cc */; }; 07F1F1FA00CE7B55E3476FD4 /* Validation_BloomFilterTest_MD5_50000_01_membership_test_result.json in Resources */ = {isa = PBXBuildFile; fileRef = C8FB22BCB9F454DA44BA80C8 /* Validation_BloomFilterTest_MD5_50000_01_membership_test_result.json */; }; + 0845C33F3018D8ABCD1C7B47 /* canonify_eq_test.cc in Sources */ = {isa = PBXBuildFile; fileRef = 51004EAF5EE01ADCE8FE3788 /* canonify_eq_test.cc */; }; 0869E4C03A4648B67A719349 /* Validation_BloomFilterTest_MD5_500_1_membership_test_result.json in Resources */ = {isa = PBXBuildFile; fileRef = 8AB49283E544497A9C5A0E59 /* Validation_BloomFilterTest_MD5_500_1_membership_test_result.json */; }; 086A8CEDD4C4D5C858498C2D /* settings_test.cc in Sources */ = {isa = PBXBuildFile; fileRef = DD12BC1DB2480886D2FB0005 /* settings_test.cc */; }; 086E10B1B37666FB746D56BC /* FSTHelpers.mm in Sources */ = {isa = PBXBuildFile; fileRef = 5492E03A2021401F00B64F25 /* FSTHelpers.mm */; }; @@ -411,6 +412,7 @@ 37286D731E432CB873354357 /* remote_event_test.cc in Sources */ = {isa = PBXBuildFile; fileRef = 584AE2C37A55B408541A6FF3 /* remote_event_test.cc */; }; 37461AF1ACC2E64DF1709736 /* Validation_BloomFilterTest_MD5_1_01_bloom_filter_proto.json in Resources */ = {isa = PBXBuildFile; fileRef = 0D964D4936953635AC7E0834 /* Validation_BloomFilterTest_MD5_1_01_bloom_filter_proto.json */; }; 37664236439C338A73A984B9 /* debug_test.cc in Sources */ = {isa = 
PBXBuildFile; fileRef = F6DBD8EDF0074DD0079ECCE6 /* debug_test.cc */; }; + 377EDDC526AD5BB77E0CEC5D /* canonify_eq_test.cc in Sources */ = {isa = PBXBuildFile; fileRef = 51004EAF5EE01ADCE8FE3788 /* canonify_eq_test.cc */; }; 3783E25DFF9E5C0896D34FEF /* index_spec_test.json in Resources */ = {isa = PBXBuildFile; fileRef = 8C7278B604B8799F074F4E8C /* index_spec_test.json */; }; 37C4BF11C8B2B8B54B5ED138 /* string_apple_benchmark.mm in Sources */ = {isa = PBXBuildFile; fileRef = 4C73C0CC6F62A90D8573F383 /* string_apple_benchmark.mm */; }; 37EC6C6EA9169BB99078CA96 /* reference_set_test.cc in Sources */ = {isa = PBXBuildFile; fileRef = 132E32997D781B896672D30A /* reference_set_test.cc */; }; @@ -1074,6 +1076,7 @@ 8778C1711059598070F86D3C /* leveldb_globals_cache_test.cc in Sources */ = {isa = PBXBuildFile; fileRef = FC44D934D4A52C790659C8D6 /* leveldb_globals_cache_test.cc */; }; 87B5972F1C67CB8D53ADA024 /* object_value_test.cc in Sources */ = {isa = PBXBuildFile; fileRef = 214877F52A705012D6720CA0 /* object_value_test.cc */; }; 87B5AC3EBF0E83166B142FA4 /* string_apple_benchmark.mm in Sources */ = {isa = PBXBuildFile; fileRef = 4C73C0CC6F62A90D8573F383 /* string_apple_benchmark.mm */; }; + 87EC2B2C93CBF76A94BA2C31 /* canonify_eq_test.cc in Sources */ = {isa = PBXBuildFile; fileRef = 51004EAF5EE01ADCE8FE3788 /* canonify_eq_test.cc */; }; 881E55152AB34465412F8542 /* FSTAPIHelpers.mm in Sources */ = {isa = PBXBuildFile; fileRef = 5492E04E202154AA00B64F25 /* FSTAPIHelpers.mm */; }; 88929ED628DA8DD9592974ED /* task_test.cc in Sources */ = {isa = PBXBuildFile; fileRef = 899FC22684B0F7BEEAE13527 /* task_test.cc */; }; 8976F3D5515C4A784EC6627F /* arithmetic_test.cc in Sources */ = {isa = PBXBuildFile; fileRef = 76EED4ED84056B623D92FE20 /* arithmetic_test.cc */; }; @@ -1102,6 +1105,7 @@ 8E730A5C992370DCBDD833E9 /* unicode_test.cc in Sources */ = {isa = PBXBuildFile; fileRef = 09C56D14F17CA02A07C60847 /* unicode_test.cc */; }; 8E7CC4EAE25E06CDAB4001DF /* nested_properties_test.cc 
in Sources */ = {isa = PBXBuildFile; fileRef = 8AC88AA2B929CFEC2656E37D /* nested_properties_test.cc */; }; 8ECDF2AFCF1BCA1A2CDAAD8A /* document_test.cc in Sources */ = {isa = PBXBuildFile; fileRef = AB6B908320322E4D00CC290A /* document_test.cc */; }; + 8ED98C1CF17399FC0990DD4B /* canonify_eq_test.cc in Sources */ = {isa = PBXBuildFile; fileRef = 51004EAF5EE01ADCE8FE3788 /* canonify_eq_test.cc */; }; 8F2055702DB5EE8DA4BACD7C /* memory_document_overlay_cache_test.cc in Sources */ = {isa = PBXBuildFile; fileRef = 29D9C76922DAC6F710BC1EF4 /* memory_document_overlay_cache_test.cc */; }; 8F3AE423677A4C50F7E0E5C0 /* database_info_test.cc in Sources */ = {isa = PBXBuildFile; fileRef = AB38D92E20235D22000A432D /* database_info_test.cc */; }; 8F4F40E9BC7ED588F67734D5 /* app_testing.mm in Sources */ = {isa = PBXBuildFile; fileRef = 5467FB07203E6A44009C9584 /* app_testing.mm */; }; @@ -1220,6 +1224,8 @@ A4ECA8335000CBDF94586C94 /* FSTDatastoreTests.mm in Sources */ = {isa = PBXBuildFile; fileRef = 5492E07E202154EC00B64F25 /* FSTDatastoreTests.mm */; }; A4F2B68E7EFADB0EB443CFF8 /* Pods_Firestore_Tests_iOS.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 8294C2063C0096AE5E43F6DF /* Pods_Firestore_Tests_iOS.framework */; }; A5175CA2E677E13CC5F23D72 /* document_test.cc in Sources */ = {isa = PBXBuildFile; fileRef = AB6B908320322E4D00CC290A /* document_test.cc */; }; + A5301AA55748A11801E3EE47 /* field_behavior.pb.cc in Sources */ = {isa = PBXBuildFile; fileRef = FAAF1A69F4A315C38357BDC4 /* field_behavior.pb.cc */; }; + A53C9BA3D0E366DCCDD640BF /* canonify_eq_test.cc in Sources */ = {isa = PBXBuildFile; fileRef = 51004EAF5EE01ADCE8FE3788 /* canonify_eq_test.cc */; }; A55266E6C986251D283CE948 /* FIRCursorTests.mm in Sources */ = {isa = PBXBuildFile; fileRef = 5492E070202154D600B64F25 /* FIRCursorTests.mm */; }; A5583822218F9D5B1E86FCAC /* overlay_test.cc in Sources */ = {isa = PBXBuildFile; fileRef = E1459FA70B8FC18DE4B80D0D /* overlay_test.cc */; }; 
A57EC303CD2D6AA4F4745551 /* FIRFieldValueTests.mm in Sources */ = {isa = PBXBuildFile; fileRef = 5492E04A202154AA00B64F25 /* FIRFieldValueTests.mm */; }; @@ -1586,6 +1592,7 @@ DAFF0D0121E64AC40062958F /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = DAFF0D0021E64AC40062958F /* main.m */; }; DAFF0D0921E653A00062958F /* GoogleService-Info.plist in Resources */ = {isa = PBXBuildFile; fileRef = 54D400D32148BACE001D2BCC /* GoogleService-Info.plist */; }; DB3ADDA51FB93E84142EA90D /* FIRBundlesTests.mm in Sources */ = {isa = PBXBuildFile; fileRef = 776530F066E788C355B78457 /* FIRBundlesTests.mm */; }; + DB4EBD8AA4FC9AB004BA5DB4 /* canonify_eq_test.cc in Sources */ = {isa = PBXBuildFile; fileRef = 51004EAF5EE01ADCE8FE3788 /* canonify_eq_test.cc */; }; DB7E9C5A59CCCDDB7F0C238A /* path_test.cc in Sources */ = {isa = PBXBuildFile; fileRef = 403DBF6EFB541DFD01582AA3 /* path_test.cc */; }; DBDC8E997E909804F1B43E92 /* log_test.cc in Sources */ = {isa = PBXBuildFile; fileRef = 54C2294E1FECABAE007D065B /* log_test.cc */; }; DBF2E95F2EA837033E4A0528 /* array_test.cc in Sources */ = {isa = PBXBuildFile; fileRef = 0458BABD8F8738AD16F4A2FE /* array_test.cc */; }; @@ -1974,6 +1981,7 @@ 4D65F6E69993611D47DC8E7C /* SnapshotListenerSourceTests.swift */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = sourcecode.swift; path = SnapshotListenerSourceTests.swift; sourceTree = ""; }; 4D9E51DA7A275D8B1CAEAEB2 /* listen_source_spec_test.json */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.json; path = listen_source_spec_test.json; sourceTree = ""; }; 4F5B96F3ABCD2CA901DB1CD4 /* bundle_builder.cc */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = sourcecode.cpp.cpp; path = bundle_builder.cc; sourceTree = ""; }; + 51004EAF5EE01ADCE8FE3788 /* canonify_eq_test.cc */ = {isa = PBXFileReference; includeInIndex = 1; name = canonify_eq_test.cc; path = pipeline/canonify_eq_test.cc; sourceTree = ""; }; 526D755F65AC676234F57125 /* 
target_test.cc */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = sourcecode.cpp.cpp; path = target_test.cc; sourceTree = ""; }; 52756B7624904C36FBB56000 /* fake_target_metadata_provider.h */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = sourcecode.c.h; path = fake_target_metadata_provider.h; sourceTree = ""; }; 5342CDDB137B4E93E2E85CCA /* byte_string_test.cc */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = sourcecode.cpp.cpp; name = byte_string_test.cc; path = nanopb/byte_string_test.cc; sourceTree = ""; }; @@ -3109,6 +3117,7 @@ 994A757C4E80A7423BCA69E5 /* pipeline */ = { isa = PBXGroup; children = ( + 51004EAF5EE01ADCE8FE3788 /* canonify_eq_test.cc */, 3081975D68903993303FA256 /* collection_group_test.cc */, 4B0A3187AAD8B02135E80C2E /* collection_test.cc */, B32C2DDDEC16F6465317B8AE /* complex_test.cc */, @@ -4489,6 +4498,7 @@ AB6D588EB21A2C8D40CEB408 /* byte_stream_cpp_test.cc in Sources */, AA13B6E1EF0AD9E9857AAE1C /* byte_stream_test.cc in Sources */, EBE4A7B6A57BCE02B389E8A6 /* byte_string_test.cc in Sources */, + A53C9BA3D0E366DCCDD640BF /* canonify_eq_test.cc in Sources */, 9AC604BF7A76CABDF26F8C8E /* cc_compilation_test.cc in Sources */, F5231A9CB6877EB3A269AFF0 /* collection_group_test.cc in Sources */, 1B730A4E8C4BD7B5B0FF9C7F /* collection_test.cc in Sources */, @@ -4653,8 +4663,8 @@ 482D503CC826265FCEAB53DE /* thread_safe_memoizer_testing.cc in Sources */, 451EFFB413364E5A420F8B2D /* thread_safe_memoizer_testing_test.cc in Sources */, 5497CB78229DECDE000FB92F /* time_testing.cc in Sources */, - ACC9369843F5ED3BD2284078 /* timestamp_test.cc in Sources */, B7EFE1206B6A5A1712BD6745 /* timestamp_test.cc in Sources */, + ACC9369843F5ED3BD2284078 /* timestamp_test.cc in Sources */, 2AAEABFD550255271E3BAC91 /* to_string_apple_test.mm in Sources */, 1E2AE064CF32A604DC7BFD4D /* to_string_test.cc in Sources */, AAFA9D7A0A067F2D3D8D5487 /* token_test.cc in Sources */, @@ -4740,6 +4750,7 @@ 
A3262936317851958C8EABAF /* byte_stream_cpp_test.cc in Sources */, 44C4244E42FFFB6E9D7F28BA /* byte_stream_test.cc in Sources */, E1264B172412967A09993EC6 /* byte_string_test.cc in Sources */, + 87EC2B2C93CBF76A94BA2C31 /* canonify_eq_test.cc in Sources */, 079E63E270F3EFCA175D2705 /* cc_compilation_test.cc in Sources */, FCE5A2058DCFA6999FBF826F /* collection_group_test.cc in Sources */, 0480559E91BB66732ABE45C8 /* collection_test.cc in Sources */, @@ -4904,8 +4915,8 @@ 3D6AC48D6197E6539BBBD28F /* thread_safe_memoizer_testing.cc in Sources */, 7801E06BFFB08FCE7AB54AD6 /* thread_safe_memoizer_testing_test.cc in Sources */, 5497CB79229DECDE000FB92F /* time_testing.cc in Sources */, - 26CB3D7C871BC56456C6021E /* timestamp_test.cc in Sources */, 02E1EA3818F4BEEA9CE40DAE /* timestamp_test.cc in Sources */, + 26CB3D7C871BC56456C6021E /* timestamp_test.cc in Sources */, 5BE49546D57C43DDFCDB6FBD /* to_string_apple_test.mm in Sources */, E500AB82DF2E7F3AFDB1AB3F /* to_string_test.cc in Sources */, 5C9B5696644675636A052018 /* token_test.cc in Sources */, @@ -5017,12 +5028,13 @@ 583DF65751B7BBD0A222CAB4 /* byte_stream_cpp_test.cc in Sources */, 915A9B8DB280DB4787D83FFE /* byte_stream_test.cc in Sources */, D658E6DA5A218E08810E1688 /* byte_string_test.cc in Sources */, + DB4EBD8AA4FC9AB004BA5DB4 /* canonify_eq_test.cc in Sources */, 0A52B47C43B7602EE64F53A7 /* cc_compilation_test.cc in Sources */, E3E6B368A755D892F937DBF7 /* collection_group_test.cc in Sources */, 064689971747DA312770AB7A /* collection_test.cc in Sources */, 1DB3013C5FC736B519CD65A3 /* common.pb.cc in Sources */, - 99F97B28DA546D42AB14214B /* comparison_test.cc in Sources */, 555161D6DB2DDC8B57F72A70 /* comparison_test.cc in Sources */, + 99F97B28DA546D42AB14214B /* comparison_test.cc in Sources */, BB5F19878EA5A8D9C7276D40 /* complex_test.cc in Sources */, 7394B5C29C6E524C2AF964E6 /* counting_query_engine.cc in Sources */, C02A969BF4BB63ABCB531B4B /* create_noop_connectivity_monitor.cc in Sources */, @@ 
-5181,8 +5193,8 @@ 25D74F38A5EE96CC653ABB49 /* thread_safe_memoizer_testing.cc in Sources */, 688AC36AA9D0677E910D5A37 /* thread_safe_memoizer_testing_test.cc in Sources */, 6300709ECDE8E0B5A8645F8D /* time_testing.cc in Sources */, - A405A976DB6444D3ED3FCAB2 /* timestamp_test.cc in Sources */, 0CEE93636BA4852D3C5EC428 /* timestamp_test.cc in Sources */, + A405A976DB6444D3ED3FCAB2 /* timestamp_test.cc in Sources */, 95DCD082374F871A86EF905F /* to_string_apple_test.mm in Sources */, 9E656F4FE92E8BFB7F625283 /* to_string_test.cc in Sources */, 96D95E144C383459D4E26E47 /* token_test.cc in Sources */, @@ -5294,12 +5306,13 @@ 2F3740131CC8F8230351B91D /* byte_stream_cpp_test.cc in Sources */, 62EC5F7FB416BA124A2B4604 /* byte_stream_test.cc in Sources */, 297DC2B3C1EB136D58F4BA9C /* byte_string_test.cc in Sources */, + 377EDDC526AD5BB77E0CEC5D /* canonify_eq_test.cc in Sources */, 1E8A00ABF414AC6C6591D9AC /* cc_compilation_test.cc in Sources */, 1CDA0E10BC669276E0EAA1E8 /* collection_group_test.cc in Sources */, C87DF880BADEA1CBF8365700 /* collection_test.cc in Sources */, 1D71CA6BBA1E3433F243188E /* common.pb.cc in Sources */, - 476AE05E0878007DE1BF5460 /* comparison_test.cc in Sources */, 9C86EEDEA131BFD50255EEF1 /* comparison_test.cc in Sources */, + 476AE05E0878007DE1BF5460 /* comparison_test.cc in Sources */, C5434EF8A0C8B79A71F0784C /* complex_test.cc in Sources */, DCD83C545D764FB15FD88B02 /* counting_query_engine.cc in Sources */, ECC433628575AE994C621C54 /* create_noop_connectivity_monitor.cc in Sources */, @@ -5458,8 +5471,8 @@ CF18D52A88F4F6F62C5495EF /* thread_safe_memoizer_testing.cc in Sources */, A7669E72BCED7FBADA4B1314 /* thread_safe_memoizer_testing_test.cc in Sources */, A25FF76DEF542E01A2DF3B0E /* time_testing.cc in Sources */, - BDDAB87A7D76562BCB5D0BF8 /* timestamp_test.cc in Sources */, 1E42CD0F60EB22A5D0C86D1F /* timestamp_test.cc in Sources */, + BDDAB87A7D76562BCB5D0BF8 /* timestamp_test.cc in Sources */, F9705E595FC3818F13F6375A /* 
to_string_apple_test.mm in Sources */, 3BAFCABA851AE1865D904323 /* to_string_test.cc in Sources */, 1B9E54F4C4280A713B825981 /* token_test.cc in Sources */, @@ -5555,6 +5568,7 @@ 0B55CD5CB8DFEBF2D22A2332 /* byte_stream_cpp_test.cc in Sources */, 44A8B51C05538A8DACB85578 /* byte_stream_test.cc in Sources */, 7B86B1B21FD0EF2A67547F66 /* byte_string_test.cc in Sources */, + 0845C33F3018D8ABCD1C7B47 /* canonify_eq_test.cc in Sources */, 08A9C531265B5E4C5367346E /* cc_compilation_test.cc in Sources */, BD333303B7E2C052F54F9F83 /* collection_group_test.cc in Sources */, C551536B0BAE9EB452DD6758 /* collection_test.cc in Sources */, @@ -5719,8 +5733,8 @@ 8D67BAAD6D2F1913BACA6AC1 /* thread_safe_memoizer_testing.cc in Sources */, BD0882A40BD8AE042629C179 /* thread_safe_memoizer_testing_test.cc in Sources */, 5497CB77229DECDE000FB92F /* time_testing.cc in Sources */, - ABF6506C201131F8005F2C74 /* timestamp_test.cc in Sources */, 3D1365A99984C2F86C2B8A82 /* timestamp_test.cc in Sources */, + ABF6506C201131F8005F2C74 /* timestamp_test.cc in Sources */, B68B1E012213A765008977EF /* to_string_apple_test.mm in Sources */, B696858E2214B53900271095 /* to_string_test.cc in Sources */, D50232D696F19C2881AC01CE /* token_test.cc in Sources */, @@ -5851,12 +5865,13 @@ A4757C171D2407F61332EA38 /* byte_stream_cpp_test.cc in Sources */, 35503DAC4FD0D765A2DE82A8 /* byte_stream_test.cc in Sources */, 52967C3DD7896BFA48840488 /* byte_string_test.cc in Sources */, + 8ED98C1CF17399FC0990DD4B /* canonify_eq_test.cc in Sources */, 338DFD5BCD142DF6C82A0D56 /* cc_compilation_test.cc in Sources */, 4A6B1E0B678E31367A55DC17 /* collection_group_test.cc in Sources */, BACA9CDF0F2E926926B5F36F /* collection_test.cc in Sources */, 4C66806697D7BCA730FA3697 /* common.pb.cc in Sources */, - C885C84B7549C860784E4E3C /* comparison_test.cc in Sources */, EC7A44792A5513FBB6F501EE /* comparison_test.cc in Sources */, + C885C84B7549C860784E4E3C /* comparison_test.cc in Sources */, 62C86789E72E624A27BF6AE5 /* 
complex_test.cc in Sources */, BDF3A6C121F2773BB3A347A7 /* counting_query_engine.cc in Sources */, 1F4930A8366F74288121F627 /* create_noop_connectivity_monitor.cc in Sources */, @@ -6015,8 +6030,8 @@ D928302820891CCCAD0437DD /* thread_safe_memoizer_testing.cc in Sources */, C099AEC05D44976755BA32A2 /* thread_safe_memoizer_testing_test.cc in Sources */, 2D220B9ABFA36CD7AC43D0A7 /* time_testing.cc in Sources */, - 06B8A653BC26CB2C96024993 /* timestamp_test.cc in Sources */, D91D86B29B86A60C05879A48 /* timestamp_test.cc in Sources */, + 06B8A653BC26CB2C96024993 /* timestamp_test.cc in Sources */, 60260A06871DCB1A5F3448D3 /* to_string_apple_test.mm in Sources */, ECED3B60C5718B085AAB14FB /* to_string_test.cc in Sources */, F0EA84FB66813F2BC164EF7C /* token_test.cc in Sources */, diff --git a/Firestore/Example/Tests/SpecTests/FSTMockDatastore.mm b/Firestore/Example/Tests/SpecTests/FSTMockDatastore.mm index 27ad5e9c7c9..d75302e77d9 100644 --- a/Firestore/Example/Tests/SpecTests/FSTMockDatastore.mm +++ b/Firestore/Example/Tests/SpecTests/FSTMockDatastore.mm @@ -109,7 +109,7 @@ bool IsOpen() const override { } void WatchQuery(const TargetData& query) override { - LOG_DEBUG("WatchQuery: %s: %s, %s", query.target_id(), query.target().ToString(), + LOG_DEBUG("WatchQuery: %s: %s, %s", query.target_id(), query.target_or_pipeline().ToString(), query.resume_token().ToString()); // Snapshot version is ignored on the wire diff --git a/Firestore/Example/Tests/SpecTests/FSTSpecTests.mm b/Firestore/Example/Tests/SpecTests/FSTSpecTests.mm index f2b8ca2e4be..40404d9242a 100644 --- a/Firestore/Example/Tests/SpecTests/FSTSpecTests.mm +++ b/Firestore/Example/Tests/SpecTests/FSTSpecTests.mm @@ -982,7 +982,7 @@ - (void)validateActiveTargets { const TargetData &actual = found->second; XCTAssertEqual(actual.purpose(), targetData.purpose()); - XCTAssertEqual(actual.target(), targetData.target()); + XCTAssertEqual(actual.target_or_pipeline(), targetData.target_or_pipeline()); 
XCTAssertEqual(actual.target_id(), targetData.target_id()); XCTAssertEqual(actual.snapshot_version(), targetData.snapshot_version()); XCTAssertEqual(actual.resume_token(), targetData.resume_token()); diff --git a/Firestore/core/src/api/api_fwd.h b/Firestore/core/src/api/api_fwd.h index ded3bfb76af..38f521a1948 100644 --- a/Firestore/core/src/api/api_fwd.h +++ b/Firestore/core/src/api/api_fwd.h @@ -46,6 +46,7 @@ class Firestore; class ListenerRegistration; class Pipeline; class PipelineSnapshot; +class RealtimePipeline; class Query; class QuerySnapshot; class Settings; diff --git a/Firestore/core/src/api/expressions.cc b/Firestore/core/src/api/expressions.cc index 5c76d880eda..495314624a5 100644 --- a/Firestore/core/src/api/expressions.cc +++ b/Firestore/core/src/api/expressions.cc @@ -50,6 +50,10 @@ google_firestore_v1_Value Constant::to_proto() const { return *model::DeepClone(*value_).release(); } +const google_firestore_v1_Value& Constant::value() const { + return *value_; +} + std::unique_ptr Constant::ToEvaluable() const { return std::make_unique( std::make_unique(*this)); diff --git a/Firestore/core/src/api/expressions.h b/Firestore/core/src/api/expressions.h index ec412f71fd9..c90dcce2eb7 100644 --- a/Firestore/core/src/api/expressions.h +++ b/Firestore/core/src/api/expressions.h @@ -80,6 +80,8 @@ class Constant : public Expr { } google_firestore_v1_Value to_proto() const override; + const google_firestore_v1_Value& value() const; + std::unique_ptr ToEvaluable() const override; private: diff --git a/Firestore/core/src/api/ordering.h b/Firestore/core/src/api/ordering.h index 000c15a8204..a512c8585d5 100644 --- a/Firestore/core/src/api/ordering.h +++ b/Firestore/core/src/api/ordering.h @@ -49,10 +49,18 @@ class Ordering { return expr_.get(); } + const std::shared_ptr expr_shared() const { + return expr_; + } + Direction direction() const { return direction_; } + Ordering WithReversedDirection() const { + return Ordering(expr_, direction_ == ASCENDING ? 
DESCENDING : ASCENDING); + } + google_firestore_v1_Value to_proto() const; private: diff --git a/Firestore/core/src/api/query_snapshot.cc b/Firestore/core/src/api/query_snapshot.cc index e24d0fc4b1b..cc310161dbf 100644 --- a/Firestore/core/src/api/query_snapshot.cc +++ b/Firestore/core/src/api/query_snapshot.cc @@ -110,7 +110,8 @@ void QuerySnapshot::ForEachChange( // Special case the first snapshot because index calculation is easy and // fast. Also all changes on the first snapshot are adds so there are also // no metadata-only changes to filter out. - DocumentComparator doc_comparator = snapshot_.query().Comparator(); + DocumentComparator doc_comparator = + snapshot_.query_or_pipeline().Comparator(); absl::optional last_document; size_t index = 0; for (const DocumentViewChange& change : snapshot_.document_changes()) { diff --git a/Firestore/core/src/api/realtime_pipeline.cc b/Firestore/core/src/api/realtime_pipeline.cc index 62b3e71a3e5..9a944d4575c 100644 --- a/Firestore/core/src/api/realtime_pipeline.cc +++ b/Firestore/core/src/api/realtime_pipeline.cc @@ -19,6 +19,7 @@ #include #include +#include "Firestore/core/src/core/pipeline_util.h" #include "Firestore/core/src/remote/serializer.h" namespace firebase { @@ -27,8 +28,26 @@ namespace api { RealtimePipeline::RealtimePipeline( std::vector> stages, - remote::Serializer serializer) - : stages_(std::move(stages)), serializer_(serializer) { + std::unique_ptr serializer) + : stages_(std::move(stages)), serializer_(std::move(serializer)) { + this->rewritten_stages_ = core::RewriteStages(this->stages()); +} + +RealtimePipeline::RealtimePipeline(const RealtimePipeline& other) + : stages_(other.stages_), + rewritten_stages_(other.rewritten_stages_), + serializer_(std::make_unique( + other.serializer_->database_id())) { +} + +RealtimePipeline& RealtimePipeline::operator=(const RealtimePipeline& other) { + if (this != &other) { + stages_ = other.stages_; + rewritten_stages_ = other.rewritten_stages_; + serializer_ = + 
std::make_unique(other.serializer_->database_id()); + } + return *this; } RealtimePipeline RealtimePipeline::AddingStage( @@ -36,7 +55,8 @@ RealtimePipeline RealtimePipeline::AddingStage( auto copy = std::vector>(this->stages_); copy.push_back(stage); - return {copy, serializer_}; + return {copy, + std::make_unique(serializer_->database_id())}; } const std::vector>& RealtimePipeline::stages() @@ -49,13 +69,8 @@ RealtimePipeline::rewritten_stages() const { return this->rewritten_stages_; } -void RealtimePipeline::SetRewrittentStages( - std::vector> stages) { - this->rewritten_stages_ = std::move(stages); -} - -EvaluateContext RealtimePipeline::evaluate_context() { - return EvaluateContext(&serializer_); +EvaluateContext RealtimePipeline::evaluate_context() const { + return EvaluateContext(serializer_.get()); } } // namespace api diff --git a/Firestore/core/src/api/realtime_pipeline.h b/Firestore/core/src/api/realtime_pipeline.h index 225cbcd9f3b..ab81d264a1c 100644 --- a/Firestore/core/src/api/realtime_pipeline.h +++ b/Firestore/core/src/api/realtime_pipeline.h @@ -18,36 +18,37 @@ #define FIRESTORE_CORE_SRC_API_REALTIME_PIPELINE_H_ #include -#include #include -#include "Firestore/core/src/api/firestore.h" -#include "Firestore/core/src/api/pipeline_snapshot.h" #include "Firestore/core/src/api/stages.h" -#include "Firestore/core/src/remote/serializer.h" namespace firebase { namespace firestore { +namespace remote { +class Serializer; +} // namespace remote + namespace api { class RealtimePipeline { public: RealtimePipeline(std::vector> stages, - remote::Serializer serializer); + std::unique_ptr serializer); + + RealtimePipeline(const RealtimePipeline& other); + RealtimePipeline& operator=(const RealtimePipeline& other); RealtimePipeline AddingStage(std::shared_ptr stage); const std::vector>& stages() const; const std::vector>& rewritten_stages() const; - void SetRewrittentStages(std::vector>); - - EvaluateContext evaluate_context(); + EvaluateContext 
evaluate_context() const; private: std::vector> stages_; std::vector> rewritten_stages_; - remote::Serializer serializer_; + std::unique_ptr serializer_; }; } // namespace api diff --git a/Firestore/core/src/api/stages.cc b/Firestore/core/src/api/stages.cc index 37fadf386eb..7d6c7e861e8 100644 --- a/Firestore/core/src/api/stages.cc +++ b/Firestore/core/src/api/stages.cc @@ -54,7 +54,6 @@ google_firestore_v1_Pipeline_Stage CollectionSource::to_proto() const { result.args = nanopb::MakeArray(1); result.args[0].which_value_type = google_firestore_v1_Value_reference_value_tag; - // TODO: use EncodeResourceName instead result.args[0].reference_value = nanopb::MakeBytesArray(this->path_.CanonicalString()); @@ -106,10 +105,12 @@ google_firestore_v1_Pipeline_Stage DocumentsSource::to_proto() const { result.args_count = documents_.size(); result.args = nanopb::MakeArray(result.args_count); - for (size_t i = 0; i < documents_.size(); ++i) { + size_t i = 0; + for (const auto& document : documents_) { result.args[i].which_value_type = google_firestore_v1_Value_reference_value_tag; - result.args[i].reference_value = nanopb::MakeBytesArray(documents_[i]); + result.args[i].reference_value = nanopb::MakeBytesArray(document); + i++; } result.options_count = 0; @@ -478,6 +479,19 @@ model::PipelineInputOutputVector DatabaseSource::Evaluate( return results; } +model::PipelineInputOutputVector DocumentsSource::Evaluate( + const EvaluateContext& /*context*/, + const model::PipelineInputOutputVector& inputs) const { + model::PipelineInputOutputVector results; + for (const model::PipelineInputOutput& input : inputs) { + if (input.is_found_document() && + documents_.count(input.key().path().CanonicalString()) > 0) { + results.push_back(input); + } + } + return results; +} + model::PipelineInputOutputVector Where::Evaluate( const EvaluateContext& context, const model::PipelineInputOutputVector& inputs) const { @@ -499,16 +513,29 @@ model::PipelineInputOutputVector Where::Evaluate( 
model::PipelineInputOutputVector LimitStage::Evaluate( const EvaluateContext& /*context*/, const model::PipelineInputOutputVector& inputs) const { + model::PipelineInputOutputVector::const_iterator begin; + model::PipelineInputOutputVector::const_iterator end; + size_t count; + if (limit_ < 0) { - // Or handle as error? Assuming non-negative limit. - return {}; - } - size_t count = static_cast(limit_); - if (count > inputs.size()) { - count = inputs.size(); + // if limit_ is negative, we treat it as limit to last, returns the last + // limit_ documents. + count = static_cast(-limit_); + if (count > inputs.size()) { + count = inputs.size(); + } + begin = inputs.end() - count; + end = inputs.end(); + } else { + count = static_cast(limit_); + if (count > inputs.size()) { + count = inputs.size(); + } + begin = inputs.begin(); + end = inputs.begin() + count; } - return model::PipelineInputOutputVector(inputs.begin(), - inputs.begin() + count); + + return model::PipelineInputOutputVector(begin, end); } model::PipelineInputOutputVector SortStage::Evaluate( diff --git a/Firestore/core/src/api/stages.h b/Firestore/core/src/api/stages.h index 27eb7ca9936..2bc53b01281 100644 --- a/Firestore/core/src/api/stages.h +++ b/Firestore/core/src/api/stages.h @@ -18,9 +18,9 @@ #define FIRESTORE_CORE_SRC_API_STAGES_H_ #include +#include #include #include -#include #include #include "Firestore/Protos/nanopb/google/firestore/v1/document.nanopb.h" @@ -85,6 +85,10 @@ class CollectionSource : public EvaluableStage { return "collection"; } + std::string path() const { + return path_.CanonicalString(); + } + model::PipelineInputOutputVector Evaluate( const EvaluateContext& context, const model::PipelineInputOutputVector& inputs) const override; @@ -122,6 +126,10 @@ class CollectionGroupSource : public EvaluableStage { return "collection_group"; } + absl::string_view collection_id() const { + return collection_id_; + } + model::PipelineInputOutputVector Evaluate( const EvaluateContext& context, 
const model::PipelineInputOutputVector& inputs) const override; @@ -130,21 +138,29 @@ class CollectionGroupSource : public EvaluableStage { std::string collection_id_; }; -class DocumentsSource : public Stage { +class DocumentsSource : public EvaluableStage { public: - explicit DocumentsSource(std::vector documents) - : documents_(std::move(documents)) { + explicit DocumentsSource(const std::vector& documents) + : documents_(documents.cbegin(), documents.cend()) { } ~DocumentsSource() override = default; google_firestore_v1_Pipeline_Stage to_proto() const override; - absl::string_view name() const { + model::PipelineInputOutputVector Evaluate( + const EvaluateContext& context, + const model::PipelineInputOutputVector& inputs) const override; + + absl::string_view name() const override { return "documents"; } + std::vector documents() const { + return std::vector(documents_.cbegin(), documents_.cend()); + } + private: - std::vector documents_; + std::set documents_; }; class AddFields : public Stage { @@ -190,6 +206,10 @@ class Where : public EvaluableStage { return "where"; } + const Expr* expr() const { + return expr_.get(); + } + model::PipelineInputOutputVector Evaluate( const EvaluateContext& context, const model::PipelineInputOutputVector& inputs) const override; @@ -250,6 +270,10 @@ class LimitStage : public EvaluableStage { return "limit"; } + int64_t limit() const { + return limit_; + } + model::PipelineInputOutputVector Evaluate( const EvaluateContext& context, const model::PipelineInputOutputVector& inputs) const override; diff --git a/Firestore/core/src/core/core_fwd.h b/Firestore/core/src/core/core_fwd.h index 52f37bfbce6..dd294a8114c 100644 --- a/Firestore/core/src/core/core_fwd.h +++ b/Firestore/core/src/core/core_fwd.h @@ -55,6 +55,7 @@ class QueryListener; class SyncEngine; class SyncEngineCallback; class Target; +class TargetOrPipeline; class TargetIdGenerator; class Transaction; class ViewDocumentChanges; diff --git 
a/Firestore/core/src/core/event_manager.cc b/Firestore/core/src/core/event_manager.cc index d5c3f3542b9..f366c0879a3 100644 --- a/Firestore/core/src/core/event_manager.cc +++ b/Firestore/core/src/core/event_manager.cc @@ -36,7 +36,12 @@ EventManager::EventManager(QueryEventSource* query_event_source) model::TargetId EventManager::AddQueryListener( std::shared_ptr listener) { - const Query& query = listener->query(); + const QueryOrPipeline& query_or_pipeline = listener->query(); + if (query_or_pipeline.IsPipeline()) { + HARD_FAIL("Unimplemented"); + } + + const auto& query = query_or_pipeline.query(); ListenerSetupAction listener_action = ListenerSetupAction::NoSetupActionRequired; @@ -92,7 +97,12 @@ model::TargetId EventManager::AddQueryListener( void EventManager::RemoveQueryListener( std::shared_ptr listener) { - const Query& query = listener->query(); + const auto& query_or_pipeline = listener->query(); + if (query_or_pipeline.IsPipeline()) { + HARD_FAIL("Unimplemented"); + } + + const auto& query = query_or_pipeline.query(); ListenerRemovalAction listener_action = ListenerRemovalAction::NoRemovalActionRequired; @@ -170,7 +180,7 @@ void EventManager::OnViewSnapshots( std::vector&& snapshots) { bool raised_event = false; for (ViewSnapshot& snapshot : snapshots) { - const Query& query = snapshot.query(); + const QueryOrPipeline& query = snapshot.query_or_pipeline(); auto found_iter = queries_.find(query); if (found_iter != queries_.end()) { QueryListenersInfo& query_info = found_iter->second; @@ -187,7 +197,7 @@ void EventManager::OnViewSnapshots( } } -void EventManager::OnError(const core::Query& query, +void EventManager::OnError(const core::QueryOrPipeline& query, const util::Status& error) { auto found_iter = queries_.find(query); if (found_iter == queries_.end()) { diff --git a/Firestore/core/src/core/event_manager.h b/Firestore/core/src/core/event_manager.h index 9ee783a85bd..b1ba6217e0f 100644 --- a/Firestore/core/src/core/event_manager.h +++ 
b/Firestore/core/src/core/event_manager.h @@ -71,7 +71,8 @@ class EventManager : public SyncEngineCallback { // Implements `QueryEventCallback`. void HandleOnlineStateChange(model::OnlineState online_state) override; void OnViewSnapshots(std::vector&& snapshots) override; - void OnError(const core::Query& query, const util::Status& error) override; + void OnError(const core::QueryOrPipeline& query, + const util::Status& error) override; private: /** @@ -128,7 +129,7 @@ class EventManager : public SyncEngineCallback { QueryEventSource* query_event_source_ = nullptr; model::OnlineState online_state_ = model::OnlineState::Unknown; - std::unordered_map queries_; + std::unordered_map queries_; std::unordered_set>> snapshots_in_sync_listeners_; }; diff --git a/Firestore/core/src/core/firestore_client.cc b/Firestore/core/src/core/firestore_client.cc index 0c6ac315dea..314c8f207e7 100644 --- a/Firestore/core/src/core/firestore_client.cc +++ b/Firestore/core/src/core/firestore_client.cc @@ -421,7 +421,9 @@ bool FirestoreClient::is_terminated() const { } std::shared_ptr FirestoreClient::ListenToQuery( - Query query, ListenOptions options, ViewSnapshotSharedListener&& listener) { + QueryOrPipeline query, + ListenOptions options, + ViewSnapshotSharedListener&& listener) { VerifyNotTerminated(); auto query_listener = QueryListener::Create( diff --git a/Firestore/core/src/core/firestore_client.h b/Firestore/core/src/core/firestore_client.h index 24c0e8c396a..1d54e38ecb7 100644 --- a/Firestore/core/src/core/firestore_client.h +++ b/Firestore/core/src/core/firestore_client.h @@ -36,6 +36,7 @@ #include "Firestore/core/src/util/executor.h" #include "Firestore/core/src/util/nullability.h" #include "Firestore/core/src/util/status_fwd.h" +#include "pipeline_util.h" namespace firebase { namespace firestore { @@ -117,7 +118,7 @@ class FirestoreClient : public std::enable_shared_from_this { /** Starts listening to a query. 
*/ std::shared_ptr ListenToQuery( - Query query, + QueryOrPipeline query, ListenOptions options, ViewSnapshotSharedListener&& listener); diff --git a/Firestore/core/src/core/pipeline_run.cc b/Firestore/core/src/core/pipeline_run.cc index f27a15abdad..5cecde2365d 100644 --- a/Firestore/core/src/core/pipeline_run.cc +++ b/Firestore/core/src/core/pipeline_run.cc @@ -30,10 +30,6 @@ namespace core { std::vector RunPipeline( api::RealtimePipeline& pipeline, const std::vector& inputs) { - if (pipeline.rewritten_stages().empty()) { - pipeline.SetRewrittentStages(RewriteStages(pipeline.stages())); - } - auto current = std::vector(inputs); for (const auto& stage : pipeline.rewritten_stages()) { current = stage->Evaluate(pipeline.evaluate_context(), current); diff --git a/Firestore/core/src/core/pipeline_util.cc b/Firestore/core/src/core/pipeline_util.cc index c2381c768d1..e1fe9b7ea97 100644 --- a/Firestore/core/src/core/pipeline_util.cc +++ b/Firestore/core/src/core/pipeline_util.cc @@ -16,11 +16,38 @@ #include "Firestore/core/src/core/pipeline_util.h" +#include + +#include +#include +#include +#include + #include "Firestore/core/src/api/expressions.h" +#include "Firestore/core/src/api/ordering.h" #include "Firestore/core/src/api/realtime_pipeline.h" #include "Firestore/core/src/api/stages.h" +#include "Firestore/core/src/core/bound.h" +#include "Firestore/core/src/core/filter.h" +#include "Firestore/core/src/core/order_by.h" +#include "Firestore/core/src/core/query.h" +#include "Firestore/core/src/model/document.h" +#include "Firestore/core/src/model/document_key.h" +#include "Firestore/core/src/model/document_set.h" +#include "Firestore/core/src/model/field_path.h" #include "Firestore/core/src/model/mutable_document.h" +#include "Firestore/core/src/model/value_util.h" #include "Firestore/core/src/remote/serializer.h" +#include "Firestore/core/src/util/comparison.h" +#include "Firestore/core/src/util/hard_assert.h" +#include "Firestore/core/src/util/log.h" +#include 
"absl/strings/str_cat.h" +#include "absl/strings/str_format.h" +#include "absl/strings/str_join.h" +#include "absl/types/optional.h" +#include "absl/types/variant.h" +#include "expressions_eval.h" +#include "pipeline_run.h" // Contains RunPipeline, EvaluateContext namespace firebase { namespace firestore { @@ -34,6 +61,26 @@ auto NewKeyOrdering() { api::Ordering::Direction::ASCENDING); } +// Helper to get orderings from the last effective SortStage +const std::vector& GetLastEffectiveSortOrderings( + const api::RealtimePipeline& pipeline) { + const auto& stages = pipeline.rewritten_stages(); + for (auto it = stages.rbegin(); it != stages.rend(); ++it) { + if (auto sort_stage = std::dynamic_pointer_cast(*it)) { + return sort_stage->orders(); + } + // TODO(pipeline): Consider stages that might invalidate ordering later, + // like findNearest + } + HARD_FAIL( + "RealtimePipeline must contain at least one Sort stage " + "(ensured by RewriteStages)."); + // Return a reference to avoid copying, but satisfy compiler in HARD_FAIL + // case. This line should be unreachable.
+ static const std::vector empty_orderings; + return empty_orderings; +} + } // namespace std::vector> RewriteStages( @@ -87,6 +134,689 @@ std::vector> RewriteStages( return new_stages; } +// Anonymous namespace for canonicalization helpers +namespace { + +std::string CanonifyConstant(const api::Constant* constant) { + return model::CanonicalId(constant->value()); +} + +// Accepts raw pointer because that's what api::Ordering::expr() returns +std::string CanonifyExpr(const api::Expr* expr) { + HARD_ASSERT(expr != nullptr, "Canonify a null expr"); + + if (auto field_ref = dynamic_cast(expr)) { + return absl::StrFormat("fld(%s)", + field_ref->field_path().CanonicalString()); + } else if (auto constant = dynamic_cast(expr)) { + return absl::StrFormat("cst(%s)", CanonifyConstant(constant)); + } else if (auto func = dynamic_cast(expr)) { + std::vector param_strings; + for (const auto& param_ptr : func->params()) { + param_strings.push_back( + CanonifyExpr(param_ptr.get())); // Pass raw pointer from shared_ptr + } + return absl::StrFormat("fn(%s[%s])", func->name(), + absl::StrJoin(param_strings, ",")); + } + + HARD_FAIL("Canonify a unrecognized expr"); +} + +std::string CanonifySortOrderings(const std::vector& orders) { + std::vector entries; + for (const auto& order : orders) { + // Use api::Ordering::Direction::ASCENDING + entries.push_back(absl::StrCat( + CanonifyExpr(order.expr()), // order.expr() returns const api::Expr* + order.direction() == api::Ordering::Direction::ASCENDING ? "asc" + : "desc")); + } + return absl::StrJoin(entries, ","); +} + +std::string CanonifyStage(const std::shared_ptr& stage) { + HARD_ASSERT(stage != nullptr, "Canonify a null stage"); + + // Placeholder implementation - needs details for each stage type + // (CollectionSource, Where, Sort, Limit, Select, AddFields, Aggregate, etc.) + // Use dynamic_pointer_cast to check types. 
+ if (auto collection_source = + std::dynamic_pointer_cast(stage)) { + return absl::StrFormat("%s(%s)", collection_source->name(), + collection_source->path()); + } else if (auto collection_group = + std::dynamic_pointer_cast(stage)) { + return absl::StrFormat("%s(%s)", collection_group->name(), + collection_group->collection_id()); + } else if (auto documents_source = + std::dynamic_pointer_cast(stage)) { + std::vector sorted_documents = documents_source->documents(); + return absl::StrFormat("%s(%s)", documents_source->name(), + absl::StrJoin(sorted_documents, ",")); + } else if (auto where_stage = std::dynamic_pointer_cast(stage)) { + return absl::StrFormat("%s(%s)", where_stage->name(), + CanonifyExpr(where_stage->expr())); + } else if (auto sort_stage = + std::dynamic_pointer_cast(stage)) { + return absl::StrFormat( + "%s(%s)", sort_stage->name(), + CanonifySortOrderings(sort_stage->orders())); // Use orders() getter + } else if (auto limit_stage = + std::dynamic_pointer_cast(stage)) { + return absl::StrFormat("%s(%d)", limit_stage->name(), limit_stage->limit()); + } + + HARD_FAIL(absl::StrFormat("Trying to canonify an unrecognized stage type %s", + stage->name()) + .c_str()); +} + +// Canonicalizes a RealtimePipeline by canonicalizing its stages. 
+std::string CanonifyPipeline(const api::RealtimePipeline& pipeline) { + std::vector stage_strings; + for (const auto& stage : pipeline.rewritten_stages()) { + stage_strings.push_back(CanonifyStage(stage)); + } + return absl::StrJoin(stage_strings, "|"); +} + +} // namespace + +// QueryOrPipeline member function implementations + +bool QueryOrPipeline::operator==(const QueryOrPipeline& other) const { + if (data_.index() != other.data_.index()) { + return false; // Different types stored + } + + if (IsPipeline()) { + // Compare pipelines by their canonical representation + return CanonifyPipeline(pipeline()) == CanonifyPipeline(other.pipeline()); + } else { + // Compare queries using Query::operator== + return query() == other.query(); + } +} + +size_t QueryOrPipeline::Hash() const { + if (IsPipeline()) { + // Compare pipelines by their canonical representation + return util::Hash(CanonifyPipeline(pipeline())); + } else { + return util::Hash(query()); + } +} + +std::string QueryOrPipeline::CanonicalId() const { + if (IsPipeline()) { + return CanonifyPipeline(pipeline()); + } else { + return query().CanonicalId(); + } +} + +std::string QueryOrPipeline::ToString() const { + if (IsPipeline()) { + // Use the canonical representation as the string representation for + // pipelines + return CanonicalId(); + } else { + return query().ToString(); + } +} + +TargetOrPipeline QueryOrPipeline::ToTargetOrPipeline() const { + if (IsPipeline()) { + return TargetOrPipeline(pipeline()); + } + + return TargetOrPipeline(query().ToTarget()); +} + +bool QueryOrPipeline::MatchesAllDocuments() const { + if (IsPipeline()) { + for (const auto& stage : pipeline().rewritten_stages()) { + // Check for LimitStage + if (stage->name() == "limit") { + return false; + } + + // Check for Where stage + if (auto where_stage = std::dynamic_pointer_cast(stage)) { + // Check if it's the special 'exists(__name__)' case + if (auto func_expr = + dynamic_cast(where_stage->expr())) { + if (func_expr->name() 
== "exists" && + func_expr->params().size() == 1) { + if (auto field_expr = dynamic_cast( + func_expr->params()[0].get())) { + if (field_expr->field_path().IsKeyFieldPath()) { + continue; // This specific 'exists(__name__)' filter doesn't + // count + } + } + } + } + return false; // Any other Where stage means it filters documents + } + // TODO: Add checks for other filtering stages like Aggregate, + // Distinct, FindNearest once they are implemented in C++. + } + return true; // No filtering stages found (besides allowed ones) + } + + return query().MatchesAllDocuments(); +} + +bool QueryOrPipeline::has_limit() const { + if (this->IsPipeline()) { + for (const auto& stage : this->pipeline().rewritten_stages()) { + // Check for LimitStage + if (stage->name() == "limit") { + return true; + } + // TODO: need to check for other stages that could have a limit, like + // findNearest + } + + return false; + } + + return query().has_limit(); +} + +bool QueryOrPipeline::Matches(const model::Document& doc) const { + if (IsPipeline()) { + const auto result = RunPipeline( + const_cast(this->pipeline()), {doc.get()}); + return result.size() > 0; + } + + return query().Matches(doc); +} + +model::DocumentComparator QueryOrPipeline::Comparator() const { + if (IsPipeline()) { + // Capture pipeline by reference. Orderings captured by value inside lambda. + const auto& p = pipeline(); + const auto& orderings = GetLastEffectiveSortOrderings(p); + return model::DocumentComparator( + [&p, &orderings](const model::Document& d1, + const model::Document& d2) -> util::ComparisonResult { + auto context = + const_cast(p).evaluate_context(); + + for (const auto& ordering : orderings) { + const api::Expr* expr = ordering.expr(); + HARD_ASSERT(expr != nullptr, "Ordering expression cannot be null"); + + // Evaluate expression for both documents using expr->Evaluate + // (assuming this method exists) Pass const references to documents. 
+ EvaluateResult left_value = + expr->ToEvaluable()->Evaluate(context, d1.get()); + EvaluateResult right_value = + expr->ToEvaluable()->Evaluate(context, d2.get()); + + // Compare results, using MinValue for error + util::ComparisonResult comparison = model::Compare( + left_value.IsErrorOrUnset() ? model::MinValue() + : *left_value.value(), + right_value.IsErrorOrUnset() ? model::MinValue() + : *right_value.value()); + + if (comparison != util::ComparisonResult::Same) { + return ordering.direction() == api::Ordering::Direction::ASCENDING + ? comparison + // reverse comparison + : comparison == util::ComparisonResult::Ascending + ? util::ComparisonResult::Descending + : util::ComparisonResult::Ascending; + } + } + return util::ComparisonResult::Same; + }); + } + + return query().Comparator(); +} + +// TargetOrPipeline member function implementations + +bool TargetOrPipeline::operator==(const TargetOrPipeline& other) const { + if (data_.index() != other.data_.index()) { + return false; // Different types stored + } + + if (IsPipeline()) { + // Compare pipelines by their canonical representation + return CanonifyPipeline(pipeline()) == CanonifyPipeline(other.pipeline()); + } else { + // Compare targets using Target::operator== + return target() == other.target(); + } +} + +size_t TargetOrPipeline::Hash() const { + if (IsPipeline()) { + // Compare pipelines by their canonical representation + return util::Hash(CanonifyPipeline(pipeline())); + } else { + return util::Hash(target()); + } +} + +std::string TargetOrPipeline::CanonicalId() const { + if (IsPipeline()) { + return CanonifyPipeline(pipeline()); + } else { + return target().CanonicalId(); + } +} + +std::string TargetOrPipeline::ToString() const { + if (IsPipeline()) { + // Use the canonical representation as the string representation for + // pipelines + return CanonicalId(); + } else { + // Assuming Target has a ToString() method + return target().ToString(); + } +} + +PipelineFlavor GetPipelineFlavor(const 
api::RealtimePipeline&) { + // For now, it is only possible to construct RealtimePipeline that is kExact. + // PORTING NOTE: the typescript implementation support other flavors already, + // despite not being used. We can port that later. + return PipelineFlavor::kExact; +} + +PipelineSourceType GetPipelineSourceType( + const api::RealtimePipeline& pipeline) { + HARD_ASSERT(!pipeline.stages().empty(), + "Pipeline must have at least one stage to determine its source."); + const auto& first_stage = pipeline.stages().front(); + + if (std::dynamic_pointer_cast(first_stage)) { + return PipelineSourceType::kCollection; + } else if (std::dynamic_pointer_cast( + first_stage)) { + return PipelineSourceType::kCollectionGroup; + } else if (std::dynamic_pointer_cast( + first_stage)) { + return PipelineSourceType::kDatabase; + } else if (std::dynamic_pointer_cast( + first_stage)) { + return PipelineSourceType::kDocuments; + } + + return PipelineSourceType::kUnknown; +} + +absl::optional GetPipelineCollectionGroup( + const api::RealtimePipeline& pipeline) { + if (GetPipelineSourceType(pipeline) == PipelineSourceType::kCollectionGroup) { + HARD_ASSERT(!pipeline.stages().empty(), + "Pipeline source is CollectionGroup but stages are empty."); + const auto& first_stage = pipeline.stages().front(); + if (auto collection_group_source = + std::dynamic_pointer_cast( + first_stage)) { + return std::string{collection_group_source->collection_id()}; + } + } + return absl::nullopt; +} + +absl::optional GetPipelineCollection( + const api::RealtimePipeline& pipeline) { + if (GetPipelineSourceType(pipeline) == PipelineSourceType::kCollection) { + HARD_ASSERT(!pipeline.stages().empty(), + "Pipeline source is Collection but stages are empty."); + const auto& first_stage = pipeline.stages().front(); + if (auto collection_source = + std::dynamic_pointer_cast( + first_stage)) { + return {collection_source->path()}; + } + } + return absl::nullopt; +} + +absl::optional> GetPipelineDocuments( + const 
api::RealtimePipeline& pipeline) { + if (GetPipelineSourceType(pipeline) == PipelineSourceType::kDocuments) { + HARD_ASSERT(!pipeline.stages().empty(), + "Pipeline source is Documents but stages are empty."); + const auto& first_stage = pipeline.stages().front(); + if (auto documents_stage = + std::dynamic_pointer_cast( + first_stage)) { + return documents_stage->documents(); + } + } + return absl::nullopt; +} + +api::RealtimePipeline AsCollectionPipelineAtPath( + const api::RealtimePipeline& pipeline, const model::ResourcePath& path) { + std::vector> new_stages; + new_stages.reserve(pipeline.stages().size()); + + for (const auto& stage_ptr : pipeline.stages()) { + // Attempt to cast to CollectionGroupSource. + // We use dynamic_pointer_cast because stage_ptr is a shared_ptr. + if (auto collection_group_source = + std::dynamic_pointer_cast( + stage_ptr)) { + // If it's a CollectionGroupSource, replace it with a CollectionSource + // using the provided path. + new_stages.push_back( + std::make_shared(path.CanonicalString())); + } else { + // Otherwise, keep the original stage. + new_stages.push_back(stage_ptr); + } + } + + // Construct a new RealtimePipeline with the (potentially) modified stages + // and the original user_data_reader. + return api::RealtimePipeline(std::move(new_stages), + std::make_unique( + pipeline.evaluate_context().serializer())); +} + +absl::optional GetLastEffectiveLimit( + const api::RealtimePipeline& pipeline) { + const auto& stages = pipeline.rewritten_stages(); + for (auto it = stages.rbegin(); it != stages.rend(); ++it) { + const auto& stage_ptr = *it; + // Check if the stage is a LimitStage + if (auto limit_stage = + std::dynamic_pointer_cast(stage_ptr)) { + return limit_stage->limit(); + } + // TODO(pipeline): Consider other stages that might imply a limit, + // e.g., FindNearestStage, once they are implemented. 
+ } + return absl::nullopt; +} + +// --- ToPipelineStages and helpers --- + +namespace { // Anonymous namespace for ToPipelineStages helpers + +std::shared_ptr ToPipelineBooleanExpr(const Filter& filter) { + if (filter.type() != FieldFilter::Type::kCompositeFilter) { + const auto& field_filter = static_cast(filter); + auto api_field = std::make_shared(field_filter.field()); + auto exists_expr = std::make_shared( + "exists", std::vector>{api_field}); + + const google_firestore_v1_Value& value = field_filter.value(); + FieldFilter::Operator op = field_filter.op(); + + if (model::IsNaNValue(value)) { + auto is_nan_expr = std::make_shared( + "is_nan", std::vector>{api_field}); + if (op == FieldFilter::Operator::Equal) { + return std::make_shared( + "and", + std::vector>{exists_expr, is_nan_expr}); + } else { // Assuming NotEqual for IsNotNan + auto is_not_nan_expr = std::make_shared( + "not", std::vector>{is_nan_expr}); + return std::make_shared( + "and", std::vector>{exists_expr, + is_not_nan_expr}); + } + } else if (model::IsNullValue(value)) { + auto is_null_expr = std::make_shared( + "is_null", std::vector>{api_field}); + if (op == FieldFilter::Operator::Equal) { + return std::make_shared( + "and", + std::vector>{exists_expr, is_null_expr}); + } else { // Assuming NotEqual for IsNotNull + auto is_not_null_expr = std::make_shared( + "not", std::vector>{is_null_expr}); + return std::make_shared( + "and", std::vector>{exists_expr, + is_not_null_expr}); + } + } else { + auto api_constant = + std::make_shared(model::DeepClone(value)); + std::shared_ptr comparison_expr; + std::string func_name; + + switch (op) { + case FieldFilter::Operator::LessThan: + func_name = "lt"; + break; + case FieldFilter::Operator::LessThanOrEqual: + func_name = "lte"; + break; + case FieldFilter::Operator::GreaterThan: + func_name = "gt"; + break; + case FieldFilter::Operator::GreaterThanOrEqual: + func_name = "gte"; + break; + case FieldFilter::Operator::Equal: + func_name = "eq"; + break; + 
case FieldFilter::Operator::NotEqual: + func_name = "neq"; + break; + case FieldFilter::Operator::ArrayContains: + func_name = "array_contains"; + break; + case FieldFilter::Operator::In: + case FieldFilter::Operator::NotIn: + case FieldFilter::Operator::ArrayContainsAny: { + HARD_ASSERT( + model::IsArray(value), + "Value for IN, NOT_IN, ARRAY_CONTAINS_ANY must be an array."); + + if (op == FieldFilter::Operator::In) + func_name = "eq_any"; + else if (op == FieldFilter::Operator::NotIn) + func_name = "not_eq_any"; + else if (op == FieldFilter::Operator::ArrayContainsAny) + func_name = "array_contains_any"; + break; + } + default: + HARD_FAIL("Unexpected FieldFilter operator."); + } + comparison_expr = std::make_shared( + func_name, + std::vector>{api_field, api_constant}); + return std::make_shared( + "and", std::vector>{exists_expr, + comparison_expr}); + } + } else if (filter.type() == FieldFilter::Type::kCompositeFilter) { + const auto& composite_filter = static_cast(filter); + std::vector> sub_exprs; + for (const auto& sub_filter : composite_filter.filters()) { + sub_exprs.push_back(ToPipelineBooleanExpr(sub_filter)); + } + HARD_ASSERT(!sub_exprs.empty(), "Composite filter must have sub-filters."); + if (sub_exprs.size() == 1) return sub_exprs[0]; + + std::string func_name = + (composite_filter.op() == CompositeFilter::Operator::And) ? 
"and" + : "or"; + return std::make_shared(func_name, sub_exprs); + } + HARD_FAIL("Unknown filter type."); + return nullptr; +} + +std::vector ReverseOrderings( + const std::vector& orderings) { + std::vector reversed; + reversed.reserve(orderings.size()); + for (const auto& o : orderings) { + const api::Ordering new_order(o); + reversed.push_back(new_order.WithReversedDirection()); + } + return reversed; +} + +std::shared_ptr WhereConditionsFromCursor( + const Bound& bound, + const std::vector& orderings, + bool is_before) { + std::vector> cursors; + const auto& pos = bound.position(); + for (size_t i = 0; i < pos->values_count; ++i) { + cursors.push_back( + std::make_shared(model::DeepClone(pos->values[i]))); + } + + std::string func_name = is_before ? "lt" : "gt"; + std::string func_inclusive_name = is_before ? "lte" : "gte"; + ; + + std::vector> or_conditions; + for (size_t sub_end = 1; sub_end <= orderings.size(); ++sub_end) { + std::vector> conditions; + for (size_t index = 0; index < sub_end; ++index) { + if (index < sub_end - 1) { + conditions.push_back(std::make_shared( + "eq", std::vector>{ + orderings[index].expr_shared(), cursors[index]})); + } else if (bound.inclusive() && sub_end == orderings.size() - 1) { + conditions.push_back(std::make_shared( + func_inclusive_name, + std::vector>{ + orderings[index].expr_shared(), cursors[index]})); + } else { + conditions.push_back(std::make_shared( + func_name, std::vector>{ + orderings[index].expr_shared(), cursors[index]})); + } + } + + if (conditions.size() == 1) { + or_conditions.push_back(conditions[0]); + } else { + or_conditions.push_back( + std::make_shared("and", std::move(conditions))); + } + } + + if (or_conditions.empty()) return nullptr; + if (or_conditions.size() == 1) return or_conditions[0]; + return std::make_shared("or", or_conditions); +} + +} // anonymous namespace + +std::vector> ToPipelineStages( + const Query& query) { + std::vector> stages; + + // 1. 
Source Stage + if (query.IsCollectionGroupQuery()) { + stages.push_back(std::make_shared( + std::string(*query.collection_group()))); + } else if (query.IsDocumentQuery()) { + std::vector doc_paths; + doc_paths.push_back(query.path().CanonicalString()); + stages.push_back( + std::make_shared(std::move(doc_paths))); + } else { + stages.push_back(std::make_shared( + query.path().CanonicalString())); + } + + // 2. Filter Stages + for (const auto& filter : query.filters()) { + stages.push_back( + std::make_shared(ToPipelineBooleanExpr(filter))); + } + + // 3. OrderBy Existence Checks + const auto& query_order_bys = query.normalized_order_bys(); + if (!query_order_bys.empty()) { + std::vector> exists_exprs; + exists_exprs.reserve(query_order_bys.size()); + for (const auto& core_order_by : query_order_bys) { + exists_exprs.push_back(std::make_shared( + "exists", std::vector>{ + std::make_shared(core_order_by.field())})); + } + if (exists_exprs.size() == 1) { + stages.push_back(std::make_shared(exists_exprs[0])); + } else { + stages.push_back(std::make_shared( + std::make_shared("and", exists_exprs))); + } + } + + // 4. Orderings, Cursors, Limit + std::vector api_orderings; + api_orderings.reserve(query_order_bys.size()); + for (const auto& core_order_by : query_order_bys) { + api_orderings.emplace_back( + std::make_shared(core_order_by.field()), + core_order_by.direction() == Direction::Ascending + ? api::Ordering::Direction::ASCENDING + : api::Ordering::Direction::DESCENDING); + } + + if (!api_orderings.empty()) { + if (query.limit_type() == LimitType::Last) { + auto reversed_api_orderings = ReverseOrderings(api_orderings); + stages.push_back( + std::make_shared(reversed_api_orderings)); + + if (query.start_at()) { + // For limitToLast, start_at defines what to exclude from the *end* of + // the un-reversed result set. With reversed sort, this becomes a + // 'before' cursor. 
+ stages.push_back(std::make_shared(WhereConditionsFromCursor( + *query.start_at(), api_orderings, /*is_before=*/false))); + } + if (query.end_at()) { + // For limitToLast, end_at defines what to exclude from the *start* of + // the un-reversed result set. With reversed sort, this becomes an + // 'after' cursor. + stages.push_back(std::make_shared(WhereConditionsFromCursor( + *query.end_at(), api_orderings, /*is_before=*/true))); + } + stages.push_back(std::make_shared(query.limit())); + stages.push_back( + std::make_shared(api_orderings)); // Sort back + } else { + stages.push_back(std::make_shared(api_orderings)); + if (query.start_at()) { + stages.push_back(std::make_shared(WhereConditionsFromCursor( + *query.start_at(), api_orderings, /*is_before=*/true))); + } + if (query.end_at()) { + stages.push_back(std::make_shared(WhereConditionsFromCursor( + *query.end_at(), api_orderings, /*is_before=*/false))); + } + if (query.limit_type() == LimitType::First && query.limit()) { + stages.push_back(std::make_shared(query.limit())); + } + } + } else if (query.limit_type() == LimitType::First && query.limit()) { + // Limit without order by requires a default sort by __name__ + stages.push_back(std::make_shared( + std::vector{NewKeyOrdering()})); + stages.push_back(std::make_shared(query.limit())); + } + + return stages; +} + } // namespace core } // namespace firestore } // namespace firebase diff --git a/Firestore/core/src/core/pipeline_util.h b/Firestore/core/src/core/pipeline_util.h index 063d1414489..050994bce40 100644 --- a/Firestore/core/src/core/pipeline_util.h +++ b/Firestore/core/src/core/pipeline_util.h @@ -18,12 +18,16 @@ #define FIRESTORE_CORE_SRC_CORE_PIPELINE_UTIL_H_ #include +#include +#include +#include "absl/types/optional.h" +#include "absl/types/variant.h" -#include "Firestore/Protos/nanopb/google/firestore/v1/document.nanopb.h" #include "Firestore/core/src/api/expressions.h" #include "Firestore/core/src/api/realtime_pipeline.h" #include 
"Firestore/core/src/api/stages.h" -#include "Firestore/core/src/model/mutable_document.h" +#include "Firestore/core/src/core/query.h" +#include "Firestore/core/src/core/target.h" #include "Firestore/core/src/nanopb/message.h" namespace firebase { @@ -33,8 +37,184 @@ namespace core { std::vector> RewriteStages( const std::vector>&); +// A class that wraps a variant holding either a Target or a RealtimePipeline. +class TargetOrPipeline { + public: + // Default constructor (likely results in holding a default Target). + TargetOrPipeline() = default; + + // Constructors from Target and RealtimePipeline. + TargetOrPipeline(const Target& target) : data_(target) { + } // NOLINT + TargetOrPipeline(Target&& target) : data_(std::move(target)) { + } // NOLINT + TargetOrPipeline(const api::RealtimePipeline& pipeline) // NOLINT + : data_(pipeline) { + } + TargetOrPipeline(api::RealtimePipeline&& pipeline) // NOLINT + : data_(std::move(pipeline)) { + } + + // Copy and move constructors/assignment operators are implicitly generated. + + // Accessors + bool IsPipeline() const { + return absl::holds_alternative(data_); + } + const Target& target() const { + return absl::get(data_); + } + const api::RealtimePipeline& pipeline() const { + return absl::get(data_); + } + + // Member functions + bool operator==(const TargetOrPipeline& other) const; + size_t Hash() const; + std::string CanonicalId() const; + std::string ToString() const; // Added for consistency + + private: + absl::variant data_; +}; + +// != operator for TargetOrPipeline +inline bool operator!=(const TargetOrPipeline& lhs, + const TargetOrPipeline& rhs) { + return !(lhs == rhs); +} + +// A class that wraps a variant holding either a Query or a RealtimePipeline. +// This allows defining member functions like operator== and Hash. +class QueryOrPipeline { + public: + // Default constructor (likely results in holding a default Query). + QueryOrPipeline() = default; + + // Constructors from Query and RealtimePipeline. 
+ QueryOrPipeline(const Query& query) : data_(query) { + } // NOLINT + QueryOrPipeline(Query&& query) : data_(std::move(query)) { + } // NOLINT + QueryOrPipeline(const api::RealtimePipeline& pipeline) // NOLINT + : data_(pipeline) { + } + QueryOrPipeline(api::RealtimePipeline&& pipeline) // NOLINT + : data_(std::move(pipeline)) { + } + + // Copy and move constructors/assignment operators are implicitly generated. + + // Accessors + bool IsPipeline() const { + return absl::holds_alternative(data_); + } + const Query& query() const { + return absl::get(data_); + } + const api::RealtimePipeline& pipeline() const { + return absl::get(data_); + } + TargetOrPipeline ToTargetOrPipeline() const; + + bool MatchesAllDocuments() const; + bool has_limit() const; + bool Matches(const model::Document& doc) const; + model::DocumentComparator Comparator() const; + + // Member functions + bool operator==(const QueryOrPipeline& other) const; + size_t Hash() const; + std::string CanonicalId() const; + std::string ToString() const; + + private: + absl::variant data_; +}; + +// != operator for QueryOrPipeline +inline bool operator!=(const QueryOrPipeline& lhs, const QueryOrPipeline& rhs) { + return !(lhs == rhs); +} + +enum class PipelineFlavor { + // The pipeline exactly represents the query. + kExact, + // The pipeline has additional fields projected (e.g., __key__, + // __create_time__). + kAugmented, + // The pipeline has stages that remove document keys (e.g., aggregate, + // distinct). + kKeyless, +}; + +// Describes the source of a pipeline. +enum class PipelineSourceType { + kCollection, + kCollectionGroup, + kDatabase, + kDocuments, + kUnknown, +}; + +// Determines the flavor of the given pipeline based on its stages. +PipelineFlavor GetPipelineFlavor(const api::RealtimePipeline& pipeline); + +// Determines the source type of the given pipeline based on its first stage. 
+PipelineSourceType GetPipelineSourceType(const api::RealtimePipeline& pipeline); + +// Retrieves the collection group ID if the pipeline's source is a collection +// group. +absl::optional GetPipelineCollectionGroup( + const api::RealtimePipeline& pipeline); + +// Retrieves the collection path if the pipeline's source is a collection. +absl::optional GetPipelineCollection( + const api::RealtimePipeline& pipeline); + +// Retrieves the document pathes if the pipeline's source is a document source. +absl::optional> GetPipelineDocuments( + const api::RealtimePipeline& pipeline); + +// Creates a new pipeline by replacing CollectionGroupSource stages with +// CollectionSource stages using the provided path. +api::RealtimePipeline AsCollectionPipelineAtPath( + const api::RealtimePipeline& pipeline, const model::ResourcePath& path); + +absl::optional GetLastEffectiveLimit( + const api::RealtimePipeline& pipeline); + +/** + * Converts a core::Query into a sequence of pipeline stages. + * + * @param query The query to convert. + * @return A vector of stages representing the query logic. 
+ */ +std::vector> ToPipelineStages( + const Query& query); + } // namespace core } // namespace firestore } // namespace firebase +namespace std { + +template <> +struct hash { + size_t operator()( + const firebase::firestore::core::QueryOrPipeline& query) const { + return query.Hash(); + } +}; + +template <> +struct hash { + size_t operator()( + const firebase::firestore::core::TargetOrPipeline& target) const { + return target.Hash(); + } +}; + +} // namespace std + #endif // FIRESTORE_CORE_SRC_CORE_PIPELINE_UTIL_H_ diff --git a/Firestore/core/src/core/query_listener.cc b/Firestore/core/src/core/query_listener.cc index 579f35ab39c..2bedfc3fdd2 100644 --- a/Firestore/core/src/core/query_listener.cc +++ b/Firestore/core/src/core/query_listener.cc @@ -33,19 +33,21 @@ using model::TargetId; using util::Status; std::shared_ptr QueryListener::Create( - Query query, ListenOptions options, ViewSnapshotSharedListener&& listener) { + QueryOrPipeline query, + ListenOptions options, + ViewSnapshotSharedListener&& listener) { return std::make_shared(std::move(query), std::move(options), std::move(listener)); } std::shared_ptr QueryListener::Create( - Query query, ViewSnapshotSharedListener&& listener) { + QueryOrPipeline query, ViewSnapshotSharedListener&& listener) { return Create(std::move(query), ListenOptions::DefaultOptions(), std::move(listener)); } std::shared_ptr QueryListener::Create( - Query query, + QueryOrPipeline query, ListenOptions options, util::StatusOrCallback&& listener) { auto event_listener = @@ -55,12 +57,12 @@ std::shared_ptr QueryListener::Create( } std::shared_ptr QueryListener::Create( - Query query, util::StatusOrCallback&& listener) { + QueryOrPipeline query, util::StatusOrCallback&& listener) { return Create(std::move(query), ListenOptions::DefaultOptions(), std::move(listener)); } -QueryListener::QueryListener(Query query, +QueryListener::QueryListener(QueryOrPipeline query, ListenOptions options, ViewSnapshotSharedListener&& listener) : 
query_(std::move(query)), @@ -82,7 +84,7 @@ bool QueryListener::OnViewSnapshot(ViewSnapshot snapshot) { } } - snapshot = ViewSnapshot{snapshot.query(), + snapshot = ViewSnapshot{snapshot.query_or_pipeline(), snapshot.documents(), snapshot.old_documents(), std::move(changes), @@ -185,9 +187,9 @@ void QueryListener::RaiseInitialEvent(const ViewSnapshot& snapshot) { "Trying to raise initial events for second time"); ViewSnapshot modified_snapshot = ViewSnapshot::FromInitialDocuments( - snapshot.query(), snapshot.documents(), snapshot.mutated_keys(), - snapshot.from_cache(), snapshot.excludes_metadata_changes(), - snapshot.has_cached_results()); + snapshot.query_or_pipeline(), snapshot.documents(), + snapshot.mutated_keys(), snapshot.from_cache(), + snapshot.excludes_metadata_changes(), snapshot.has_cached_results()); raised_initial_event_ = true; listener_->OnEvent(std::move(modified_snapshot)); } diff --git a/Firestore/core/src/core/query_listener.h b/Firestore/core/src/core/query_listener.h index 6b934a0de59..dd386e8334c 100644 --- a/Firestore/core/src/core/query_listener.h +++ b/Firestore/core/src/core/query_listener.h @@ -26,6 +26,7 @@ #include "Firestore/core/src/model/types.h" #include "Firestore/core/src/util/status_fwd.h" #include "absl/types/optional.h" +#include "pipeline_util.h" namespace firebase { namespace firestore { @@ -38,28 +39,28 @@ namespace core { class QueryListener { public: static std::shared_ptr Create( - Query query, + QueryOrPipeline query, ListenOptions options, ViewSnapshotSharedListener&& listener); static std::shared_ptr Create( - Query query, ViewSnapshotSharedListener&& listener); + QueryOrPipeline query, ViewSnapshotSharedListener&& listener); static std::shared_ptr Create( - Query query, + QueryOrPipeline query, ListenOptions options, util::StatusOrCallback&& listener); static std::shared_ptr Create( - Query query, util::StatusOrCallback&& listener); + QueryOrPipeline query, util::StatusOrCallback&& listener); - QueryListener(Query 
query, + QueryListener(QueryOrPipeline query, ListenOptions options, ViewSnapshotSharedListener&& listener); virtual ~QueryListener() = default; - const Query& query() const { + const QueryOrPipeline& query() const { return query_; } @@ -91,7 +92,7 @@ class QueryListener { bool ShouldRaiseEvent(const ViewSnapshot& snapshot) const; void RaiseInitialEvent(const ViewSnapshot& snapshot); - Query query_; + QueryOrPipeline query_; ListenOptions options_; /** diff --git a/Firestore/core/src/core/sync_engine.cc b/Firestore/core/src/core/sync_engine.cc index 77223cb1fed..87be4cbfcd4 100644 --- a/Firestore/core/src/core/sync_engine.cc +++ b/Firestore/core/src/core/sync_engine.cc @@ -19,6 +19,7 @@ #include "Firestore/core/include/firebase/firestore/firestore_errors.h" #include "Firestore/core/src/bundle/bundle_element.h" #include "Firestore/core/src/bundle/bundle_loader.h" +#include "Firestore/core/src/core/pipeline_util.h" #include "Firestore/core/src/core/sync_engine_callback.h" #include "Firestore/core/src/core/transaction.h" #include "Firestore/core/src/core/transaction_runner.h" @@ -104,13 +105,15 @@ void SyncEngine::AssertCallbackExists(absl::string_view source) { "Tried to call '%s' before callback was registered.", source); } -TargetId SyncEngine::Listen(Query query, bool should_listen_to_remote) { +TargetId SyncEngine::Listen(QueryOrPipeline query, + bool should_listen_to_remote) { AssertCallbackExists("Listen"); HARD_ASSERT(query_views_by_query_.find(query) == query_views_by_query_.end(), "We already listen to query: %s", query.ToString()); - TargetData target_data = local_store_->AllocateTarget(query.ToTarget()); + TargetData target_data = + local_store_->AllocateTarget(query.ToTargetOrPipeline()); TargetId target_id = target_data.target_id(); nanopb::ByteString resume_token = target_data.resume_token(); @@ -128,7 +131,9 @@ TargetId SyncEngine::Listen(Query query, bool should_listen_to_remote) { } ViewSnapshot SyncEngine::InitializeViewAndComputeSnapshot( - const 
Query& query, TargetId target_id, nanopb::ByteString resume_token) { + const QueryOrPipeline& query, + TargetId target_id, + nanopb::ByteString resume_token) { QueryResult query_result = local_store_->ExecuteQuery(query, /* use_previous_results= */ true); @@ -137,7 +142,7 @@ ViewSnapshot SyncEngine::InitializeViewAndComputeSnapshot( auto current_sync_state = SyncState::None; absl::optional synthesized_current_change; if (queries_by_target_.find(target_id) != queries_by_target_.end()) { - const Query& mirror_query = queries_by_target_[target_id][0]; + const QueryOrPipeline& mirror_query = queries_by_target_[target_id][0]; current_sync_state = query_views_by_query_[mirror_query]->view().sync_state(); } @@ -163,27 +168,30 @@ ViewSnapshot SyncEngine::InitializeViewAndComputeSnapshot( return view_change.snapshot().value(); } -void SyncEngine::ListenToRemoteStore(Query query) { +void SyncEngine::ListenToRemoteStore(QueryOrPipeline query) { AssertCallbackExists("ListenToRemoteStore"); - TargetData target_data = local_store_->AllocateTarget(query.ToTarget()); + TargetData target_data = + local_store_->AllocateTarget(query.ToTargetOrPipeline()); remote_store_->Listen(std::move(target_data)); } -void SyncEngine::StopListening(const Query& query, +void SyncEngine::StopListening(const QueryOrPipeline& query, bool should_stop_remote_listening) { AssertCallbackExists("StopListening"); StopListeningAndReleaseTarget(query, /** last_listen= */ true, should_stop_remote_listening); } -void SyncEngine::StopListeningToRemoteStoreOnly(const Query& query) { +void SyncEngine::StopListeningToRemoteStoreOnly(const QueryOrPipeline& query) { AssertCallbackExists("StopListeningToRemoteStoreOnly"); StopListeningAndReleaseTarget(query, /** last_listen= */ false, /** should_stop_remote_listening= */ true); } void SyncEngine::StopListeningAndReleaseTarget( - const Query& query, bool last_listen, bool should_stop_remote_listening) { + const QueryOrPipeline& query, + bool last_listen, + bool 
should_stop_remote_listening) { auto query_view = query_views_by_query_[query]; HARD_ASSERT(query_view, "Trying to stop listening to a query not found"); @@ -210,13 +218,13 @@ void SyncEngine::StopListeningAndReleaseTarget( } void SyncEngine::RemoveAndCleanupTarget(TargetId target_id, Status status) { - for (const Query& query : queries_by_target_.at(target_id)) { + for (const QueryOrPipeline& query : queries_by_target_.at(target_id)) { query_views_by_query_.erase(query); if (!status.ok()) { sync_engine_callback_->OnError(query, status); if (ErrorIsInteresting(status)) { - LOG_WARN("Listen for query at %s failed: %s", - query.path().CanonicalString(), status.error_message()); + LOG_WARN("Listen for query at %s failed: %s", query.CanonicalId(), + status.error_message()); } } } diff --git a/Firestore/core/src/core/sync_engine.h b/Firestore/core/src/core/sync_engine.h index bcf930fdd0c..1e250d1ba65 100644 --- a/Firestore/core/src/core/sync_engine.h +++ b/Firestore/core/src/core/sync_engine.h @@ -76,27 +76,28 @@ class QueryEventSource { * * @return the target ID assigned to the query. */ - virtual model::TargetId Listen(Query query, bool should_listen_to_remote) = 0; + virtual model::TargetId Listen(QueryOrPipeline query, + bool should_listen_to_remote) = 0; /** * Sends the listen to the RemoteStore to get remote data. Invoked when a * Query starts listening to the remote store, while already listening to the * cache. */ - virtual void ListenToRemoteStore(Query query) = 0; + virtual void ListenToRemoteStore(QueryOrPipeline query) = 0; /** * Stops listening to a query previously listened to via `Listen`. Un-listen * to remote store if there is a watch connection established and stayed open. */ - virtual void StopListening(const Query& query, + virtual void StopListening(const QueryOrPipeline& query, bool should_stop_remote_listening) = 0; /** * Stops listening to a query from watch. 
Invoked when a Query stops listening * to the remote store, while still listening to the cache. */ - virtual void StopListeningToRemoteStoreOnly(const Query& query) = 0; + virtual void StopListeningToRemoteStoreOnly(const QueryOrPipeline& query) = 0; }; /** @@ -124,12 +125,12 @@ class SyncEngine : public remote::RemoteStoreCallback, public QueryEventSource { void SetCallback(SyncEngineCallback* callback) override { sync_engine_callback_ = callback; } - model::TargetId Listen(Query query, + model::TargetId Listen(QueryOrPipeline query, bool should_listen_to_remote = true) override; - void ListenToRemoteStore(Query query) override; - void StopListening(const Query& query, + void ListenToRemoteStore(QueryOrPipeline query) override; + void StopListening(const QueryOrPipeline& query, bool should_stop_remote_listening = true) override; - void StopListeningToRemoteStoreOnly(const Query& query) override; + void StopListeningToRemoteStoreOnly(const QueryOrPipeline& query) override; /** * Initiates the write of local mutation batch which involves adding the @@ -204,13 +205,13 @@ class SyncEngine : public remote::RemoteStoreCallback, public QueryEventSource { */ class QueryView { public: - QueryView(Query query, model::TargetId target_id, View view) + QueryView(QueryOrPipeline query, model::TargetId target_id, View view) : query_(std::move(query)), target_id_(target_id), view_(std::move(view)) { } - const Query& query() const { + const QueryOrPipeline& query() const { return query_; } @@ -233,7 +234,7 @@ class SyncEngine : public remote::RemoteStoreCallback, public QueryEventSource { } private: - Query query_; + QueryOrPipeline query_; model::TargetId target_id_; View view_; }; @@ -260,12 +261,12 @@ class SyncEngine : public remote::RemoteStoreCallback, public QueryEventSource { void AssertCallbackExists(absl::string_view source); ViewSnapshot InitializeViewAndComputeSnapshot( - const Query& query, + const QueryOrPipeline& query, model::TargetId target_id, nanopb::ByteString 
resume_token); void RemoveAndCleanupTarget(model::TargetId target_id, util::Status status); - void StopListeningAndReleaseTarget(const Query& query, + void StopListeningAndReleaseTarget(const QueryOrPipeline& query, bool should_stop_remote_listening, bool last_listen); @@ -337,10 +338,12 @@ class SyncEngine : public remote::RemoteStoreCallback, public QueryEventSource { // Shared pointers are used to avoid creating and storing two copies of the // same `QueryView` and for consistency with other platforms. /** QueryViews for all active queries, indexed by query. */ - std::unordered_map> query_views_by_query_; + std::unordered_map> + query_views_by_query_; /** Queries mapped to Targets, indexed by target ID. */ - std::unordered_map> queries_by_target_; + std::unordered_map> + queries_by_target_; const size_t max_concurrent_limbo_resolutions_; diff --git a/Firestore/core/src/core/sync_engine_callback.h b/Firestore/core/src/core/sync_engine_callback.h index 64b2ba70b68..ad975f9b054 100644 --- a/Firestore/core/src/core/sync_engine_callback.h +++ b/Firestore/core/src/core/sync_engine_callback.h @@ -40,7 +40,8 @@ class SyncEngineCallback { /** Handles new view snapshots. */ virtual void OnViewSnapshots(std::vector&& snapshots) = 0; /** Handles the failure of a query. 
*/ - virtual void OnError(const core::Query& query, const util::Status& error) = 0; + virtual void OnError(const core::QueryOrPipeline& query, + const util::Status& error) = 0; }; } // namespace core diff --git a/Firestore/core/src/core/view.cc b/Firestore/core/src/core/view.cc index c812cb0861e..55fbf84dc33 100644 --- a/Firestore/core/src/core/view.cc +++ b/Firestore/core/src/core/view.cc @@ -16,10 +16,13 @@ #include "Firestore/core/src/core/view.h" +#include // For std::sort #include +#include #include "Firestore/core/src/core/target.h" #include "Firestore/core/src/model/document_set.h" +#include "Firestore/core/src/util/hard_assert.h" // For HARD_ASSERT and HARD_FAIL namespace firebase { namespace firestore { @@ -34,6 +37,67 @@ using model::OnlineState; using remote::TargetChange; using util::ComparisonResult; +// MARK: - Helper Functions for View +absl::optional View::GetLimit(const QueryOrPipeline& query) { + if (query.IsPipeline()) { + absl::optional limit = GetLastEffectiveLimit(query.pipeline()); + if (limit) { + return limit; + } + return absl::nullopt; + } else { + const auto& q = query.query(); + if (q.has_limit_to_first()) { + return q.limit(); + } else if (q.has_limit_to_last()) { + return -q.limit(); // Negative to indicate limitToLast + } + return absl::nullopt; + } +} + +LimitType View::GetLimitType(const QueryOrPipeline& query) { + if (query.IsPipeline()) { + absl::optional limit = GetLastEffectiveLimit(query.pipeline()); + return limit > 0 ? 
LimitType::First : LimitType::Last; + } else { + return query.query().limit_type(); + } +} + +std::pair, absl::optional> +View::GetLimitEdges(const QueryOrPipeline& query, + const model::DocumentSet& old_document_set) { + absl::optional limit_opt = GetLimit(query); + if (!limit_opt) { + return {absl::nullopt, absl::nullopt}; + } + int32_t limit_val = *limit_opt; + + if (query.IsPipeline()) { + // For pipelines, converted_from_limit_to_last in EffectiveLimitDetails + // tells us if it was originally a limitToLast. + // The GetLimit function already encodes this as a negative number. + if (limit_val > 0 && + old_document_set.size() == static_cast(limit_val)) { + return {old_document_set.GetLastDocument(), absl::nullopt}; + } else if (limit_val < 0 && + old_document_set.size() == static_cast(-limit_val)) { + return {absl::nullopt, old_document_set.GetFirstDocument()}; + } + } else { + const auto& q = query.query(); + if (q.has_limit_to_first() && + old_document_set.size() == static_cast(q.limit())) { + return {old_document_set.GetLastDocument(), absl::nullopt}; + } else if (q.has_limit_to_last() && + old_document_set.size() == static_cast(q.limit())) { + return {absl::nullopt, old_document_set.GetFirstDocument()}; + } + } + return {absl::nullopt, absl::nullopt}; +} + // MARK: - LimboDocumentChange LimboDocumentChange::LimboDocumentChange( @@ -82,9 +146,10 @@ int GetDocumentViewChangeTypePosition(DocumentViewChange::Type change_type) { } // namespace -View::View(Query query, DocumentKeySet remote_documents) +View::View(QueryOrPipeline query, DocumentKeySet remote_documents) : query_(std::move(query)), - document_set_(query_.Comparator()), + document_set_(query_.Comparator()), // QueryOrPipeline must provide a + // valid comparator synced_documents_(std::move(remote_documents)) { } @@ -108,25 +173,9 @@ ViewDocumentChanges View::ComputeDocumentChanges( DocumentSet new_document_set = old_document_set; bool needs_refill = false; - // Track the last doc in a (full) limit. 
This is necessary, because some - // update (a delete, or an update moving a doc past the old limit) might mean - // there is some other document in the local cache that either should come (1) - // between the old last limit doc and the new last document, in the case of - // updates, or (2) after the new last document, in the case of deletes. So we - // keep this doc at the old limit to compare the updates to. - // - // Note that this should never get used in a refill (when previous_changes is - // set), because there will only be adds -- no deletes or updates. - absl::optional last_doc_in_limit; - if (query_.has_limit_to_first() && - old_document_set.size() == static_cast(query_.limit())) { - last_doc_in_limit = old_document_set.GetLastDocument(); - } - absl::optional first_doc_in_limit; - if (query_.has_limit_to_last() && - old_document_set.size() == static_cast(query_.limit())) { - first_doc_in_limit = old_document_set.GetFirstDocument(); - } + auto limit_edges = GetLimitEdges(query_, old_document_set); + absl::optional last_doc_in_limit = limit_edges.first; + absl::optional first_doc_in_limit = limit_edges.second; for (const auto& kv : doc_changes) { const DocumentKey& key = kv.first; @@ -209,13 +258,16 @@ ViewDocumentChanges View::ComputeDocumentChanges( } // Drop documents out to meet limitToFirst/limitToLast requirement. - if (query_.limit_type() != LimitType::None) { - auto limit = static_cast(query_.limit()); - if (limit < new_document_set.size()) { - for (size_t i = new_document_set.size() - limit; i > 0; --i) { + auto limit = GetLimit(query_); + if (limit.has_value()) { + auto limit_type = GetLimitType(query_); + auto abs_limit = std::abs(limit.value()); + if (abs_limit < static_cast(new_document_set.size())) { + for (size_t i = new_document_set.size() - abs_limit; i > 0; --i) { absl::optional found = - query_.has_limit_to_first() ? new_document_set.GetLastDocument() - : new_document_set.GetFirstDocument(); + limit_type == LimitType::First + ? 
new_document_set.GetLastDocument() + : new_document_set.GetFirstDocument(); const Document& old_doc = *found; new_document_set = new_document_set.erase(old_doc->key()); new_mutated_keys = new_mutated_keys.erase(old_doc->key()); diff --git a/Firestore/core/src/core/view.h b/Firestore/core/src/core/view.h index c6c41b3c8dc..1ced53ec108 100644 --- a/Firestore/core/src/core/view.h +++ b/Firestore/core/src/core/view.h @@ -20,6 +20,7 @@ #include #include +#include "Firestore/core/src/core/pipeline_util.h" #include "Firestore/core/src/core/view_snapshot.h" #include "Firestore/core/src/model/document_key_set.h" #include "Firestore/core/src/model/document_set.h" @@ -135,7 +136,7 @@ class ViewChange { */ class View { public: - View(Query query, model::DocumentKeySet remote_documents); + View(QueryOrPipeline query, model::DocumentKeySet remote_documents); /** * The set of remote documents that the server has told us belongs to the @@ -189,6 +190,14 @@ class View { } private: + // Helper methods to encapsulate limit logic based on query type + static absl::optional GetLimit(const QueryOrPipeline& query); + static LimitType GetLimitType(const QueryOrPipeline& query); + static std::pair, + absl::optional> + GetLimitEdges(const QueryOrPipeline& query, + const model::DocumentSet& old_document_set); + util::ComparisonResult Compare(const model::Document& lhs, const model::Document& rhs) const; @@ -202,7 +211,7 @@ class View { std::vector UpdateLimboDocuments(); - Query query_; + QueryOrPipeline query_; model::DocumentSet document_set_; diff --git a/Firestore/core/src/core/view_snapshot.cc b/Firestore/core/src/core/view_snapshot.cc index 6daa64d27cb..e208ca95a73 100644 --- a/Firestore/core/src/core/view_snapshot.cc +++ b/Firestore/core/src/core/view_snapshot.cc @@ -136,7 +136,7 @@ std::string DocumentViewChangeSet::ToString() const { // ViewSnapshot -ViewSnapshot::ViewSnapshot(Query query, +ViewSnapshot::ViewSnapshot(QueryOrPipeline query, DocumentSet documents, DocumentSet 
old_documents, std::vector document_changes, @@ -156,7 +156,7 @@ ViewSnapshot::ViewSnapshot(Query query, has_cached_results_{has_cached_results} { } -ViewSnapshot ViewSnapshot::FromInitialDocuments(Query query, +ViewSnapshot ViewSnapshot::FromInitialDocuments(QueryOrPipeline query, DocumentSet documents, DocumentKeySet mutated_keys, bool from_cache, @@ -179,7 +179,7 @@ ViewSnapshot ViewSnapshot::FromInitialDocuments(Query query, has_cached_results}; } -const Query& ViewSnapshot::query() const { +const QueryOrPipeline& ViewSnapshot::query_or_pipeline() const { return query_; } @@ -202,13 +202,14 @@ size_t ViewSnapshot::Hash() const { // straightforward way to compute its hash value. Since `ViewSnapshot` is // currently not stored in any dictionaries, this has no side effects. - return util::Hash(query(), documents(), old_documents(), document_changes(), - from_cache(), sync_state_changed(), + return util::Hash(query_or_pipeline(), documents(), old_documents(), + document_changes(), from_cache(), sync_state_changed(), excludes_metadata_changes(), has_cached_results()); } bool operator==(const ViewSnapshot& lhs, const ViewSnapshot& rhs) { - return lhs.query() == rhs.query() && lhs.documents() == rhs.documents() && + return lhs.query_or_pipeline() == rhs.query_or_pipeline() && + lhs.documents() == rhs.documents() && lhs.old_documents() == rhs.old_documents() && lhs.document_changes() == rhs.document_changes() && lhs.from_cache() == rhs.from_cache() && diff --git a/Firestore/core/src/core/view_snapshot.h b/Firestore/core/src/core/view_snapshot.h index 9ce1f164f78..93e55be9316 100644 --- a/Firestore/core/src/core/view_snapshot.h +++ b/Firestore/core/src/core/view_snapshot.h @@ -25,6 +25,7 @@ #include #include "Firestore/core/src/core/event_listener.h" +#include "Firestore/core/src/core/pipeline_util.h" #include "Firestore/core/src/core/query.h" #include "Firestore/core/src/immutable/sorted_map.h" #include "Firestore/core/src/model/document.h" @@ -97,7 +98,7 @@ class 
DocumentViewChangeSet { */ class ViewSnapshot { public: - ViewSnapshot(Query query, + ViewSnapshot(QueryOrPipeline query, model::DocumentSet documents, model::DocumentSet old_documents, std::vector document_changes, @@ -111,7 +112,7 @@ class ViewSnapshot { * Returns a view snapshot as if all documents in the snapshot were * added. */ - static ViewSnapshot FromInitialDocuments(Query query, + static ViewSnapshot FromInitialDocuments(QueryOrPipeline query, model::DocumentSet documents, model::DocumentKeySet mutated_keys, bool from_cache, @@ -119,7 +120,7 @@ class ViewSnapshot { bool has_cached_results); /** The query this view is tracking the results for. */ - const Query& query() const; + const QueryOrPipeline& query_or_pipeline() const; /** The documents currently known to be results of the query. */ const model::DocumentSet& documents() const { @@ -171,7 +172,7 @@ class ViewSnapshot { size_t Hash() const; private: - Query query_; + QueryOrPipeline query_; model::DocumentSet documents_; model::DocumentSet old_documents_; diff --git a/Firestore/core/src/local/leveldb_migrations.cc b/Firestore/core/src/local/leveldb_migrations.cc index 2df16fbb560..ddfe6aae433 100644 --- a/Firestore/core/src/local/leveldb_migrations.cc +++ b/Firestore/core/src/local/leveldb_migrations.cc @@ -343,7 +343,7 @@ void RewriteTargetsCanonicalIds(leveldb::DB* db, } auto new_key = LevelDbQueryTargetKey::Key( - target_data.ValueOrDie().target().CanonicalId(), + target_data.ValueOrDie().target_or_pipeline().CanonicalId(), target_data.ValueOrDie().target_id()); transaction.Delete(it->key()); diff --git a/Firestore/core/src/local/leveldb_remote_document_cache.cc b/Firestore/core/src/local/leveldb_remote_document_cache.cc index 73342b886e9..789dbf12216 100644 --- a/Firestore/core/src/local/leveldb_remote_document_cache.cc +++ b/Firestore/core/src/local/leveldb_remote_document_cache.cc @@ -21,6 +21,7 @@ #include #include "Firestore/Protos/nanopb/firestore/local/maybe_document.nanopb.h" +#include 
"Firestore/core/src/core/pipeline_util.h" // Added #include "Firestore/core/src/core/query.h" #include "Firestore/core/src/local/leveldb_key.h" #include "Firestore/core/src/local/leveldb_persistence.h" @@ -34,6 +35,7 @@ #include "Firestore/core/src/nanopb/reader.h" #include "Firestore/core/src/util/background_queue.h" #include "Firestore/core/src/util/executor.h" +#include "Firestore/core/src/util/log.h" #include "Firestore/core/src/util/status.h" #include "Firestore/core/src/util/string_util.h" #include "leveldb/db.h" @@ -175,7 +177,7 @@ MutableDocumentMap LevelDbRemoteDocumentCache::GetAll( MutableDocumentMap LevelDbRemoteDocumentCache::GetAllExisting( DocumentVersionMap&& remote_map, - const core::Query& query, + const core::QueryOrPipeline& query, const model::OverlayByDocumentKeyMap& mutated_docs) const { BackgroundQueue tasks(executor_.get()); AsyncResults> results; @@ -224,27 +226,41 @@ MutableDocumentMap LevelDbRemoteDocumentCache::GetAll( } MutableDocumentMap LevelDbRemoteDocumentCache::GetDocumentsMatchingQuery( - const core::Query& query, + const core::QueryOrPipeline& query_or_pipeline, const model::IndexOffset& offset, absl::optional limit, const model::OverlayByDocumentKeyMap& mutated_docs) const { absl::optional context; - return GetDocumentsMatchingQuery(query, offset, context, limit, mutated_docs); + return GetDocumentsMatchingQuery(query_or_pipeline, offset, context, limit, + mutated_docs); } MutableDocumentMap LevelDbRemoteDocumentCache::GetDocumentsMatchingQuery( - const core::Query& query, + const core::QueryOrPipeline& query_or_pipeline, const model::IndexOffset& offset, absl::optional& context, absl::optional limit, const model::OverlayByDocumentKeyMap& mutated_docs) const { // Use the query path as a prefix for testing if a document matches the query. 
+ model::ResourcePath path; + if (query_or_pipeline.IsPipeline()) { + const auto& collection = + core::GetPipelineCollection(query_or_pipeline.pipeline()); + if (!collection.has_value()) { + LOG_WARN( + "LevelDbRemoteDocumentCache: No collection found for pipeline %s", + query_or_pipeline.ToString()); + return MutableDocumentMap(); + } + path = model::ResourcePath::FromString(collection.value()); + } else { + path = query_or_pipeline.query().path(); + } // Execute an index-free query and filter by read time. This is safe since // all document changes to queries that have a // last_limbo_free_snapshot_version (`since_read_time`) have a read time // set. - auto path = query.path(); std::string start_key = LevelDbRemoteDocumentReadTimeKey::KeyPrefix(path, offset.read_time()); auto it = db_->current_transaction()->NewIterator(); @@ -279,8 +295,7 @@ MutableDocumentMap LevelDbRemoteDocumentCache::GetDocumentsMatchingQuery( context.value().IncrementDocumentReadCount(remote_map.size()); } - return LevelDbRemoteDocumentCache::GetAllExisting(std::move(remote_map), - query, mutated_docs); + return GetAllExisting(std::move(remote_map), query_or_pipeline, mutated_docs); } MutableDocument LevelDbRemoteDocumentCache::DecodeMaybeDocument( diff --git a/Firestore/core/src/local/leveldb_remote_document_cache.h b/Firestore/core/src/local/leveldb_remote_document_cache.h index a9236184d49..11aa38ac080 100644 --- a/Firestore/core/src/local/leveldb_remote_document_cache.h +++ b/Firestore/core/src/local/leveldb_remote_document_cache.h @@ -22,6 +22,7 @@ #include #include +#include "Firestore/core/src/core/pipeline_util.h" // Added #include "Firestore/core/src/core/query.h" #include "Firestore/core/src/local/leveldb_index_manager.h" #include "Firestore/core/src/local/remote_document_cache.h" @@ -66,12 +67,12 @@ class LevelDbRemoteDocumentCache : public RemoteDocumentCache { const model::IndexOffset& offset, size_t limit) const override; model::MutableDocumentMap GetDocumentsMatchingQuery( - 
const core::Query& query, + const core::QueryOrPipeline& query_or_pipeline, const model::IndexOffset& offset, absl::optional limit = absl::nullopt, const model::OverlayByDocumentKeyMap& mutated_docs = {}) const override; model::MutableDocumentMap GetDocumentsMatchingQuery( - const core::Query& query, + const core::QueryOrPipeline& query_or_pipeline, const model::IndexOffset& offset, absl::optional& context, absl::optional limit = absl::nullopt, @@ -86,7 +87,7 @@ class LevelDbRemoteDocumentCache : public RemoteDocumentCache { */ model::MutableDocumentMap GetAllExisting( model::DocumentVersionMap&& remote_map, - const core::Query& query, + const core::QueryOrPipeline& query, const model::OverlayByDocumentKeyMap& mutated_docs = {}) const; model::MutableDocument DecodeMaybeDocument( diff --git a/Firestore/core/src/local/leveldb_target_cache.cc b/Firestore/core/src/local/leveldb_target_cache.cc index 2635be8fb9c..bcdd1d32876 100644 --- a/Firestore/core/src/local/leveldb_target_cache.cc +++ b/Firestore/core/src/local/leveldb_target_cache.cc @@ -102,7 +102,8 @@ void LevelDbTargetCache::Start() { void LevelDbTargetCache::AddTarget(const TargetData& target_data) { Save(target_data); - const std::string& canonical_id = target_data.target().CanonicalId(); + const std::string& canonical_id = + target_data.target_or_pipeline().CanonicalId(); std::string index_key = LevelDbQueryTargetKey::Key(canonical_id, target_data.target_id()); std::string empty_buffer; @@ -129,19 +130,20 @@ void LevelDbTargetCache::RemoveTarget(const TargetData& target_data) { std::string key = LevelDbTargetKey::Key(target_id); db_->current_transaction()->Delete(key); - std::string index_key = - LevelDbQueryTargetKey::Key(target_data.target().CanonicalId(), target_id); + std::string index_key = LevelDbQueryTargetKey::Key( + target_data.target_or_pipeline().CanonicalId(), target_id); db_->current_transaction()->Delete(index_key); metadata_->target_count--; SaveMetadata(); } -absl::optional 
LevelDbTargetCache::GetTarget(const Target& target) { +absl::optional LevelDbTargetCache::GetTarget( + const core::TargetOrPipeline& target_or_pipeline) { // Scan the query-target index starting with a prefix starting with the given - // target's canonical_id. Note that this is a scan rather than a get because - // canonical_ids are not required to be unique per target. + // target's or pipeline's canonical_id. Note that this is a scan rather than + // a get because canonical_ids are not required to be unique per target. + const std::string& canonical_id = target_or_pipeline.CanonicalId(); auto index_iterator = db_->current_transaction()->NewIterator(); std::string index_prefix = LevelDbQueryTargetKey::KeyPrefix(canonical_id); index_iterator->Seek(index_prefix); @@ -177,10 +179,10 @@ absl::optional LevelDbTargetCache::GetTarget(const Target& target) { continue; } - // Finally after finding a potential match, check that the target is - // actually equal to the requested target. + // Finally after finding a potential match, check that the target or + // pipeline is actually equal to the requested one. TargetData target_data = DecodeTarget(target_iterator->value()); - if (target_data.target() == target) { + if (target_data.target_or_pipeline() == target_or_pipeline) { return target_data; } } diff --git a/Firestore/core/src/local/leveldb_target_cache.h b/Firestore/core/src/local/leveldb_target_cache.h index a6e8935f1ca..4083ab852df 100644 --- a/Firestore/core/src/local/leveldb_target_cache.h +++ b/Firestore/core/src/local/leveldb_target_cache.h @@ -70,7 +70,8 @@ class LevelDbTargetCache : public TargetCache { void RemoveTarget(const TargetData& target_data) override; - absl::optional GetTarget(const core::Target& target) override; + absl::optional GetTarget( + const core::TargetOrPipeline& target_or_pipeline) override; void EnumerateSequenceNumbers( const SequenceNumberCallback& callback) override; diff --git a/Firestore/core/src/local/local_documents_view.cc b/Firestore/core/src/local/local_documents_view.cc index d3812e42a5f..25dd5d51e05 100644 --- a/Firestore/core/src/local/local_documents_view.cc +++ b/Firestore/core/src/local/local_documents_view.cc @@ -17,6 +17,7 @@ #include "Firestore/core/src/local/local_documents_view.h" #include +#include // Added for std::function #include #include #include @@ -25,6 +26,8 @@ #include #include +#include "Firestore/core/src/api/realtime_pipeline.h" +#include "Firestore/core/src/core/pipeline_util.h" #include "Firestore/core/src/core/query.h" #include "Firestore/core/src/immutable/sorted_set.h" #include "Firestore/core/src/local/local_write_result.h" @@ -38,6 +41,7 @@ #include "Firestore/core/src/model/overlayed_document.h" #include "Firestore/core/src/model/resource_path.h" #include "Firestore/core/src/model/snapshot_version.h" +#include "Firestore/core/src/util/exception.h" // Added for ThrowInvalidArgument #include "Firestore/core/src/util/hard_assert.h" #include "absl/types/optional.h" @@ -45,7 +49,9 @@ namespace firebase { namespace firestore { namespace local { +using api::RealtimePipeline; // Added using
core::Query; +using core::QueryOrPipeline; // Added using model::BatchId; using model::Document; using model::DocumentKey; @@ -73,25 +79,35 @@ Document LocalDocumentsView::GetDocument( return Document{std::move(document)}; } +// Main entry point for matching documents, handles both Query and Pipeline. DocumentMap LocalDocumentsView::GetDocumentsMatchingQuery( - const Query& query, const model::IndexOffset& offset) { - absl::optional null_context; - return GetDocumentsMatchingQuery(query, offset, null_context); -} - -DocumentMap LocalDocumentsView::GetDocumentsMatchingQuery( - const Query& query, + const QueryOrPipeline& query_or_pipeline, const model::IndexOffset& offset, absl::optional& context) { - if (query.IsDocumentQuery()) { - return GetDocumentsMatchingDocumentQuery(query.path()); - } else if (query.IsCollectionGroupQuery()) { - return GetDocumentsMatchingCollectionGroupQuery(query, offset, context); + if (query_or_pipeline.IsPipeline()) { + return GetDocumentsMatchingPipeline(query_or_pipeline, offset, context); } else { - return GetDocumentsMatchingCollectionQuery(query, offset, context); + // Handle standard queries + const Query& query = query_or_pipeline.query(); + if (query.IsDocumentQuery()) { + return GetDocumentsMatchingDocumentQuery(query.path()); + } else if (query.IsCollectionGroupQuery()) { + return GetDocumentsMatchingCollectionGroupQuery(query, offset, context); + } else { + return GetDocumentsMatchingCollectionQuery(query, offset, context); + } } } +// Overload without QueryContext (calls the main one with QueryOrPipeline) +// This definition now matches the remaining declaration in the header. 
+DocumentMap LocalDocumentsView::GetDocumentsMatchingQuery( + const QueryOrPipeline& query, const model::IndexOffset& offset) { + absl::optional null_context; + // Wrap Query in QueryOrPipeline for the call + return GetDocumentsMatchingQuery(query, offset, null_context); +} + DocumentMap LocalDocumentsView::GetDocumentsMatchingDocumentQuery( const ResourcePath& doc_path) { DocumentMap result; @@ -173,34 +189,9 @@ DocumentMap LocalDocumentsView::GetDocumentsMatchingCollectionQuery( remote_document_cache_->GetDocumentsMatchingQuery( query, offset, context, absl::nullopt, overlays); - // As documents might match the query because of their overlay we need to - // include documents for all overlays in the initial document set. - for (const auto& entry : overlays) { - if (remote_documents.find(entry.first) == remote_documents.end()) { - remote_documents = remote_documents.insert( - entry.first, MutableDocument::InvalidDocument(entry.first)); - } - } - - // Apply the overlays and match against the query. - DocumentMap results; - for (const auto& entry : remote_documents) { - const auto& key = entry.first; - MutableDocument doc = entry.second; - - auto overlay_it = overlays.find(key); - if (overlay_it != overlays.end()) { - (*overlay_it) - .second.mutation() - .ApplyToLocalView(doc, FieldMask(), Timestamp::Now()); - } - // Finally, insert the documents that still match the query - if (query.Matches(doc)) { - results = results.insert(key, std::move(doc)); - } - } - - return results; + return RetrieveMatchingLocalDocuments( + std::move(overlays), std::move(remote_documents), + [&query](const Document& doc) { return query.Matches(doc); }); } Document LocalDocumentsView::GetDocument(const DocumentKey& key) { @@ -377,6 +368,146 @@ MutableDocument LocalDocumentsView::GetBaseDocument( : MutableDocument::InvalidDocument(key); } +// Helper function to apply overlays and filter documents. 
+DocumentMap LocalDocumentsView::RetrieveMatchingLocalDocuments( + OverlayByDocumentKeyMap overlays, + MutableDocumentMap remote_documents, + const std::function& matcher) { + // As documents might match the query because of their overlay we need to + // include documents for all overlays in the initial document set. + for (const auto& entry : overlays) { + const DocumentKey& key = entry.first; + if (remote_documents.find(key) == remote_documents.end()) { + remote_documents = + remote_documents.insert(key, MutableDocument::InvalidDocument(key)); + } + } + + DocumentMap results; + for (const auto& entry : remote_documents) { + const DocumentKey& key = entry.first; + MutableDocument doc = entry.second; // Make a copy to modify + + auto overlay_it = overlays.find(key); + if (overlay_it != overlays.end()) { + // Apply the overlay mutation + overlay_it->second.mutation().ApplyToLocalView(doc, FieldMask(), + Timestamp::Now()); + } + + // Finally, insert the documents that match the filter + if (matcher(doc)) { + results = results.insert(key, std::move(doc)); + } + } + + return results; +} + +// Handles querying the local view for pipelines. 
+DocumentMap LocalDocumentsView::GetDocumentsMatchingPipeline( + const QueryOrPipeline& query_or_pipeline, + const IndexOffset& offset, + absl::optional& context) { + const auto& pipeline = query_or_pipeline.pipeline(); + + if (core::GetPipelineSourceType(pipeline) == + core::PipelineSourceType::kCollectionGroup) { + auto collection_id = core::GetPipelineCollectionGroup(pipeline); + HARD_ASSERT( + collection_id.has_value(), + "Pipeline source type is kCollectionGroup but first stage is not " + "a CollectionGroupSource."); + + DocumentMap results; + std::vector parents = + index_manager_->GetCollectionParents(collection_id.value()); + + for (const ResourcePath& parent : parents) { + RealtimePipeline collection_pipeline = core::AsCollectionPipelineAtPath( + pipeline, parent.Append(collection_id.value())); + DocumentMap collection_results = + GetDocumentsMatchingPipeline(collection_pipeline, offset, context); + for (const auto& kv : collection_results) { + results = results.insert(kv.first, kv.second); + } + } + return results; + } else { + // Non-collection-group pipelines: + OverlayByDocumentKeyMap overlays = + GetOverlaysForPipeline(pipeline, offset.largest_batch_id()); + + MutableDocumentMap remote_documents; + switch (core::GetPipelineSourceType(pipeline)) { + case core::PipelineSourceType::kCollection: { + remote_documents = remote_document_cache_->GetDocumentsMatchingQuery( + query_or_pipeline, offset, context, absl::nullopt, overlays); + break; + } + case core::PipelineSourceType::kDocuments: { + const auto keys = + core::GetPipelineDocuments(query_or_pipeline.pipeline()); + DocumentKeySet key_set; + for (const auto& key : keys.value()) { + key_set = key_set.insert(DocumentKey::FromPathString(key)); + } + + remote_documents = remote_document_cache_->GetAll(key_set); + break; + } + default: + util::ThrowInvalidArgument( + "Invalid pipeline source to execute offline: %s", + query_or_pipeline.ToString()); // Assuming ToString exists + } + + return 
RetrieveMatchingLocalDocuments( + std::move(overlays), std::move(remote_documents), + [&query_or_pipeline](const model::Document& doc) { + return query_or_pipeline.Matches(doc); + }); + } +} + +OverlayByDocumentKeyMap LocalDocumentsView::GetOverlaysForPipeline( + const QueryOrPipeline& query_or_pipeline, BatchId largest_batch_id) { + const auto& pipeline = query_or_pipeline.pipeline(); + switch (core::GetPipelineSourceType(pipeline)) { + case core::PipelineSourceType::kCollection: { + auto collection = core::GetPipelineCollection(pipeline); + HARD_ASSERT(collection.has_value(), + "Pipeline source type is kCollection but collection source " + "is missing"); + + return document_overlay_cache_->GetOverlays( + ResourcePath::FromString(collection.value()), largest_batch_id); + } + case core::PipelineSourceType::kDocuments: { + auto documents = core::GetPipelineDocuments(pipeline); + HARD_ASSERT(documents.has_value(), + "Pipeline source type is kDocuments but documents source " + "is missing"); + + std::set key_set; + for (const auto& key_string : documents.value()) { + key_set.insert(DocumentKey::FromPathString(key_string)); + } + + OverlayByDocumentKeyMap results; + document_overlay_cache_->GetOverlays(results, key_set); + + return results; + } + default: { + HARD_FAIL( + "GetOverlaysForPipeline: Unrecognized pipeline source type for " + "pipeline %s", + query_or_pipeline.ToString()); + } + } +} + } // namespace local } // namespace firestore } // namespace firebase diff --git a/Firestore/core/src/local/local_documents_view.h b/Firestore/core/src/local/local_documents_view.h index 549656dc44e..4bcb49c3aac 100644 --- a/Firestore/core/src/local/local_documents_view.h +++ b/Firestore/core/src/local/local_documents_view.h @@ -22,23 +22,34 @@ #include #include +#include // Added for std::function #include "Firestore/core/src/immutable/sorted_set.h" #include "Firestore/core/src/local/document_overlay_cache.h" #include "Firestore/core/src/local/index_manager.h" #include
"Firestore/core/src/local/mutation_queue.h" #include "Firestore/core/src/local/query_context.h" #include "Firestore/core/src/local/remote_document_cache.h" + #include "Firestore/core/src/model/document.h" #include "Firestore/core/src/model/model_fwd.h" #include "Firestore/core/src/model/overlayed_document.h" #include "Firestore/core/src/util/range.h" +// Forward declarations namespace firebase { namespace firestore { - namespace core { class Query; +class QueryOrPipeline; // Added forward declaration } // namespace core +namespace api { +class RealtimePipeline; // Added forward declaration +} // namespace api +} // namespace firestore +} // namespace firebase + +namespace firebase { +namespace firestore { namespace local { @@ -140,19 +151,20 @@ class LocalDocumentsView { */ // Virtual for testing. virtual model::DocumentMap GetDocumentsMatchingQuery( - const core::Query& query, const model::IndexOffset& offset); + const core::QueryOrPipeline& query, const model::IndexOffset& offset); /** * Performs a query against the local view of all documents. * - * @param query The query to match documents against. + * @param query_or_pipeline The query to match documents against. * @param offset Read time and document key to start scanning by (exclusive). * @param context A optional tracker to keep a record of important details * during database local query execution. */ // Virtual for testing. + // Changed parameter type from Query to QueryOrPipeline virtual model::DocumentMap GetDocumentsMatchingQuery( - const core::Query& query, + const core::QueryOrPipeline& query_or_pipeline, const model::IndexOffset& offset, absl::optional& context); @@ -174,12 +186,33 @@ class LocalDocumentsView { const model::IndexOffset& offset, absl::optional& context); - /** Queries the remote documents and overlays mutations. */ + /** Queries the remote documents and overlays mutations for standard queries. 
+ */ model::DocumentMap GetDocumentsMatchingCollectionQuery( const core::Query& query, const model::IndexOffset& offset, absl::optional& context); + /** Queries the remote documents and overlays mutations for pipelines. */ + model::DocumentMap GetDocumentsMatchingPipeline( + const core::QueryOrPipeline& pipeline, + const model::IndexOffset& offset, + absl::optional& context); + + /** Gets the overlays for the given pipeline. */ + model::OverlayByDocumentKeyMap GetOverlaysForPipeline( + const core::QueryOrPipeline& query_or_pipeline, + model::BatchId largest_batch_id); + + /** + * Takes a base document map and overlays, applies the overlays, and filters + * the documents using the provided matcher. + */ + model::DocumentMap RetrieveMatchingLocalDocuments( + model::OverlayByDocumentKeyMap overlays, + model::MutableDocumentMap remote_documents, + const std::function& matcher); + RemoteDocumentCache* remote_document_cache() { return remote_document_cache_; } diff --git a/Firestore/core/src/local/local_serializer.cc b/Firestore/core/src/local/local_serializer.cc index 14d1a5502b9..f84560e8576 100644 --- a/Firestore/core/src/local/local_serializer.cc +++ b/Firestore/core/src/local/local_serializer.cc @@ -242,13 +242,19 @@ Message LocalSerializer::EncodeTargetData( result->resume_token = nanopb::CopyBytesArray(target_data.resume_token().get()); - const Target& target = target_data.target(); - if (target.IsDocumentQuery()) { + const core::TargetOrPipeline& target = target_data.target_or_pipeline(); + if (target.IsPipeline()) { + result->which_target_type = firestore_client_Target_pipeline_query_tag; + result->pipeline_query.which_pipeline_type = + google_firestore_v1_Target_PipelineQueryTarget_structured_pipeline_tag; + result->pipeline_query.structured_pipeline = + rpc_serializer_.EncodeRealtimePipeline(target.pipeline()); + } else if (target.target().IsDocumentQuery()) { result->which_target_type = firestore_client_Target_documents_tag; - result->documents = 
rpc_serializer_.EncodeDocumentsTarget(target); + result->documents = rpc_serializer_.EncodeDocumentsTarget(target.target()); } else { result->which_target_type = firestore_client_Target_query_tag; - result->query = rpc_serializer_.EncodeQueryTarget(target); + result->query = rpc_serializer_.EncodeQueryTarget(target.target()); } return result; @@ -268,9 +274,19 @@ TargetData LocalSerializer::DecodeTargetData( rpc_serializer_.DecodeVersion(reader->context(), proto.last_limbo_free_snapshot_version); ByteString resume_token(proto.resume_token); - Target target; + core::TargetOrPipeline target; switch (proto.which_target_type) { + case firestore_client_Target_pipeline_query_tag: { + const auto result = rpc_serializer_.DecodePipelineTarget( + reader->context(), proto.pipeline_query); + if (!result.has_value()) { + reader->Fail("Unable to decode pipeline target"); + } else { + target = result.value(); + } + break; + } case firestore_client_Target_query_tag: target = rpc_serializer_.DecodeQueryTarget(reader->context(), proto.query); diff --git a/Firestore/core/src/local/local_store.cc b/Firestore/core/src/local/local_store.cc index 5a054a31a67..9300aed0dea 100644 --- a/Firestore/core/src/local/local_store.cc +++ b/Firestore/core/src/local/local_store.cc @@ -439,7 +439,7 @@ bool LocalStore::ShouldPersistTargetData(const TargetData& new_target_data, } absl::optional LocalStore::GetTargetData( - const core::Target& target) { + const core::TargetOrPipeline& target) { auto target_id = target_id_by_target_.find(target); if (target_id != target_id_by_target_.end()) { return target_data_by_target_[target_id->second]; @@ -502,14 +502,16 @@ BatchId LocalStore::GetHighestUnacknowledgedBatchId() { }); } -TargetData LocalStore::AllocateTarget(Target target) { +TargetData LocalStore::AllocateTarget( + const core::TargetOrPipeline& target_or_pipeline) { TargetData target_data = persistence_->Run("Allocate target", [&] { - absl::optional cached = target_cache_->GetTarget(target); + 
absl::optional cached = + target_cache_->GetTarget(target_or_pipeline); // TODO(mcg): freshen last accessed date if cached exists? if (!cached) { - cached = TargetData(std::move(target), target_id_generator_.NextId(), - persistence_->current_sequence_number(), - QueryPurpose::Listen); + cached = TargetData( + std::move(target_or_pipeline), target_id_generator_.NextId(), + persistence_->current_sequence_number(), QueryPurpose::Listen); target_cache_->AddTarget(*cached); } return *cached; @@ -520,7 +522,7 @@ TargetData LocalStore::AllocateTarget(Target target) { TargetId target_id = target_data.target_id(); if (target_data_by_target_.find(target_id) == target_data_by_target_.end()) { target_data_by_target_[target_id] = target_data; - target_id_by_target_[target_data.target()] = target_id; + target_id_by_target_[target_data.target_or_pipeline()] = target_id; } return target_data; @@ -547,14 +549,15 @@ void LocalStore::ReleaseTarget(TargetId target_id) { // Note: This also updates the target cache. persistence_->reference_delegate()->RemoveTarget(target_data); target_data_by_target_.erase(target_id); - target_id_by_target_.erase(target_data.target()); + target_id_by_target_.erase(target_data.target_or_pipeline()); }); } -QueryResult LocalStore::ExecuteQuery(const Query& query, - bool use_previous_results) { +QueryResult LocalStore::ExecuteQuery( + const core::QueryOrPipeline& query_or_pipeline, bool use_previous_results) { return persistence_->Run("ExecuteQuery", [&] { - absl::optional target_data = GetTargetData(query.ToTarget()); + absl::optional target_data = + GetTargetData(query_or_pipeline.ToTargetOrPipeline()); SnapshotVersion last_limbo_free_snapshot_version; DocumentKeySet remote_keys; @@ -565,7 +568,7 @@ QueryResult LocalStore::ExecuteQuery(const Query& query, } model::DocumentMap documents = query_engine_->GetDocumentsMatchingQuery( - query, + query_or_pipeline, use_previous_results ? 
last_limbo_free_snapshot_version : SnapshotVersion::None(), use_previous_results ? remote_keys : DocumentKeySet{}); @@ -609,7 +612,8 @@ DocumentMap LocalStore::ApplyBundledDocuments( const MutableDocumentMap& bundled_documents, const std::string& bundle_id) { // Allocates a target to hold all document keys from the bundle, such that // they will not get garbage collected right away. - TargetData umbrella_target = AllocateTarget(NewUmbrellaTarget(bundle_id)); + TargetData umbrella_target = + AllocateTarget(core::TargetOrPipeline(NewUmbrellaTarget(bundle_id))); return persistence_->Run("Apply bundle documents", [&] { DocumentKeySet keys; DocumentUpdateMap document_updates; @@ -642,7 +646,8 @@ void LocalStore::SaveNamedQuery(const bundle::NamedQuery& query, // associated read time if users use it to listen. NOTE: this also means if no // corresponding target exists, the new target will remain active and will not // get collected, unless users happen to unlisten the query. - TargetData existing = AllocateTarget(query.bundled_query().target()); + TargetData existing = + AllocateTarget(core::TargetOrPipeline(query.bundled_query().target())); int target_id = existing.target_id(); return persistence_->Run("Save named query", [&] { diff --git a/Firestore/core/src/local/local_store.h b/Firestore/core/src/local/local_store.h index 8f2a0872f52..4de433783df 100644 --- a/Firestore/core/src/local/local_store.h +++ b/Firestore/core/src/local/local_store.h @@ -25,6 +25,7 @@ #include "Firestore/core/src/bundle/bundle_callback.h" #include "Firestore/core/src/bundle/bundle_metadata.h" #include "Firestore/core/src/bundle/named_query.h" +#include "Firestore/core/src/core/pipeline_util.h" // Added for TargetOrPipeline #include "Firestore/core/src/core/target_id_generator.h" #include "Firestore/core/src/local/document_overlay_cache.h" #include "Firestore/core/src/local/overlay_migration_manager.h" @@ -205,7 +206,7 @@ class LocalStore : public bundle::BundleCallback { * Allocating an 
already allocated target will return the existing * `TargetData` for that target. */ - TargetData AllocateTarget(core::Target target); + TargetData AllocateTarget(const core::TargetOrPipeline& target_or_pipeline); /** * Unpin all the documents associated with a target. @@ -222,7 +223,8 @@ class LocalStore : public bundle::BundleCallback { * @param use_previous_results Whether results from previous executions can be * used to optimize this query execution. */ - QueryResult ExecuteQuery(const core::Query& query, bool use_previous_results); + QueryResult ExecuteQuery(const core::QueryOrPipeline& query_or_pipeline, + bool use_previous_results); /** * Notify the local store of the changed views to locally pin / unpin @@ -341,7 +343,8 @@ class LocalStore : public bundle::BundleCallback { * Returns the TargetData as seen by the LocalStore, including updates that * may have not yet been persisted to the TargetCache. */ - absl::optional GetTargetData(const core::Target& target); + absl::optional GetTargetData( + const core::TargetOrPipeline& target); /** * Creates a new target using the given bundle name, which will be used to @@ -433,8 +436,9 @@ class LocalStore : public bundle::BundleCallback { /** Maps target ids to data about their queries. */ std::unordered_map target_data_by_target_; - /** Maps a target to its targetID. */ - std::unordered_map target_id_by_target_; + /** Maps a target or pipeline to its targetID. 
*/ + std::unordered_map + target_id_by_target_; }; } // namespace local diff --git a/Firestore/core/src/local/memory_remote_document_cache.cc b/Firestore/core/src/local/memory_remote_document_cache.cc index 70e69b0cc77..bcdca84380b 100644 --- a/Firestore/core/src/local/memory_remote_document_cache.cc +++ b/Firestore/core/src/local/memory_remote_document_cache.cc @@ -16,6 +16,7 @@ #include "Firestore/core/src/local/memory_remote_document_cache.h" +#include "Firestore/core/src/core/pipeline_util.h" // Added #include "Firestore/core/src/core/query.h" #include "Firestore/core/src/local/memory_lru_reference_delegate.h" #include "Firestore/core/src/local/memory_persistence.h" @@ -24,6 +25,7 @@ #include "Firestore/core/src/model/document.h" #include "Firestore/core/src/model/overlay.h" #include "Firestore/core/src/util/hard_assert.h" +#include "Firestore/core/src/util/log.h" namespace firebase { namespace firestore { @@ -86,25 +88,37 @@ MutableDocumentMap MemoryRemoteDocumentCache::GetAll(const std::string&, } MutableDocumentMap MemoryRemoteDocumentCache::GetDocumentsMatchingQuery( - const core::Query& query, + const core::QueryOrPipeline& query_or_pipeline, const model::IndexOffset& offset, absl::optional limit, const model::OverlayByDocumentKeyMap& mutated_docs) const { absl::optional context; - return GetDocumentsMatchingQuery(query, offset, context, limit, mutated_docs); + return GetDocumentsMatchingQuery(query_or_pipeline, offset, context, limit, + mutated_docs); } MutableDocumentMap MemoryRemoteDocumentCache::GetDocumentsMatchingQuery( - const core::Query& query, + const core::QueryOrPipeline& query_or_pipeline, const model::IndexOffset& offset, absl::optional&, absl::optional, const model::OverlayByDocumentKeyMap& mutated_docs) const { MutableDocumentMap results; - // Documents are ordered by key, so we can use a prefix scan to narrow down - // the documents we need to match the query against. 
- auto path = query.path(); + model::ResourcePath path; + if (query_or_pipeline.IsPipeline()) { + const auto& collection = + core::GetPipelineCollection(query_or_pipeline.pipeline()); + if (!collection.has_value()) { + LOG_WARN("RemoteDocumentCache: No collection found for pipeline %s", + query_or_pipeline.ToString()); + return results; + } + path = model::ResourcePath::FromString(collection.value()); + } else { + path = query_or_pipeline.query().path(); + } + DocumentKey prefix{path.Append("")}; size_t immediate_children_path_length = path.size() + 1; for (auto it = docs_.lower_bound(prefix); it != docs_.end(); ++it) { @@ -125,7 +139,7 @@ MutableDocumentMap MemoryRemoteDocumentCache::GetDocumentsMatchingQuery( } if (mutated_docs.find(document.key()) == mutated_docs.end() && - !query.Matches(document)) { + !query_or_pipeline.Matches(document)) { continue; } diff --git a/Firestore/core/src/local/memory_remote_document_cache.h b/Firestore/core/src/local/memory_remote_document_cache.h index a637cbeceaf..bb2e020bb41 100644 --- a/Firestore/core/src/local/memory_remote_document_cache.h +++ b/Firestore/core/src/local/memory_remote_document_cache.h @@ -21,6 +21,7 @@ #include #include +#include "Firestore/core/src/core/pipeline_util.h" // Added #include "Firestore/core/src/immutable/sorted_map.h" #include "Firestore/core/src/local/memory_index_manager.h" #include "Firestore/core/src/local/remote_document_cache.h" @@ -54,12 +55,12 @@ class MemoryRemoteDocumentCache : public RemoteDocumentCache { const model::IndexOffset&, size_t) const override; model::MutableDocumentMap GetDocumentsMatchingQuery( - const core::Query& query, + const core::QueryOrPipeline& query_or_pipeline, const model::IndexOffset& offset, absl::optional limit = absl::nullopt, const model::OverlayByDocumentKeyMap& mutated_docs = {}) const override; model::MutableDocumentMap GetDocumentsMatchingQuery( - const core::Query& query, + const core::QueryOrPipeline& query_or_pipeline, const model::IndexOffset& 
offset, absl::optional&, absl::optional limit = absl::nullopt, diff --git a/Firestore/core/src/local/memory_target_cache.cc b/Firestore/core/src/local/memory_target_cache.cc index 49b1e69e1d1..72f60103d05 100644 --- a/Firestore/core/src/local/memory_target_cache.cc +++ b/Firestore/core/src/local/memory_target_cache.cc @@ -44,7 +44,7 @@ MemoryTargetCache::MemoryTargetCache(MemoryPersistence* persistence) } void MemoryTargetCache::AddTarget(const TargetData& target_data) { - targets_[target_data.target()] = target_data; + targets_[target_data.target_or_pipeline()] = target_data; if (target_data.target_id() > highest_target_id_) { highest_target_id_ = target_data.target_id(); } @@ -59,12 +59,13 @@ void MemoryTargetCache::UpdateTarget(const TargetData& target_data) { } void MemoryTargetCache::RemoveTarget(const TargetData& target_data) { - targets_.erase(target_data.target()); + targets_.erase(target_data.target_or_pipeline()); references_.RemoveReferences(target_data.target_id()); } -absl::optional MemoryTargetCache::GetTarget(const Target& target) { - auto iter = targets_.find(target); +absl::optional MemoryTargetCache::GetTarget( + const core::TargetOrPipeline& target_or_pipeline) { + auto iter = targets_.find(target_or_pipeline); return iter == targets_.end() ? absl::optional{} : iter->second; } @@ -78,20 +79,23 @@ void MemoryTargetCache::EnumerateSequenceNumbers( size_t MemoryTargetCache::RemoveTargets( model::ListenSequenceNumber upper_bound, const std::unordered_map& live_targets) { - std::vector to_remove; + // Use pointers to the keys in the map. + std::vector to_remove; for (const auto& kv : targets_) { - const Target& target = kv.first; + const core::TargetOrPipeline& target_or_pipeline = kv.first; const TargetData& target_data = kv.second; if (target_data.sequence_number() <= upper_bound) { if (live_targets.find(target_data.target_id()) == live_targets.end()) { - to_remove.push_back(&target); + // Store the address of the key. 
+ to_remove.push_back(&target_or_pipeline); references_.RemoveReferences(target_data.target_id()); } } } - for (const Target* element : to_remove) { + for (const core::TargetOrPipeline* element : to_remove) { + // Erase using the dereferenced pointer (the key itself). targets_.erase(*element); } return to_remove.size(); diff --git a/Firestore/core/src/local/memory_target_cache.h b/Firestore/core/src/local/memory_target_cache.h index 0c33b8a49a0..eebb19a0dda 100644 --- a/Firestore/core/src/local/memory_target_cache.h +++ b/Firestore/core/src/local/memory_target_cache.h @@ -47,7 +47,8 @@ class MemoryTargetCache : public TargetCache { void RemoveTarget(const TargetData& target_data) override; - absl::optional GetTarget(const core::Target& target) override; + absl::optional GetTarget( + const core::TargetOrPipeline& target_or_pipeline) override; void EnumerateSequenceNumbers( const SequenceNumberCallback& callback) override; @@ -99,8 +100,8 @@ class MemoryTargetCache : public TargetCache { /** The last received snapshot version. */ model::SnapshotVersion last_remote_snapshot_version_; - /** Maps a target to the data about that query. */ - std::unordered_map targets_; + /** Maps a target or pipeline to the data about that query. 
*/ + std::unordered_map targets_; /** * A ordered bidirectional mapping between documents and the remote target diff --git a/Firestore/core/src/local/query_engine.cc b/Firestore/core/src/local/query_engine.cc index 9d5aa38d3df..c6f3397881b 100644 --- a/Firestore/core/src/local/query_engine.cc +++ b/Firestore/core/src/local/query_engine.cc @@ -65,35 +65,41 @@ void QueryEngine::Initialize(LocalDocumentsView* local_documents) { } const DocumentMap QueryEngine::GetDocumentsMatchingQuery( - const Query& query, + const core::QueryOrPipeline& query_or_pipeline, const SnapshotVersion& last_limbo_free_snapshot_version, const DocumentKeySet& remote_keys) const { HARD_ASSERT(local_documents_view_ && index_manager_, "Initialize() not called"); const absl::optional index_result = - PerformQueryUsingIndex(query); + PerformQueryUsingIndex(query_or_pipeline); if (index_result.has_value()) { return index_result.value(); } const absl::optional key_result = PerformQueryUsingRemoteKeys( - query, remote_keys, last_limbo_free_snapshot_version); + query_or_pipeline, remote_keys, last_limbo_free_snapshot_version); if (key_result.has_value()) { return key_result.value(); } absl::optional context = QueryContext(); - auto full_scan_result = ExecuteFullCollectionScan(query, context); + auto full_scan_result = ExecuteFullCollectionScan(query_or_pipeline, context); if (index_auto_creation_enabled_) { - CreateCacheIndexes(query, context.value(), full_scan_result.size()); + CreateCacheIndexes(query_or_pipeline, context.value(), + full_scan_result.size()); } return full_scan_result; } -void QueryEngine::CreateCacheIndexes(const core::Query& query, +void QueryEngine::CreateCacheIndexes(const core::QueryOrPipeline& query, const QueryContext& context, size_t result_size) const { + if (query.IsPipeline()) { + LOG_DEBUG("SDK will skip creating cache indexes for pipelines."); + return; + } + if (context.GetDocumentReadCount() < index_auto_creation_min_collection_size_) { LOG_DEBUG( @@ -111,7 +117,7 @@ 
void QueryEngine::CreateCacheIndexes(const core::Query& query, if (context.GetDocumentReadCount() > relative_index_read_cost_per_document_ * result_size) { - index_manager_->CreateTargetIndexes(query.ToTarget()); + index_manager_->CreateTargetIndexes(query.query().ToTarget()); LOG_DEBUG( "The SDK decides to create cache indexes for query: %s, as using cache " "indexes may help improve performance.", @@ -124,7 +130,13 @@ void QueryEngine::SetIndexAutoCreationEnabled(bool is_enabled) { } absl::optional QueryEngine::PerformQueryUsingIndex( - const Query& query) const { + const core::QueryOrPipeline& query_or_pipeline) const { + if (query_or_pipeline.IsPipeline()) { + LOG_DEBUG("Skipping using indexes for pipelines."); + return absl::nullopt; + } + + const auto& query = query_or_pipeline.query(); if (query.MatchesAllDocuments()) { // Don't use indexes for queries that can be executed by scanning the // collection. @@ -184,7 +196,7 @@ absl::optional QueryEngine::PerformQueryUsingIndex( } absl::optional QueryEngine::PerformQueryUsingRemoteKeys( - const Query& query, + const core::QueryOrPipeline& query, const DocumentKeySet& remote_keys, const SnapshotVersion& last_limbo_free_snapshot_version) const { // Queries that match all documents don't benefit from using key-based @@ -203,9 +215,8 @@ absl::optional QueryEngine::PerformQueryUsingRemoteKeys( DocumentMap documents = local_documents_view_->GetDocuments(remote_keys); DocumentSet previous_results = ApplyQuery(query, documents); - if ((query.has_limit_to_first() || query.has_limit_to_last()) && - NeedsRefill(query, previous_results, remote_keys, - last_limbo_free_snapshot_version)) { + if ((query.has_limit()) && NeedsRefill(query, previous_results, remote_keys, + last_limbo_free_snapshot_version)) { return absl::nullopt; } @@ -219,7 +230,7 @@ absl::optional QueryEngine::PerformQueryUsingRemoteKeys( model::IndexOffset::CreateSuccessor(last_limbo_free_snapshot_version)); } -DocumentSet QueryEngine::ApplyQuery(const Query& 
query, +DocumentSet QueryEngine::ApplyQuery(const core::QueryOrPipeline& query, const DocumentMap& documents) const { // Sort the documents and re-apply the query filter since previously matching // documents do not necessarily still match the query. @@ -237,10 +248,18 @@ DocumentSet QueryEngine::ApplyQuery(const Query& query, } bool QueryEngine::NeedsRefill( - const Query& query, + const core::QueryOrPipeline& query_or_pipeline, const DocumentSet& sorted_previous_results, const DocumentKeySet& remote_keys, const SnapshotVersion& limbo_free_snapshot_version) const { + // TODO(pipeline): For pipelines it is simple for now, we refill for all + // limit/offset. we should implement a similar approach for query at some + // point. + if (query_or_pipeline.IsPipeline()) { + return query_or_pipeline.has_limit(); + } + + const auto& query = query_or_pipeline.query(); if (!query.has_limit()) { // Queries without limits do not need to be refilled. return false; @@ -273,7 +292,8 @@ bool QueryEngine::NeedsRefill( } const DocumentMap QueryEngine::ExecuteFullCollectionScan( - const Query& query, absl::optional& context) const { + const core::QueryOrPipeline& query, + absl::optional& context) const { LOG_DEBUG("Using full collection scan to execute query: %s", query.ToString()); return local_documents_view_->GetDocumentsMatchingQuery( @@ -282,7 +302,7 @@ const DocumentMap QueryEngine::ExecuteFullCollectionScan( const DocumentMap QueryEngine::AppendRemainingResults( const DocumentSet& indexed_results, - const Query& query, + const core::QueryOrPipeline& query, const model::IndexOffset& offset) const { // Retrieve all results for documents that were updated since the offset. 
DocumentMap remaining_results = diff --git a/Firestore/core/src/local/query_engine.h b/Firestore/core/src/local/query_engine.h index 7573bbcad8a..031ec1bdb62 100644 --- a/Firestore/core/src/local/query_engine.h +++ b/Firestore/core/src/local/query_engine.h @@ -17,6 +17,7 @@ #ifndef FIRESTORE_CORE_SRC_LOCAL_QUERY_ENGINE_H_ #define FIRESTORE_CORE_SRC_LOCAL_QUERY_ENGINE_H_ +#include "Firestore/core/src/core/pipeline_util.h" // Added for QueryOrPipeline #include "Firestore/core/src/model/model_fwd.h" namespace firebase { @@ -75,7 +76,7 @@ class QueryEngine { virtual void Initialize(LocalDocumentsView* local_documents); const model::DocumentMap GetDocumentsMatchingQuery( - const core::Query& query, + const core::QueryOrPipeline& query_or_pipeline, const model::SnapshotVersion& last_limbo_free_snapshot_version, const model::DocumentKeySet& remote_keys) const; @@ -90,26 +91,26 @@ class QueryEngine { * persisted index values. Returns nullopt if an index is not available. */ absl::optional PerformQueryUsingIndex( - const core::Query& query) const; + const core::QueryOrPipeline& query_or_pipeline) const; /** * Performs a query based on the target's persisted query mapping. Returns * nullopt if the mapping is not available or cannot be used. */ absl::optional PerformQueryUsingRemoteKeys( - const core::Query& query, + const core::QueryOrPipeline& query_or_pipeline, const model::DocumentKeySet& remote_keys, const model::SnapshotVersion& last_limbo_free_snapshot_version) const; /** Applies the query filter and sorting to the provided documents. */ - model::DocumentSet ApplyQuery(const core::Query& query, + model::DocumentSet ApplyQuery(const core::QueryOrPipeline& query_or_pipeline, const model::DocumentMap& documents) const; /** * Determines if a limit query needs to be refilled from cache, making it * ineligible for index-free execution. * - * @param query The query for refill calculation. + * @param query_or_pipeline The query for refill calculation. 
* @param sorted_previous_results The documents that matched the query when it * was last synchronized, sorted by the query's comparator. * @param remote_keys The document keys that matched the query at the last @@ -118,13 +119,14 @@ class QueryEngine { * query was last synchronized. */ bool NeedsRefill( - const core::Query& query, + const core::QueryOrPipeline& query_or_pipeline, const model::DocumentSet& sorted_previous_results, const model::DocumentKeySet& remote_keys, const model::SnapshotVersion& limbo_free_snapshot_version) const; const model::DocumentMap ExecuteFullCollectionScan( - const core::Query& query, absl::optional& context) const; + const core::QueryOrPipeline& query_or_pipeline, + absl::optional& context) const; /** * Combines the results from an indexed execution with the remaining documents @@ -132,10 +134,10 @@ class QueryEngine { */ const model::DocumentMap AppendRemainingResults( const model::DocumentSet& indexedResults, - const core::Query& query, + const core::QueryOrPipeline& query_or_pipeline, const model::IndexOffset& offset) const; - void CreateCacheIndexes(const core::Query& query, + void CreateCacheIndexes(const core::QueryOrPipeline& query_or_pipeline, const QueryContext& context, size_t result_size) const; diff --git a/Firestore/core/src/local/remote_document_cache.h b/Firestore/core/src/local/remote_document_cache.h index bfe84648c93..2afe0aac43e 100644 --- a/Firestore/core/src/local/remote_document_cache.h +++ b/Firestore/core/src/local/remote_document_cache.h @@ -19,6 +19,7 @@ #include +#include "Firestore/core/src/core/pipeline_util.h" // Added #include "Firestore/core/src/model/document_key.h" #include "Firestore/core/src/model/model_fwd.h" #include "Firestore/core/src/model/overlay.h" @@ -103,7 +104,7 @@ class RemoteDocumentCache { * * Cached DeletedDocument entries have no bearing on query results. * - * @param query The query to match documents against. + * @param query_or_pipeline The query to match documents against. 
* @param offset The read time and document key to start scanning at * (exclusive). * @param limit The maximum number of results to return. @@ -113,7 +114,7 @@ class RemoteDocumentCache { * @return The set of matching documents. */ virtual model::MutableDocumentMap GetDocumentsMatchingQuery( - const core::Query& query, + const core::QueryOrPipeline& query_or_pipeline, const model::IndexOffset& offset, absl::optional limit = absl::nullopt, const model::OverlayByDocumentKeyMap& mutated_docs = {}) const = 0; @@ -126,7 +127,7 @@ class RemoteDocumentCache { * * Cached DeletedDocument entries have no bearing on query results. * - * @param query The query to match documents against. + * @param query_or_pipeline The query to match documents against. * @param offset The read time and document key to start scanning at * (exclusive). * @param context A optional tracker to keep a record of important details @@ -138,7 +139,7 @@ class RemoteDocumentCache { * @return The set of matching documents. */ virtual model::MutableDocumentMap GetDocumentsMatchingQuery( - const core::Query& query, + const core::QueryOrPipeline& query_or_pipeline, const model::IndexOffset& offset, absl::optional& context, absl::optional limit = absl::nullopt, diff --git a/Firestore/core/src/local/target_cache.h b/Firestore/core/src/local/target_cache.h index 08afe46fbf2..bef2976103b 100644 --- a/Firestore/core/src/local/target_cache.h +++ b/Firestore/core/src/local/target_cache.h @@ -20,6 +20,7 @@ #include #include +#include "Firestore/core/src/core/pipeline_util.h" // Added for TargetOrPipeline #include "Firestore/core/src/model/model_fwd.h" #include "Firestore/core/src/model/types.h" @@ -81,13 +82,16 @@ class TargetCache { virtual void RemoveTarget(const TargetData& target_data) = 0; /** - * Looks up a TargetData entry in the cache. + * Looks up a TargetData entry in the cache using either a Target or a + * RealtimePipeline. * - * @param target The target corresponding to the entry to look up. 
+ * @param target_or_pipeline The target or pipeline corresponding to the + * entry to look up. * @return The cached TargetData entry, or nullopt if the cache has no entry - * for the target. + * for the target or pipeline. */ - virtual absl::optional GetTarget(const core::Target& target) = 0; + virtual absl::optional GetTarget( + const core::TargetOrPipeline& target_or_pipeline) = 0; /** Enumerates all sequence numbers in the TargetCache. */ virtual void EnumerateSequenceNumbers( diff --git a/Firestore/core/src/local/target_data.cc b/Firestore/core/src/local/target_data.cc index 4512e2f5d89..7df1d8a312c 100644 --- a/Firestore/core/src/local/target_data.cc +++ b/Firestore/core/src/local/target_data.cc @@ -26,6 +26,7 @@ namespace local { namespace { using core::Target; +using core::TargetOrPipeline; using model::ListenSequenceNumber; using model::SnapshotVersion; using model::TargetId; @@ -56,7 +57,7 @@ std::ostream& operator<<(std::ostream& os, QueryPurpose purpose) { // MARK: - TargetData -TargetData::TargetData(Target target, +TargetData::TargetData(TargetOrPipeline target, TargetId target_id, ListenSequenceNumber sequence_number, QueryPurpose purpose, @@ -75,7 +76,7 @@ TargetData::TargetData(Target target, expected_count_(std::move(expected_count)) { } -TargetData::TargetData(Target target, +TargetData::TargetData(TargetOrPipeline target, int target_id, ListenSequenceNumber sequence_number, QueryPurpose purpose) @@ -128,7 +129,8 @@ TargetData TargetData::WithLastLimboFreeSnapshotVersion( } bool operator==(const TargetData& lhs, const TargetData& rhs) { - return lhs.target() == rhs.target() && lhs.target_id() == rhs.target_id() && + return lhs.target_or_pipeline() == rhs.target_or_pipeline() && + lhs.target_id() == rhs.target_id() && lhs.sequence_number() == rhs.sequence_number() && lhs.purpose() == rhs.purpose() && lhs.snapshot_version() == rhs.snapshot_version() && @@ -148,7 +150,7 @@ std::string TargetData::ToString() const { } std::ostream& 
operator<<(std::ostream& os, const TargetData& value) { - return os << "TargetData(target=" << value.target_ + return os << "TargetData(target=" << value.target_.ToString() << ", target_id=" << value.target_id_ << ", purpose=" << value.purpose_ << ", version=" << value.snapshot_version_ diff --git a/Firestore/core/src/local/target_data.h b/Firestore/core/src/local/target_data.h index 5a6a53370e0..f3c9411cbee 100644 --- a/Firestore/core/src/local/target_data.h +++ b/Firestore/core/src/local/target_data.h @@ -22,6 +22,7 @@ #include #include +#include "Firestore/core/src/core/pipeline_util.h" #include "Firestore/core/src/core/target.h" #include "Firestore/core/src/model/snapshot_version.h" #include "Firestore/core/src/model/types.h" @@ -77,7 +78,7 @@ class TargetData { * at the resume token or read time. Documents are counted only when making a * listen request with resume token or read time, otherwise, keep it null. */ - TargetData(core::Target target, + TargetData(core::TargetOrPipeline target, model::TargetId target_id, model::ListenSequenceNumber sequence_number, QueryPurpose purpose, @@ -90,7 +91,7 @@ class TargetData { * Convenience constructor for use when creating a TargetData for the first * time. */ - TargetData(const core::Target target, + TargetData(const core::TargetOrPipeline target, int target_id, model::ListenSequenceNumber sequence_number, QueryPurpose purpose); @@ -108,7 +109,7 @@ class TargetData { static TargetData Invalid(); /** The target being listened to. 
*/ - const core::Target& target() const { + const core::TargetOrPipeline& target_or_pipeline() const { return target_; } @@ -191,7 +192,7 @@ class TargetData { friend std::ostream& operator<<(std::ostream& os, const TargetData& value); private: - core::Target target_; + core::TargetOrPipeline target_; model::TargetId target_id_ = 0; model::ListenSequenceNumber sequence_number_ = 0; QueryPurpose purpose_ = QueryPurpose::Listen; diff --git a/Firestore/core/src/remote/remote_event.cc b/Firestore/core/src/remote/remote_event.cc index 52e83cbfbaf..4ab8e9132e6 100644 --- a/Firestore/core/src/remote/remote_event.cc +++ b/Firestore/core/src/remote/remote_event.cc @@ -237,6 +237,13 @@ create_existence_filter_mismatch_info_for_testing_hooks( std::move(bloom_filter_info)}; } +bool IsSingleDocumentTarget(const core::TargetOrPipeline target_or_pipeline) { + // TODO(pipeline): We only handle the non-pipeline case because realtime + // pipeline does not support single document lookup yet. + return !target_or_pipeline.IsPipeline() && + target_or_pipeline.target().IsDocumentQuery(); +} + } // namespace void WatchChangeAggregator::HandleExistenceFilter( @@ -246,25 +253,10 @@ void WatchChangeAggregator::HandleExistenceFilter( absl::optional target_data = TargetDataForActiveTarget(target_id); if (target_data) { - const Target& target = target_data->target(); - if (target.IsDocumentQuery()) { - if (expected_count == 0) { - // The existence filter told us the document does not exist. We deduce - // that this document does not exist and apply a deleted document to our - // updates. Without applying this deleted document there might be - // another query that will raise this document as part of a snapshot - // until it is resolved, essentially exposing inconsistency between - // queries. 
- DocumentKey key{target.path()}; - RemoveDocumentFromTarget( - target_id, key, - MutableDocument::NoDocument(key, SnapshotVersion::None())); - } else { - HARD_ASSERT(expected_count == 1, - "Single document existence filter with count: %s", - expected_count); - } - } else { + const core::TargetOrPipeline& target_or_pipeline = + target_data->target_or_pipeline(); + + if (!IsSingleDocumentTarget(target_or_pipeline)) { int current_size = GetCurrentDocumentCountForTarget(target_id); if (current_size != expected_count) { // Apply bloom filter to identify and mark removed documents. @@ -292,6 +284,23 @@ void WatchChangeAggregator::HandleExistenceFilter( target_metadata_provider_->GetDatabaseId(), std::move(bloom_filter), status)); } + } else { + if (expected_count == 0) { + // The existence filter told us the document does not exist. We deduce + // that this document does not exist and apply a deleted document to our + // updates. Without applying this deleted document there might be + // another query that will raise this document as part of a snapshot + // until it is resolved, essentially exposing inconsistency between + // queries. + DocumentKey key{target_or_pipeline.target().path()}; + RemoveDocumentFromTarget( + target_id, key, + MutableDocument::NoDocument(key, SnapshotVersion::None())); + } else { + HARD_ASSERT(expected_count == 1, + "Single document existence filter with count: %s", + expected_count); + } } } } @@ -368,13 +377,14 @@ RemoteEvent WatchChangeAggregator::CreateRemoteEvent( absl::optional target_data = TargetDataForActiveTarget(target_id); if (target_data) { - if (target_state.current() && target_data->target().IsDocumentQuery()) { + if (target_state.current() && + IsSingleDocumentTarget(target_data->target_or_pipeline())) { // Document queries for document that don't exist can produce an empty // result set. To update our local cache, we synthesize a document // delete if we have not previously received the document. 
This resolves // the limbo state of the document, removing it from // SyncEngine::limbo_document_refs_. - DocumentKey key{target_data->target().path()}; + DocumentKey key{target_data->target_or_pipeline().target().path()}; if (pending_document_updates_.find(key) == pending_document_updates_.end() && !TargetContainsDocument(target_id, key)) { diff --git a/Firestore/core/src/remote/remote_store.cc b/Firestore/core/src/remote/remote_store.cc index 1bf4370240b..19d9852a6ef 100644 --- a/Firestore/core/src/remote/remote_store.cc +++ b/Firestore/core/src/remote/remote_store.cc @@ -342,7 +342,7 @@ void RemoteStore::RaiseWatchSnapshot(const SnapshotVersion& snapshot_version) { // Clear the resume token for the query, since we're in a known mismatch // state. target_data = - TargetData(target_data.target(), target_id, + TargetData(target_data.target_or_pipeline(), target_id, target_data.sequence_number(), target_data.purpose()); listen_targets_[target_id] = target_data; @@ -354,7 +354,7 @@ void RemoteStore::RaiseWatchSnapshot(const SnapshotVersion& snapshot_version) { // mismatch, but don't actually retain that in listen_targets_. This ensures // that we flag the first re-listen this way without impacting future // listens of this target (that might happen e.g. on reconnect). 
- TargetData request_target_data(target_data.target(), target_id, + TargetData request_target_data(target_data.target_or_pipeline(), target_id, target_data.sequence_number(), purpose); SendWatchRequest(request_target_data); } diff --git a/Firestore/core/src/remote/serializer.cc b/Firestore/core/src/remote/serializer.cc index 889767b84ee..faa2e687a69 100644 --- a/Firestore/core/src/remote/serializer.cc +++ b/Firestore/core/src/remote/serializer.cc @@ -34,6 +34,7 @@ #include "Firestore/core/include/firebase/firestore/timestamp.h" #include "Firestore/core/src/core/bound.h" #include "Firestore/core/src/core/field_filter.h" +#include "Firestore/core/src/core/pipeline_util.h" #include "Firestore/core/src/core/query.h" #include "Firestore/core/src/local/target_data.h" #include "Firestore/core/src/model/delete_mutation.h" @@ -633,14 +634,22 @@ FieldTransform Serializer::DecodeFieldTransform( google_firestore_v1_Target Serializer::EncodeTarget( const TargetData& target_data) const { google_firestore_v1_Target result{}; - const Target& target = target_data.target(); - - if (target.IsDocumentQuery()) { + const core::TargetOrPipeline& target_or_pipeline = + target_data.target_or_pipeline(); + + if (target_or_pipeline.IsPipeline()) { + result.which_target_type = google_firestore_v1_Target_pipeline_query_tag; + result.target_type.pipeline_query.which_pipeline_type = + google_firestore_v1_Target_PipelineQueryTarget_structured_pipeline_tag; + result.target_type.pipeline_query.structured_pipeline = + EncodeRealtimePipeline(target_or_pipeline.pipeline()); + } else if (target_or_pipeline.target().IsDocumentQuery()) { result.which_target_type = google_firestore_v1_Target_documents_tag; - result.target_type.documents = EncodeDocumentsTarget(target); - } else { + result.target_type.documents = + EncodeDocumentsTarget(target_or_pipeline.target()); + } else { // query target result.which_target_type = google_firestore_v1_Target_query_tag; - result.target_type.query = 
EncodeQueryTarget(target); + result.target_type.query = EncodeQueryTarget(target_or_pipeline.target()); } result.target_id = target_data.target_id(); @@ -1206,17 +1215,33 @@ Serializer::DecodeCursorValue(google_firestore_v1_Cursor& cursor) const { return index_components; } -google_firestore_v1_StructuredPipeline Serializer::EncodePipeline( - const api::Pipeline& pipeline) const { +namespace { +template +google_firestore_v1_StructuredPipeline EncodeStages( + const std::vector>& stage_list) { google_firestore_v1_StructuredPipeline result; - result.pipeline = pipeline.to_proto().pipeline_value; + result.pipeline = google_firestore_v1_Pipeline{}; + nanopb::SetRepeatedField( + &result.pipeline.stages, &result.pipeline.stages_count, stage_list, + [](const std::shared_ptr& arg) { return arg->to_proto(); }); result.options_count = 0; result.options = nullptr; return result; } +} // namespace + +google_firestore_v1_StructuredPipeline Serializer::EncodePipeline( + const api::Pipeline& pipeline) const { + return EncodeStages(pipeline.stages()); +} + +google_firestore_v1_StructuredPipeline Serializer::EncodeRealtimePipeline( + const api::RealtimePipeline& pipeline) const { + return EncodeStages(pipeline.rewritten_stages()); +} /* static */ pb_bytes_array_t* Serializer::EncodeFieldPath(const FieldPath& field_path) { @@ -1530,6 +1555,244 @@ api::PipelineSnapshot Serializer::DecodePipelineResponse( return api::PipelineSnapshot(std::move(results), execution_time); } +absl::optional Serializer::DecodePipelineTarget( + util::ReadContext* context, + const google_firestore_v1_Target_PipelineQueryTarget& proto) const { + if (!context->status().ok()) { + return absl::nullopt; + } + + if (proto.which_pipeline_type != + google_firestore_v1_Target_PipelineQueryTarget_structured_pipeline_tag) { + context->Fail( + StringFormat("Unknown pipeline_type in PipelineQueryTarget: %d", + proto.which_pipeline_type)); + return absl::nullopt; + } + + const auto& pipeline_proto = 
proto.structured_pipeline.pipeline; + std::vector> decoded_stages; + decoded_stages.reserve(pipeline_proto.stages_count); + + for (pb_size_t i = 0; i < pipeline_proto.stages_count; ++i) { + auto stage_ptr = DecodeStage(context, pipeline_proto.stages[i]); + if (!context->status().ok()) { + return absl::nullopt; + } + decoded_stages.push_back(std::move(stage_ptr)); + } + + return core::TargetOrPipeline(api::RealtimePipeline( + std::move(decoded_stages), std::make_unique(*this))); +} + +std::unique_ptr Serializer::DecodeStage( + util::ReadContext* context, + const google_firestore_v1_Pipeline_Stage& proto_stage) + const { // Corrected proto type + if (!context->status().ok()) return nullptr; + + std::string stage_name = DecodeString(proto_stage.name); + + // Access args from google_firestore_v1_Pipeline_Stage + const pb_size_t args_count = proto_stage.args_count; + const google_firestore_v1_Value* current_args = proto_stage.args; + + if (stage_name == "collection") { + if (args_count >= 1 && current_args[0].which_value_type == + google_firestore_v1_Value_reference_value_tag) { + return std::make_unique( + DecodeString(current_args[0].reference_value)); + } + context->Fail("Invalid 'collection' stage: missing or invalid arguments"); + return nullptr; + } else if (stage_name == "collection_group") { + if (args_count >= 1 && current_args[0].which_value_type == + google_firestore_v1_Value_string_value_tag) { + return std::make_unique( + DecodeString(current_args[0].string_value)); + } + context->Fail( + "Invalid 'collection_group' stage: missing or invalid arguments"); + return nullptr; + } else if (stage_name == "documents") { + std::vector document_paths; + // args_count can be 0 for an empty DocumentsSource. + // nanopb guarantees that if args_count > 0, args will not be null. 
+ document_paths.reserve(args_count); + for (pb_size_t i = 0; i < args_count; ++i) { + if (current_args[i].which_value_type == + google_firestore_v1_Value_string_value_tag) { + document_paths.push_back(DecodeString(current_args[i].string_value)); + } else { + context->Fail(StringFormat( + "Invalid argument type for 'documents' stage at index %zu: " + "expected string_value, got %d", + i, current_args[i].which_value_type)); + return nullptr; + } + } + return std::make_unique(std::move(document_paths)); + } else if (stage_name == "where") { + if (args_count >= 1) { + auto expr = DecodeExpression(context, current_args[0]); + if (!context->status().ok()) return nullptr; + return std::make_unique(std::move(expr)); + } + context->Fail("Invalid 'where' stage: missing or invalid arguments"); + return nullptr; + } else if (stage_name == "limit") { + if (args_count >= 1) { + const auto& limit_arg = current_args[0]; + if (limit_arg.which_value_type == + google_firestore_v1_Value_integer_value_tag) { + return std::make_unique(limit_arg.integer_value); + } + } + context->Fail("Invalid 'limit' stage: missing or invalid arguments"); + return nullptr; + } else if (stage_name == "sort") { + if (args_count > 0) { + std::vector orderings; + orderings.reserve(args_count); + for (pb_size_t i = 0; i < args_count; ++i) { + auto ordering = DecodeOrdering(context, current_args[i]); + if (!context->status().ok()) return nullptr; + orderings.push_back(ordering); + } + return std::make_unique( + std::move(orderings)); // Corrected class name + } + context->Fail("Invalid 'sort' stage: missing arguments"); + return nullptr; + } + + context->Fail(StringFormat("Unsupported stage type: %s", stage_name)); + return nullptr; +} + +std::unique_ptr Serializer::DecodeExpression( + util::ReadContext* context, + const google_firestore_v1_Value& proto_value) const { + if (!context->status().ok()) return nullptr; + + switch (proto_value.which_value_type) { + case 
google_firestore_v1_Value_field_reference_value_tag: { + // This could be a document name, OR if used for field paths in + // expressions: + StatusOr path = FieldPath::FromDotSeparatedString( + DecodeString(proto_value.reference_value)); + if (path.ok()) { + return std::make_unique(path.ConsumeValueOrDie()); + } + context->Fail("Unable to parse field from proto"); + return nullptr; + } + + case google_firestore_v1_Value_function_value_tag: + return std::make_unique(DecodeFunctionExpression( + context, + proto_value + .function_value)); // Pass proto_value.function_value directly + + default: + // All other types are constants + // DeepClone to avoid double-free + return std::make_unique( + SharedMessage(DeepClone(proto_value))); + } +} + +api::FunctionExpr Serializer::DecodeFunctionExpression( + util::ReadContext* context, + const google_firestore_v1_Function& proto_function) const { + if (!context->status().ok()) return api::FunctionExpr("", {}); + + std::string func_name = DecodeString(proto_function.name); + std::vector> decoded_args; + decoded_args.reserve(proto_function.args_count); + + for (pb_size_t i = 0; i < proto_function.args_count; ++i) { + auto arg_expr = DecodeExpression(context, proto_function.args[i]); + if (!context->status().ok()) return api::FunctionExpr("", {}); + decoded_args.push_back(std::move(arg_expr)); + } + return api::FunctionExpr(std::move(func_name), std::move(decoded_args)); +} + +api::Ordering Serializer::DecodeOrdering( + util::ReadContext* context, + const google_firestore_v1_Value& proto_value) const { + if (!context->status().ok()) { + return api::Ordering(nullptr, api::Ordering::Direction::ASCENDING); + } + + if (proto_value.which_value_type != google_firestore_v1_Value_map_value_tag) { + context->Fail("Invalid proto_value type for Ordering, expected map_value."); + return api::Ordering(nullptr, api::Ordering::Direction::ASCENDING); + } + + std::shared_ptr decoded_expr = nullptr; + absl::optional decoded_direction; + + const 
auto& map_value = proto_value.map_value; + for (pb_size_t i = 0; i < map_value.fields_count; ++i) { + const auto& field = map_value.fields[i]; + std::string key = DecodeString(field.key); + + if (key == "expression") { + if (decoded_expr) { + context->Fail("Duplicate 'expression' field in Ordering proto."); + return api::Ordering(nullptr, api::Ordering::Direction::ASCENDING); + } + decoded_expr = DecodeExpression(context, field.value); + if (!context->status().ok()) { + // Error already set by DecodeExpression + return api::Ordering(nullptr, api::Ordering::Direction::ASCENDING); + } + } else if (key == "direction") { + if (decoded_direction) { + context->Fail("Duplicate 'direction' field in Ordering proto."); + return api::Ordering(nullptr, api::Ordering::Direction::ASCENDING); + } + if (field.value.which_value_type != + google_firestore_v1_Value_string_value_tag) { + context->Fail( + "Invalid type for 'direction' field in Ordering proto, expected " + "string_value."); + return api::Ordering(nullptr, api::Ordering::Direction::ASCENDING); + } + std::string direction_str = DecodeString(field.value.string_value); + if (direction_str == "ascending") { + decoded_direction = api::Ordering::Direction::ASCENDING; + } else if (direction_str == "descending") { + decoded_direction = api::Ordering::Direction::DESCENDING; + } else { + context->Fail(StringFormat( + "Invalid string value '%s' for 'direction' field in Ordering " + "proto.", + direction_str)); + return api::Ordering(nullptr, api::Ordering::Direction::ASCENDING); + } + } else { + // Unknown fields are ignored by protobuf spec, but we can be stricter + // if needed. For now, ignore. 
+ } + } + + if (!decoded_expr) { + context->Fail("Missing 'expression' field in Ordering proto."); + return api::Ordering(nullptr, api::Ordering::Direction::ASCENDING); + } + + if (!decoded_direction) { + context->Fail("Missing 'direction' field in Ordering proto."); + return api::Ordering(nullptr, api::Ordering::Direction::ASCENDING); + } + + return api::Ordering(std::move(decoded_expr), decoded_direction.value()); +} + } // namespace remote } // namespace firestore } // namespace firebase diff --git a/Firestore/core/src/remote/serializer.h b/Firestore/core/src/remote/serializer.h index 2105b7eb754..f8d8015a81d 100644 --- a/Firestore/core/src/remote/serializer.h +++ b/Firestore/core/src/remote/serializer.h @@ -27,8 +27,12 @@ #include "Firestore/Protos/nanopb/google/firestore/v1/document.nanopb.h" #include "Firestore/Protos/nanopb/google/firestore/v1/firestore.nanopb.h" +#include "Firestore/Protos/nanopb/google/firestore/v1/query.nanopb.h" #include "Firestore/Protos/nanopb/google/type/latlng.nanopb.h" +#include "Firestore/core/src/api/expressions.h" #include "Firestore/core/src/api/pipeline.h" +#include "Firestore/core/src/api/realtime_pipeline.h" +#include "Firestore/core/src/api/stages.h" #include "Firestore/core/src/core/composite_filter.h" #include "Firestore/core/src/core/core_fwd.h" #include "Firestore/core/src/core/field_filter.h" @@ -57,8 +61,6 @@ enum class QueryPurpose; namespace remote { -core::Target InvalidTarget(); - /** * @brief Converts internal model objects to their equivalent protocol buffer * form, and protocol buffer objects to their equivalent bytes. 
@@ -208,6 +210,13 @@ class Serializer { google_firestore_v1_StructuredPipeline EncodePipeline( const api::Pipeline& pipeline) const; + google_firestore_v1_StructuredPipeline EncodeRealtimePipeline( + const api::RealtimePipeline& pipeline) const; + + absl::optional DecodePipelineTarget( + util::ReadContext* context, + const google_firestore_v1_Target_PipelineQueryTarget& proto) const; + /** * Decodes the watch change. Modifies the provided proto to release * ownership of any Value messages. @@ -356,6 +365,20 @@ class Serializer { model::DatabaseId database_id_; // TODO(varconst): Android caches the result of calling `EncodeDatabaseName` // as well, consider implementing that. + + // Helper methods for DecodePipelineTarget + std::unique_ptr DecodeStage( + util::ReadContext* context, + const google_firestore_v1_Pipeline_Stage& proto_stage) const; + std::unique_ptr DecodeExpression( + util::ReadContext* context, + const google_firestore_v1_Value& proto_value) const; + api::FunctionExpr DecodeFunctionExpression( + util::ReadContext* context, + const google_firestore_v1_Function& proto_function) const; + api::Ordering DecodeOrdering( + util::ReadContext* context, + const google_firestore_v1_Value& proto_value) const; }; } // namespace remote diff --git a/Firestore/core/test/unit/core/event_manager_test.cc b/Firestore/core/test/unit/core/event_manager_test.cc index 2a9d7a49f75..40943b2a16c 100644 --- a/Firestore/core/test/unit/core/event_manager_test.cc +++ b/Firestore/core/test/unit/core/event_manager_test.cc @@ -51,13 +51,14 @@ ViewSnapshotListener NoopViewSnapshotHandler() { [](const StatusOr&) {}); } -std::shared_ptr NoopQueryListener(core::Query query) { +std::shared_ptr NoopQueryListener(core::QueryOrPipeline query) { return QueryListener::Create(std::move(query), ListenOptions::DefaultOptions(), NoopViewSnapshotHandler()); } -std::shared_ptr NoopQueryCacheListener(core::Query query) { +std::shared_ptr NoopQueryCacheListener( + core::QueryOrPipeline query) { return 
QueryListener::Create( std::move(query), ListenOptions::FromOptions(/** include_metadata_changes= */ false, @@ -68,14 +69,15 @@ std::shared_ptr NoopQueryCacheListener(core::Query query) { class MockEventSource : public core::QueryEventSource { public: MOCK_METHOD1(SetCallback, void(core::SyncEngineCallback*)); - MOCK_METHOD2(Listen, model::TargetId(core::Query, bool)); - MOCK_METHOD1(ListenToRemoteStore, void(core::Query)); - MOCK_METHOD2(StopListening, void(const core::Query&, bool)); - MOCK_METHOD1(StopListeningToRemoteStoreOnly, void(const core::Query&)); + MOCK_METHOD2(Listen, model::TargetId(core::QueryOrPipeline, bool)); + MOCK_METHOD1(ListenToRemoteStore, void(core::QueryOrPipeline)); + MOCK_METHOD2(StopListening, void(const core::QueryOrPipeline&, bool)); + MOCK_METHOD1(StopListeningToRemoteStoreOnly, + void(const core::QueryOrPipeline&)); }; TEST(EventManagerTest, HandlesManyListenersPerQuery) { - core::Query query = Query("foo/bar"); + auto query = QueryOrPipeline(Query("foo/bar")); auto listener1 = NoopQueryListener(query); auto listener2 = NoopQueryListener(query); @@ -95,7 +97,7 @@ TEST(EventManagerTest, HandlesManyListenersPerQuery) { } TEST(EventManagerTest, HandlesManyCacheListenersPerQuery) { - core::Query query = Query("foo/bar"); + auto query = QueryOrPipeline(Query("foo/bar")); auto listener1 = NoopQueryCacheListener(query); auto listener2 = NoopQueryCacheListener(query); @@ -125,7 +127,7 @@ TEST(EventManagerTest, HandlesUnlistenOnUnknownListenerGracefully) { event_manager.RemoveQueryListener(listener); } -ViewSnapshot make_empty_view_snapshot(const core::Query& query) { +ViewSnapshot make_empty_view_snapshot(const core::QueryOrPipeline& query) { DocumentSet empty_docs{query.Comparator()}; // sync_state_changed has to be `true` to prevent an assertion about a // meaningless view snapshot. 
@@ -141,8 +143,8 @@ ViewSnapshot make_empty_view_snapshot(const core::Query& query) { } TEST(EventManagerTest, NotifiesListenersInTheRightOrder) { - core::Query query1 = Query("foo/bar"); - core::Query query2 = Query("bar/baz"); + auto query1 = QueryOrPipeline(Query("foo/bar")); + auto query2 = QueryOrPipeline(Query("bar/baz")); std::vector event_order; auto listener1 = QueryListener::Create(query1, [&](StatusOr) { diff --git a/Firestore/core/test/unit/core/pipeline/canonify_eq_test.cc b/Firestore/core/test/unit/core/pipeline/canonify_eq_test.cc new file mode 100644 index 00000000000..c8c2e7b8bf7 --- /dev/null +++ b/Firestore/core/test/unit/core/pipeline/canonify_eq_test.cc @@ -0,0 +1,317 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include + +#include "Firestore/core/src/api/expressions.h" +#include "Firestore/core/src/api/firestore.h" +#include "Firestore/core/src/api/ordering.h" +#include "Firestore/core/src/api/realtime_pipeline.h" +#include "Firestore/core/src/api/stages.h" +#include "Firestore/core/src/core/pipeline_util.h" // Target of testing +#include "Firestore/core/src/model/database_id.h" +#include "Firestore/core/src/model/document_key.h" +#include "Firestore/core/src/model/field_path.h" +#include "Firestore/core/src/model/resource_path.h" +#include "Firestore/core/test/unit/core/pipeline/utils.h" +#include "Firestore/core/test/unit/testutil/expression_test_util.h" +#include "Firestore/core/test/unit/testutil/testutil.h" +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace firebase { +namespace firestore { +namespace core { + +using api::AggregateStage; +using api::CollectionGroupSource; +using api::CollectionSource; +using api::DatabaseSource; +using api::DocumentsSource; +using api::EvaluableStage; +using api::Expr; +using api::Field; +using api::FindNearestStage; +using api::Firestore; +using api::LimitStage; +using api::OffsetStage; +using api::Ordering; +using api::RealtimePipeline; +using api::SelectStage; +using api::SortStage; +using api::Where; +// using api::AddFields; // Not EvaluableStage +// using api::DistinctStage; // Not EvaluableStage + +using model::DatabaseId; +using model::DocumentKey; +using model::FieldPath; +using model::ResourcePath; +using testing::ElementsAre; +using testing::UnorderedElementsAre; +using testutil::Array; +using testutil::Doc; +using testutil::Map; +using testutil::SharedConstant; +using testutil::Value; +// Expression helpers +using testutil::EqAnyExpr; +using testutil::EqExpr; + +// Helper to get canonical ID directly for RealtimePipeline +std::string GetPipelineCanonicalId(const RealtimePipeline& pipeline) { + QueryOrPipeline variant = pipeline; + // Use the specific helper for QueryOrPipeline 
canonicalization + return variant.CanonicalId(); +} + +// Test Fixture +class CanonifyEqPipelineTest : public ::testing::Test { + public: + // Helper to create a pipeline starting with a collection stage + RealtimePipeline StartPipeline(const std::string& collection_path) { + std::vector> stages; + stages.push_back(std::make_shared(collection_path)); + return RealtimePipeline(std::move(stages), TestSerializer()); + } + // Helper to create a pipeline starting with a collection group stage + RealtimePipeline StartCollectionGroupPipeline( + const std::string& collection_id) { + std::vector> stages; + stages.push_back(std::make_shared(collection_id)); + return RealtimePipeline(std::move(stages), TestSerializer()); + } + // Helper to create a pipeline starting with a database stage + RealtimePipeline StartDatabasePipeline() { + std::vector> stages; + stages.push_back(std::make_shared()); + return RealtimePipeline(std::move(stages), TestSerializer()); + } + // Helper to create a pipeline starting with a documents stage + // Note: DocumentsSource is not EvaluableStage, this helper is problematic + RealtimePipeline StartDocumentsPipeline( + const std::vector& /* doc_paths */) { + std::vector> stages; + // Cannot construct RealtimePipeline with DocumentsSource directly + return RealtimePipeline({}, TestSerializer()); + } +}; + +// =================================================================== +// Canonify Tests (Using EXACT expected strings from TS tests) +// These will FAIL until C++ canonicalization is implemented correctly. 
+// =================================================================== + +TEST_F(CanonifyEqPipelineTest, CanonifySimpleWhere) { + RealtimePipeline p = StartPipeline("test"); + p = p.AddingStage(std::make_shared(EqExpr( + {std::make_shared("foo"), SharedConstant(Value(42LL))}))); + + EXPECT_EQ(GetPipelineCanonicalId(p), + "collection(test)|where(fn(eq[fld(foo),cst(42)]))|sort(fld(__name__" + ")asc)"); +} + +TEST_F(CanonifyEqPipelineTest, CanonifyMultipleStages) { + RealtimePipeline p = StartPipeline("test"); + p = p.AddingStage(std::make_shared(EqExpr( + {std::make_shared("foo"), SharedConstant(Value(42LL))}))); + p = p.AddingStage(std::make_shared(10)); + p = p.AddingStage(std::make_shared( + std::vector{Ordering(std::make_shared("bar"), + api::Ordering::Direction::DESCENDING)})); + EXPECT_EQ(GetPipelineCanonicalId(p), + "collection(test)|where(fn(eq[fld(foo),cst(42)]))|sort(fld(__name__" + ")asc)|limit(10)|sort(fld(bar)desc,fld(__name__)asc)"); +} + +// TEST_F(CanonifyEqPipelineTest, CanonifyAddFields) { +// // Requires constructing pipeline with AddFields stage +// // RealtimePipeline p = StartPipeline("test"); +// // p = p.AddingStage(std::make_shared(...)); // AddFields +// not Evaluable +// // EXPECT_EQ(GetPipelineCanonicalId(p), +// // +// "collection(/test)|add_fields(__create_time__=fld(__create_time__),__name__=fld(__name__),__update_time__=fld(__update_time__),existingField=fld(existingField),val=cst(10))|sort(fld(__name__)ascending)"); +// } + +// TEST_F(CanonifyEqPipelineTest, CanonifyAggregateWithGrouping) { +// // Requires constructing pipeline with AggregateStage stage +// // RealtimePipeline p = StartPipeline("test"); +// // std::unordered_map> +// accumulators; +// // accumulators["totalValue"] = std::make_shared("sum", +// std::vector>{std::make_shared("value")}); +// // std::unordered_map> groups; +// // groups["category"] = std::make_shared("category"); +// // p = +// p.AddingStage(std::make_shared(std::move(accumulators), +// 
std::move(groups))); // AggregateStage not Evaluable +// // EXPECT_EQ(GetPipelineCanonicalId(p), +// // +// "collection(/test)|aggregate(totalValue=fn(sum,[fld(value)]))grouping(category=fld(category))|sort(fld(__name__)ascending)"); +// } + +// TEST_F(CanonifyEqPipelineTest, CanonifyDistinct) { +// // Requires constructing pipeline with DistinctStage stage +// // RealtimePipeline p = StartPipeline("test"); +// // p = p.AddingStage(std::make_shared(...)); // +// DistinctStage not Evaluable +// // EXPECT_EQ(GetPipelineCanonicalId(p), +// // +// "collection(/test)|distinct(category=fld(category),city=fld(city))|sort(fld(__name__)ascending)"); +// } + +// TEST_F(CanonifyEqPipelineTest, CanonifySelect) { +// // Requires constructing pipeline with SelectStage stage +// // RealtimePipeline p = StartPipeline("test"); +// // p = p.AddingStage(std::make_shared(...)); // +// SelectStage not Evaluable +// // EXPECT_EQ(GetPipelineCanonicalId(p), +// // +// "collection(/test)|select(__create_time__=fld(__create_time__),__name__=fld(__name__),__update_time__=fld(__update_time__),age=fld(age),name=fld(name))|sort(fld(__name__)ascending)"); +// } + +// TEST_F(CanonifyEqPipelineTest, CanonifyOffset) { +// // OffsetStage is not EvaluableStage. Test skipped. +// RealtimePipeline p = StartPipeline("test"); +// EXPECT_EQ(GetPipelineCanonicalId(p), +// "collection(/test)|offset(5)|sort(fld(__name__)ascending)"); +// } + +// TEST_F(CanonifyEqPipelineTest, CanonifyFindNearest) { +// // FindNearestStage is not EvaluableStage. Test skipped. 
+// RealtimePipeline p = StartPipeline("test"); +// // EXPECT_EQ(GetPipelineCanonicalId(p), +// // +// "collection(/test)|find_nearest(fld(location),cosine,[1,2,3],10,distance)|sort(fld(__name__)ascending)"); +// } + +TEST_F(CanonifyEqPipelineTest, CanonifyCollectionGroupSource) { + RealtimePipeline p = StartCollectionGroupPipeline("cities"); + EXPECT_EQ(GetPipelineCanonicalId(p), + "collection_group(cities)|sort(fld(__name__)asc)"); +} + +// TEST_F(CanonifyEqPipelineTest, CanonifyDatabaseSource) { +// RealtimePipeline p = StartDatabasePipeline(); +// EXPECT_EQ(GetPipelineCanonicalId(p), +// "database()|sort(fld(__name__)ascending)"); +// } + +// TEST_F(CanonifyEqPipelineTest, CanonifyDocumentsSource) { +// // DocumentsSource is not EvaluableStage. Test skipped. +// // RealtimePipeline p = StartDocumentsPipeline({"cities/SF", "cities/LA"}); +// // EXPECT_EQ(GetPipelineCanonicalId(p), +// // "documents(/cities/LA,/cities/SF)|sort(fld(__name__)ascending)"); +// } + +// TEST_F(CanonifyEqPipelineTest, CanonifyEqAnyArrays) { +// RealtimePipeline p = StartPipeline("foo"); +// p = p.AddingStage(std::make_shared(EqAnyExpr( +// std::make_shared("bar"), SharedConstant(Array(Value("a"), +// Value("b")))))); +// +// EXPECT_EQ(GetPipelineCanonicalId(p), +// "collection(/foo)|where(fn(eq_any,[fld(bar),list([cst(\"a\"),cst(\"b\")])]))|sort(fld(__name__)asc)"); +// } + +// =================================================================== +// Equality Tests (Using QueryOrPipelineEquals) +// These should pass/fail based on the TS expectation, even with placeholder C++ +// canonicalization. 
+// =================================================================== + +TEST_F(CanonifyEqPipelineTest, EqReturnsTrueForIdenticalPipelines) { + RealtimePipeline p1 = StartPipeline("test"); + p1 = p1.AddingStage(std::make_shared(EqExpr( + {std::make_shared("foo"), SharedConstant(Value(42LL))}))); + + RealtimePipeline p2 = StartPipeline("test"); + p2 = p2.AddingStage(std::make_shared(EqExpr( + {std::make_shared("foo"), SharedConstant(Value(42LL))}))); + + QueryOrPipeline v1 = p1; + QueryOrPipeline v2 = p2; + EXPECT_TRUE(v1 == v2); // Expect TRUE based on TS +} + +TEST_F(CanonifyEqPipelineTest, EqReturnsFalseForDifferentStages) { + RealtimePipeline p1 = StartPipeline("test"); + p1 = p1.AddingStage(std::make_shared(EqExpr( + {std::make_shared("foo"), SharedConstant(Value(42LL))}))); + + RealtimePipeline p2 = StartPipeline("test"); + p2 = p2.AddingStage(std::make_shared(10)); + + QueryOrPipeline v1 = p1; + QueryOrPipeline v2 = p2; + EXPECT_FALSE(v1 == v2); // Expect FALSE based on TS +} + +TEST_F(CanonifyEqPipelineTest, EqReturnsFalseForDifferentParamsInStage) { + RealtimePipeline p1 = StartPipeline("test"); + p1 = p1.AddingStage(std::make_shared(EqExpr( + {std::make_shared("foo"), SharedConstant(Value(42LL))}))); + + RealtimePipeline p2 = StartPipeline("test"); + p2 = p2.AddingStage(std::make_shared( + EqExpr({std::make_shared("bar"), + SharedConstant(Value(42LL))}))); // Different field + + QueryOrPipeline v1 = p1; + QueryOrPipeline v2 = p2; + EXPECT_FALSE(v1 == v2); // Expect FALSE based on TS +} + +TEST_F(CanonifyEqPipelineTest, EqReturnsFalseForDifferentStageOrder) { + RealtimePipeline p1 = StartPipeline("test"); + p1 = p1.AddingStage(std::make_shared(EqExpr( + {std::make_shared("foo"), SharedConstant(Value(42LL))}))); + p1 = p1.AddingStage(std::make_shared(10)); + + RealtimePipeline p2 = StartPipeline("test"); + p2 = p2.AddingStage(std::make_shared(10)); + p2 = p2.AddingStage(std::make_shared(EqExpr( + {std::make_shared("foo"), SharedConstant(Value(42LL))}))); + 
+ QueryOrPipeline v1 = p1; + QueryOrPipeline v2 = p2; + EXPECT_FALSE(v1 == v2); // Expect FALSE based on TS +} + +// TEST_F(CanonifyEqPipelineTest, EqReturnsTrueForDifferentSelectOrder) { +// // Requires constructing pipeline with SelectStage stage +// // RealtimePipeline p1 = StartPipeline("test"); +// // p1 = p1.AddingStage(std::make_shared(...)); +// // p1 = p1.AddingStage(std::make_shared(...)); // SelectStage +// not Evaluable +// +// // RealtimePipeline p2 = StartPipeline("test"); +// // p2 = p2.AddingStage(std::make_shared(...)); +// // p2 = p2.AddingStage(std::make_shared(...)); // SelectStage +// not Evaluable +// +// // QueryOrPipeline v1 = p1; +// // QueryOrPipeline v2 = p2; +// // EXPECT_TRUE(v1 == v2); // Expect TRUE based on TS +// } + +} // namespace core +} // namespace firestore +} // namespace firebase diff --git a/Firestore/core/test/unit/core/pipeline/complex_test.cc b/Firestore/core/test/unit/core/pipeline/complex_test.cc index 94fc3aaac5c..b0ba24fdb8b 100644 --- a/Firestore/core/test/unit/core/pipeline/complex_test.cc +++ b/Firestore/core/test/unit/core/pipeline/complex_test.cc @@ -125,10 +125,6 @@ TEST_F(ComplexPipelineTest, WhereWithMaxNumberOfStages) { SeedDatabase(10, num_of_fields, [&]() { return Value(value_counter++); }); RealtimePipeline pipeline = StartPipeline("/" + COLLECTION_ID); - // Add the initial dummy 'where' from TS? Seems unnecessary if stages > 0. 
- // pipeline = - // pipeline.AddingStage(std::make_shared(EqExpr({SharedConstant(1LL), - // SharedConstant(1LL)}))); for (int i = 1; i <= num_of_fields; ++i) { std::string field_name = "field_" + std::to_string(i); diff --git a/Firestore/core/test/unit/core/pipeline/utils.cc b/Firestore/core/test/unit/core/pipeline/utils.cc index 50cf2777164..f3672db3877 100644 --- a/Firestore/core/test/unit/core/pipeline/utils.cc +++ b/Firestore/core/test/unit/core/pipeline/utils.cc @@ -24,9 +24,9 @@ namespace firebase { namespace firestore { namespace core { -remote::Serializer TestSerializer() { - static remote::Serializer serializer(model::DatabaseId("test-project")); - return serializer; +std::unique_ptr TestSerializer() { + return std::make_unique( + model::DatabaseId("test-project")); } } // namespace core diff --git a/Firestore/core/test/unit/core/pipeline/utils.h b/Firestore/core/test/unit/core/pipeline/utils.h index d70b1a5fce5..121cf6ef8cf 100644 --- a/Firestore/core/test/unit/core/pipeline/utils.h +++ b/Firestore/core/test/unit/core/pipeline/utils.h @@ -30,7 +30,7 @@ namespace firestore { namespace core { // Provides a shared placeholder Firestore instance for pipeline tests. -remote::Serializer TestSerializer(); +std::unique_ptr TestSerializer(); // Basic matcher to compare document vectors by key. // TODO(wuandy): Enhance to compare contents if necessary. diff --git a/Firestore/core/test/unit/core/pipeline_util_test.cc b/Firestore/core/test/unit/core/pipeline_util_test.cc new file mode 100644 index 00000000000..c944a842337 --- /dev/null +++ b/Firestore/core/test/unit/core/pipeline_util_test.cc @@ -0,0 +1,272 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "Firestore/core/src/core/pipeline_util.h" + +#include +#include + +#include "Firestore/core/src/api/expressions.h" +#include "Firestore/core/src/api/realtime_pipeline.h" +#include "Firestore/core/src/api/stages.h" +#include "Firestore/core/src/core/query.h" +#include "Firestore/core/src/core/target.h" +#include "Firestore/core/src/model/field_path.h" +#include "Firestore/core/src/model/resource_path.h" +#include "Firestore/core/test/unit/core/pipeline/utils.h" +#include "Firestore/core/test/unit/testutil/expression_test_util.h" +#include "Firestore/core/test/unit/testutil/testutil.h" +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace firebase { +namespace firestore { +namespace core { + +using api::Field; +using model::FieldPath; +using model::ResourcePath; + +// Helper to create a core::Query +core::Query TestCoreQuery(const std::string& path_str) { + return core::Query(ResourcePath::FromString(path_str)); +} + +// Helper to create a core::Target (from a Query) +core::Target TestCoreTarget(const std::string& path_str) { + return TestCoreQuery(path_str).ToTarget(); +} + +api::RealtimePipeline StartPipeline( + const std::string& collection_path) { // Return RealtimePipeline + std::vector> + stages; // Use EvaluableStage + stages.push_back(std::make_shared(collection_path)); + return api::RealtimePipeline(std::move(stages), + TestSerializer()); // Construct RealtimePipeline +} + +// Helper to create a simple api::RealtimePipeline +api::RealtimePipeline TestPipeline(int id) { + auto pipeline = StartPipeline("coll"); + if 
(id == 1) { + pipeline = pipeline.AddingStage( + std::make_shared(testutil::NotExpr(testutil::GtExpr( + {std::make_shared("score"), + testutil::SharedConstant(testutil::Value(90LL))})))); + } else if (id == 2) { + pipeline = pipeline.AddingStage( + std::make_shared(testutil::NotExpr(testutil::LtExpr( + {std::make_shared("score"), + testutil::SharedConstant(testutil::Value(90LL))})))); + } else if (id == 3) { // Same as id 1 + pipeline = pipeline.AddingStage( + std::make_shared(testutil::NotExpr(testutil::GtExpr( + {std::make_shared("score"), + testutil::SharedConstant(testutil::Value(90LL))})))); + } + return pipeline; +} + +TEST(PipelineUtilTest, QueryOrPipelineEquality) { + core::Query q1 = TestCoreQuery("coll/doc1"); + core::Query q2 = TestCoreQuery("coll/doc1"); // Same as q1 + core::Query q3 = TestCoreQuery("coll/doc2"); // Different from q1 + api::RealtimePipeline p1 = TestPipeline(1); + api::RealtimePipeline p2 = TestPipeline(3); // Same as p1 + api::RealtimePipeline p3 = TestPipeline(2); // Different from p1 + + QueryOrPipeline qop_q1(q1); + QueryOrPipeline qop_q2(q2); + QueryOrPipeline qop_q3(q3); + QueryOrPipeline qop_p1(p1); + QueryOrPipeline qop_p2(p2); + QueryOrPipeline qop_p3(p3); + QueryOrPipeline default_qop1; + QueryOrPipeline default_qop2; + QueryOrPipeline qop_default_query(core::Query{}); + + EXPECT_EQ(qop_q1, qop_q2); + EXPECT_NE(qop_q1, qop_q3); + EXPECT_NE(qop_q1, qop_p1); // Query vs Pipeline + EXPECT_EQ(qop_p1, qop_p2); + EXPECT_NE(qop_p1, qop_p3); + + EXPECT_EQ(default_qop1, default_qop2); + EXPECT_EQ(default_qop1, qop_default_query); + EXPECT_NE(default_qop1, qop_q1); +} + +TEST(PipelineUtilTest, QueryOrPipelineHashing) { + core::Query q1 = TestCoreQuery("coll/doc1"); + core::Query q2 = TestCoreQuery("coll/doc1"); + core::Query q3 = TestCoreQuery("coll/doc2"); + api::RealtimePipeline p1 = TestPipeline(1); + api::RealtimePipeline p2 = TestPipeline(3); + api::RealtimePipeline p3 = TestPipeline(2); + + QueryOrPipeline qop_q1(q1); + 
QueryOrPipeline qop_q2(q2); + QueryOrPipeline qop_q3(q3); + QueryOrPipeline qop_p1(p1); + QueryOrPipeline qop_p2(p2); + QueryOrPipeline qop_p3(p3); + QueryOrPipeline default_qop1; + QueryOrPipeline qop_default_query(core::Query{}); + + std::hash hasher; + EXPECT_EQ(hasher(qop_q1), hasher(qop_q2)); + EXPECT_EQ(qop_q1.Hash(), qop_q2.Hash()); + + // Note: Hashes are not guaranteed to be different for different objects, + // but they should be for the ones we construct here. + EXPECT_NE(hasher(qop_q1), hasher(qop_q3)); + EXPECT_NE(qop_q1.Hash(), qop_q3.Hash()); + + EXPECT_NE(hasher(qop_q1), hasher(qop_p1)); + EXPECT_NE(qop_q1.Hash(), qop_p1.Hash()); + + EXPECT_EQ(hasher(qop_p1), hasher(qop_p2)); + EXPECT_EQ(qop_p1.Hash(), qop_p2.Hash()); + + EXPECT_NE(hasher(qop_p1), hasher(qop_p3)); + EXPECT_NE(qop_p1.Hash(), qop_p3.Hash()); + + EXPECT_EQ(hasher(default_qop1), hasher(QueryOrPipeline(core::Query{}))); + EXPECT_EQ(default_qop1.Hash(), QueryOrPipeline(core::Query{}).Hash()); +} + +TEST(PipelineUtilTest, QueryOrPipelineInUnorderedMap) { + std::unordered_map map; + core::Query q_a = TestCoreQuery("coll/docA"); + api::RealtimePipeline p_a = TestPipeline(1); // Unique pipeline A + core::Query q_b = TestCoreQuery("coll/docB"); + api::RealtimePipeline p_b = TestPipeline(2); // Unique pipeline B + + QueryOrPipeline key_q_a(q_a); + QueryOrPipeline key_p_a(p_a); + + map[key_q_a] = 100; + map[key_p_a] = 200; + + ASSERT_EQ(map.size(), 2); + EXPECT_EQ(map.at(key_q_a), 100); + EXPECT_EQ(map.at(QueryOrPipeline(TestCoreQuery("coll/docA"))), 100); + EXPECT_EQ(map.at(key_p_a), 200); + EXPECT_EQ(map.at(QueryOrPipeline(TestPipeline(1))), + 200); // TestPipeline(1) is same as p_a + + EXPECT_EQ(map.count(QueryOrPipeline(q_b)), 0); + EXPECT_EQ(map.count(QueryOrPipeline(p_b)), 0); + EXPECT_EQ(map.count(QueryOrPipeline(TestCoreQuery("coll/nonexistent"))), 0); + EXPECT_EQ(map.count(QueryOrPipeline(TestPipeline(0))), 0); // Empty pipeline +} + +TEST(PipelineUtilTest, TargetOrPipelineEquality) { + 
core::Target t1 = TestCoreTarget("coll/doc1"); + core::Target t2 = TestCoreTarget("coll/doc1"); // Same as t1 + core::Target t3 = TestCoreTarget("coll/doc2"); // Different from t1 + api::RealtimePipeline p1 = TestPipeline(1); + api::RealtimePipeline p2 = TestPipeline(3); // Same as p1 + api::RealtimePipeline p3 = TestPipeline(2); // Different from p1 + + TargetOrPipeline top_t1(t1); + TargetOrPipeline top_t2(t2); + TargetOrPipeline top_t3(t3); + TargetOrPipeline top_p1(p1); + TargetOrPipeline top_p2(p2); + TargetOrPipeline top_p3(p3); + TargetOrPipeline default_top1; + TargetOrPipeline default_top2; + TargetOrPipeline top_default_target(core::Target{}); + + EXPECT_EQ(top_t1, top_t2); + EXPECT_NE(top_t1, top_t3); + EXPECT_NE(top_t1, top_p1); // Target vs Pipeline + EXPECT_EQ(top_p1, top_p2); + EXPECT_NE(top_p1, top_p3); + + EXPECT_EQ(default_top1, default_top2); + EXPECT_EQ(default_top1, top_default_target); + EXPECT_NE(default_top1, top_t1); +} + +TEST(PipelineUtilTest, TargetOrPipelineHashing) { + core::Target t1 = TestCoreTarget("coll/doc1"); + core::Target t2 = TestCoreTarget("coll/doc1"); + core::Target t3 = TestCoreTarget("coll/doc2"); + api::RealtimePipeline p1 = TestPipeline(1); + api::RealtimePipeline p2 = TestPipeline(3); + api::RealtimePipeline p3 = TestPipeline(2); + + TargetOrPipeline top_t1(t1); + TargetOrPipeline top_t2(t2); + TargetOrPipeline top_t3(t3); + TargetOrPipeline top_p1(p1); + TargetOrPipeline top_p2(p2); + TargetOrPipeline top_p3(p3); + TargetOrPipeline default_top1; + + std::hash hasher; + EXPECT_EQ(hasher(top_t1), hasher(top_t2)); + EXPECT_EQ(top_t1.Hash(), top_t2.Hash()); + + EXPECT_NE(hasher(top_t1), hasher(top_t3)); + EXPECT_NE(top_t1.Hash(), top_t3.Hash()); + + EXPECT_NE(hasher(top_t1), hasher(top_p1)); + EXPECT_NE(top_t1.Hash(), top_p1.Hash()); + + EXPECT_EQ(hasher(top_p1), hasher(top_p2)); + EXPECT_EQ(top_p1.Hash(), top_p2.Hash()); + + EXPECT_NE(hasher(top_p1), hasher(top_p3)); + EXPECT_NE(top_p1.Hash(), top_p3.Hash()); + + 
EXPECT_EQ(hasher(default_top1), hasher(TargetOrPipeline(core::Target{}))); + EXPECT_EQ(default_top1.Hash(), TargetOrPipeline(core::Target{}).Hash()); +} + +TEST(PipelineUtilTest, TargetOrPipelineInUnorderedMap) { + std::unordered_map map; + core::Target t_x = TestCoreTarget("coll/docX"); + api::RealtimePipeline p_x = + TestPipeline(1); // Unique pipeline X (same as p_a before) + core::Target t_y = TestCoreTarget("coll/docY"); + api::RealtimePipeline p_y = + TestPipeline(2); // Unique pipeline Y (same as p_b before) + + TargetOrPipeline key_t_x(t_x); + TargetOrPipeline key_p_x(p_x); + + map[key_t_x] = 300; + map[key_p_x] = 400; + + ASSERT_EQ(map.size(), 2); + EXPECT_EQ(map.at(key_t_x), 300); + EXPECT_EQ(map.at(TargetOrPipeline(TestCoreTarget("coll/docX"))), 300); + EXPECT_EQ(map.at(key_p_x), 400); + EXPECT_EQ(map.at(TargetOrPipeline(TestPipeline(1))), 400); + + EXPECT_EQ(map.count(TargetOrPipeline(t_y)), 0); + EXPECT_EQ(map.count(TargetOrPipeline(p_y)), 0); + EXPECT_EQ(map.count(TargetOrPipeline(TestCoreTarget("coll/nonexistent"))), 0); + EXPECT_EQ(map.count(TargetOrPipeline(TestPipeline(0))), 0); // Empty pipeline +} + +} // namespace core +} // namespace firestore +} // namespace firebase diff --git a/Firestore/core/test/unit/core/query_listener_test.cc b/Firestore/core/test/unit/core/query_listener_test.cc index 9d447167be7..4dff0a086db 100644 --- a/Firestore/core/test/unit/core/query_listener_test.cc +++ b/Firestore/core/test/unit/core/query_listener_test.cc @@ -65,7 +65,7 @@ using testutil::MarkCurrent; namespace { ViewSnapshot ExcludingMetadataChanges(const ViewSnapshot& snapshot) { - return ViewSnapshot{snapshot.query(), + return ViewSnapshot{snapshot.query_or_pipeline(), snapshot.documents(), snapshot.old_documents(), snapshot.document_changes(), @@ -129,9 +129,9 @@ TEST_F(QueryListenerTest, RaisesCollectionEvents) { ASSERT_THAT(accum[1].document_changes(), ElementsAre(change3)); ViewSnapshot expected_snap2{ - snap2.query(), + snap2.query_or_pipeline(), 
snap2.documents(), - /*old_documents=*/DocumentSet{snap2.query().Comparator()}, + /*old_documents=*/DocumentSet{snap2.query_or_pipeline().Comparator()}, /*document_changes=*/{change1, change4}, snap2.mutated_keys(), snap2.from_cache(), @@ -323,7 +323,7 @@ TEST_F(QueryListenerTest, full_listener->OnViewSnapshot(snap3); full_listener->OnViewSnapshot(snap4); // Metadata change event. - ViewSnapshot expected_snap4{snap4.query(), + ViewSnapshot expected_snap4{snap4.query_or_pipeline(), snap4.documents(), snap3.documents(), /*document_changes=*/{}, @@ -362,7 +362,7 @@ TEST_F(QueryListenerTest, filtered_listener->OnViewSnapshot(snap1); filtered_listener->OnViewSnapshot(snap2); - ViewSnapshot expected_snap2{snap2.query(), + ViewSnapshot expected_snap2{snap2.query_or_pipeline(), snap2.documents(), snap1.documents(), /*document_changes=*/{change3}, @@ -403,9 +403,9 @@ TEST_F(QueryListenerTest, WillWaitForSyncIfOnline) { DocumentViewChange change1{doc1, DocumentViewChange::Type::Added}; DocumentViewChange change2{doc2, DocumentViewChange::Type::Added}; ViewSnapshot expected_snap{ - snap3.query(), + snap3.query_or_pipeline(), snap3.documents(), - /*old_documents=*/DocumentSet{snap3.query().Comparator()}, + /*old_documents=*/DocumentSet{snap3.query_or_pipeline().Comparator()}, /*document_changes=*/{change1, change2}, snap3.mutated_keys(), /*from_cache=*/false, @@ -445,7 +445,7 @@ TEST_F(QueryListenerTest, WillRaiseInitialEventWhenGoingOffline) { ViewSnapshot expected_snap1{ query, /*documents=*/snap1.documents(), - /*old_documents=*/DocumentSet{snap1.query().Comparator()}, + /*old_documents=*/DocumentSet{snap1.query_or_pipeline().Comparator()}, /*document_changes=*/{change1}, snap1.mutated_keys(), /*from_cache=*/true, @@ -482,7 +482,7 @@ TEST_F(QueryListenerTest, ViewSnapshot expected_snap{ query, /*documents=*/snap1.documents(), - /*old_documents=*/DocumentSet{snap1.query().Comparator()}, + /*old_documents=*/DocumentSet{snap1.query_or_pipeline().Comparator()}, 
/*document_changes=*/{}, snap1.mutated_keys(), /*from_cache=*/true, @@ -508,7 +508,7 @@ TEST_F(QueryListenerTest, ViewSnapshot expected_snap{ query, /*documents=*/snap1.documents(), - /*old_documents=*/DocumentSet{snap1.query().Comparator()}, + /*old_documents=*/DocumentSet{snap1.query_or_pipeline().Comparator()}, /*document_changes=*/{}, snap1.mutated_keys(), /*from_cache=*/true, diff --git a/Firestore/core/test/unit/core/view_snapshot_test.cc b/Firestore/core/test/unit/core/view_snapshot_test.cc index 0af09a28101..8b4d00b8e73 100644 --- a/Firestore/core/test/unit/core/view_snapshot_test.cc +++ b/Firestore/core/test/unit/core/view_snapshot_test.cc @@ -119,7 +119,7 @@ TEST(ViewSnapshotTest, ViewSnapshotConstructor) { /*excludes_metadata_changes=*/false, has_cached_results}; - ASSERT_EQ(snapshot.query(), query); + ASSERT_EQ(snapshot.query_or_pipeline(), query); ASSERT_EQ(snapshot.documents(), documents); ASSERT_EQ(snapshot.old_documents(), old_documents); ASSERT_EQ(snapshot.document_changes(), document_changes); diff --git a/Firestore/core/test/unit/core/view_test.cc b/Firestore/core/test/unit/core/view_test.cc index 7c4ac029b75..2c7010f0a5a 100644 --- a/Firestore/core/test/unit/core/view_test.cc +++ b/Firestore/core/test/unit/core/view_test.cc @@ -93,7 +93,7 @@ TEST(ViewTest, AddsDocumentsBasedOnQuery) { ASSERT_TRUE(maybe_snapshot.has_value()); ViewSnapshot snapshot = std::move(maybe_snapshot).value(); - ASSERT_EQ(snapshot.query(), query); + ASSERT_EQ(snapshot.query_or_pipeline(), query); ASSERT_THAT(snapshot.documents(), ElementsAre(doc1, doc2)); @@ -125,7 +125,7 @@ TEST(ViewTest, RemovesDocuments) { ASSERT_TRUE(maybe_snapshot.has_value()); ViewSnapshot snapshot = std::move(maybe_snapshot).value(); - ASSERT_EQ(snapshot.query(), query); + ASSERT_EQ(snapshot.query_or_pipeline(), query); ASSERT_THAT(snapshot.documents(), ElementsAre(doc1, doc3)); @@ -178,7 +178,7 @@ TEST(ViewTest, FiltersDocumentsBasedOnQueryWithFilter) { ASSERT_TRUE(maybe_snapshot.has_value()); 
ViewSnapshot snapshot = std::move(maybe_snapshot).value(); - ASSERT_EQ(snapshot.query(), query); + ASSERT_EQ(snapshot.query_or_pipeline(), query); ASSERT_THAT(snapshot.documents(), ElementsAre(doc1, doc5, doc2)); @@ -204,7 +204,7 @@ TEST(ViewTest, UpdatesDocumentsBasedOnQueryWithFilter) { ViewSnapshot snapshot = ApplyChanges(&view, {doc1, doc2, doc3, doc4}, absl::nullopt).value(); - ASSERT_EQ(snapshot.query(), query); + ASSERT_EQ(snapshot.query_or_pipeline(), query); ASSERT_THAT(snapshot.documents(), ElementsAre(doc1, doc3)); @@ -215,7 +215,7 @@ TEST(ViewTest, UpdatesDocumentsBasedOnQueryWithFilter) { snapshot = ApplyChanges(&view, {new_doc2, new_doc3, new_doc4}, absl::nullopt) .value(); - ASSERT_EQ(snapshot.query(), query); + ASSERT_EQ(snapshot.query_or_pipeline(), query); ASSERT_THAT(snapshot.documents(), ElementsAre(new_doc4, doc1, new_doc2)); @@ -245,7 +245,7 @@ TEST(ViewTest, RemovesDocumentsForQueryWithLimit) { ViewSnapshot snapshot = ApplyChanges(&view, {doc2}, AckTarget({doc1, doc2, doc3})).value(); - ASSERT_EQ(snapshot.query(), query); + ASSERT_EQ(snapshot.query_or_pipeline(), query); ASSERT_THAT(snapshot.documents(), ElementsAre(doc1, doc2)); @@ -288,7 +288,7 @@ TEST(ViewTest, DoesntReportChangesForDocumentBeyondLimitOfQuery) { ASSERT_TRUE(maybe_snapshot.has_value()); ViewSnapshot snapshot = std::move(maybe_snapshot).value(); - ASSERT_EQ(snapshot.query(), query); + ASSERT_EQ(snapshot.query_or_pipeline(), query); ASSERT_THAT(snapshot.documents(), ElementsAre(doc1, doc3)); diff --git a/Firestore/core/test/unit/local/counting_query_engine.cc b/Firestore/core/test/unit/local/counting_query_engine.cc index 3ad9e16614b..ba052fd4c3b 100644 --- a/Firestore/core/test/unit/local/counting_query_engine.cc +++ b/Firestore/core/test/unit/local/counting_query_engine.cc @@ -186,7 +186,7 @@ model::MutableDocumentMap WrappedRemoteDocumentCache::GetAll( } model::MutableDocumentMap WrappedRemoteDocumentCache::GetDocumentsMatchingQuery( - const core::Query& query, + const 
core::QueryOrPipeline& query, const model::IndexOffset& offset, absl::optional limit, const model::OverlayByDocumentKeyMap& mutated_docs) const { @@ -195,7 +195,7 @@ model::MutableDocumentMap WrappedRemoteDocumentCache::GetDocumentsMatchingQuery( } model::MutableDocumentMap WrappedRemoteDocumentCache::GetDocumentsMatchingQuery( - const core::Query& query, + const core::QueryOrPipeline& query, const model::IndexOffset& offset, absl::optional& context, absl::optional limit, diff --git a/Firestore/core/test/unit/local/counting_query_engine.h b/Firestore/core/test/unit/local/counting_query_engine.h index 98853f4443b..b8ed9abbd52 100644 --- a/Firestore/core/test/unit/local/counting_query_engine.h +++ b/Firestore/core/test/unit/local/counting_query_engine.h @@ -197,13 +197,13 @@ class WrappedRemoteDocumentCache : public RemoteDocumentCache { size_t limit) const override; model::MutableDocumentMap GetDocumentsMatchingQuery( - const core::Query& query, + const core::QueryOrPipeline& query, const model::IndexOffset& offset, absl::optional, const model::OverlayByDocumentKeyMap& mutated_docs) const override; model::MutableDocumentMap GetDocumentsMatchingQuery( - const core::Query& query, + const core::QueryOrPipeline& query, const model::IndexOffset& offset, absl::optional& context, absl::optional limit, diff --git a/Firestore/core/test/unit/local/leveldb_local_store_test.cc b/Firestore/core/test/unit/local/leveldb_local_store_test.cc index 85e4286698b..6af9ccdba18 100644 --- a/Firestore/core/test/unit/local/leveldb_local_store_test.cc +++ b/Firestore/core/test/unit/local/leveldb_local_store_test.cc @@ -81,9 +81,11 @@ auto convertToSet = [](std::vector&& vec) { } // namespace -INSTANTIATE_TEST_SUITE_P(LevelDbLocalStoreTest, - LocalStoreTest, - ::testing::Values(Factory)); +INSTANTIATE_TEST_SUITE_P( + LevelDbLocalStoreTest, + LocalStoreTest, + testing::Values(LocalStoreTestParams{Factory, /*use_pipeline=*/false}, + LocalStoreTestParams{Factory, /*use_pipeline=*/true})); class 
LevelDbLocalStoreTest : public LocalStoreTestBase { public: diff --git a/Firestore/core/test/unit/local/leveldb_migrations_test.cc b/Firestore/core/test/unit/local/leveldb_migrations_test.cc index 65c5b97ad83..61c6c4847c5 100644 --- a/Firestore/core/test/unit/local/leveldb_migrations_test.cc +++ b/Firestore/core/test/unit/local/leveldb_migrations_test.cc @@ -498,9 +498,9 @@ TEST_F(LevelDbMigrationsTest, RewritesCanonicalIds) { LevelDbTransaction transaction( db_.get(), "Read target to verify canonical ID rewritten"); - auto query_target_key = - LevelDbQueryTargetKey::Key(initial_target_data.target().CanonicalId(), - initial_target_data.target_id()); + auto query_target_key = LevelDbQueryTargetKey::Key( + initial_target_data.target_or_pipeline().CanonicalId(), + initial_target_data.target_id()); auto it = transaction.NewIterator(); // Verify we are able to seek to the key built with proper canonical ID. it->Seek(query_target_key); diff --git a/Firestore/core/test/unit/local/leveldb_query_engine_test.cc b/Firestore/core/test/unit/local/leveldb_query_engine_test.cc index bfef87e62fe..fc32ee0fc9c 100644 --- a/Firestore/core/test/unit/local/leveldb_query_engine_test.cc +++ b/Firestore/core/test/unit/local/leveldb_query_engine_test.cc @@ -65,9 +65,12 @@ model::DocumentMap DocumentMap( } // namespace -INSTANTIATE_TEST_SUITE_P(LevelDbQueryEngineTest, - QueryEngineTest, - testing::Values(PersistenceFactory)); +INSTANTIATE_TEST_SUITE_P( + LevelDbQueryEngineTest, + QueryEngineTest, + testing::Values( + QueryEngineTestParams{PersistenceFactory, /*use_pipeline=*/false}, + QueryEngineTestParams{PersistenceFactory, /*use_pipeline=*/true})); class LevelDbQueryEngineTest : public QueryEngineTestBase { public: diff --git a/Firestore/core/test/unit/local/local_serializer_test.cc b/Firestore/core/test/unit/local/local_serializer_test.cc index ab760e73a57..6b827eddd91 100644 --- a/Firestore/core/test/unit/local/local_serializer_test.cc +++ 
b/Firestore/core/test/unit/local/local_serializer_test.cc @@ -49,12 +49,18 @@ #include "google/protobuf/util/message_differencer.h" #include "gtest/gtest.h" +#include "Firestore/core/src/api/expressions.h" +#include "Firestore/core/src/api/realtime_pipeline.h" +#include "Firestore/core/src/api/stages.h" +#include "Firestore/core/test/unit/testutil/expression_test_util.h" + namespace firebase { namespace firestore { namespace local { namespace { namespace v1 = google::firestore::v1; +namespace api = firebase::firestore::api; using bundle::BundledQuery; using bundle::NamedQuery; using core::Query; @@ -244,6 +250,14 @@ class LocalSerializerTest : public ::testing::Test { EXPECT_EQ(0, encoded.update_transforms_count); } + api::RealtimePipeline StartPipeline(const std::string& collection_path) { + std::vector> stages; + stages.push_back(std::make_shared(collection_path)); + return api::RealtimePipeline( + std::move(stages), + std::make_unique(remote_serializer.database_id())); + } + private: void ExpectSerializationRoundTrip( const MutableDocument& model, @@ -706,6 +720,117 @@ TEST_F(LocalSerializerTest, EncodesMutation) { ExpectRoundTrip(mutation, expected_mutation); } +TEST_F(LocalSerializerTest, EncodesTargetDataWithPipeline) { + TargetId target_id = 42; + ListenSequenceNumber sequence_number = 10; + SnapshotVersion version = testutil::Version(1039); + SnapshotVersion limbo_free_version = testutil::Version(1000); + ByteString resume_token = testutil::ResumeToken(1039); + + // Construct the pipeline + auto ppl = StartPipeline("rooms"); + ppl = ppl.AddingStage(std::make_shared( + testutil::EqExpr({std::make_shared("name"), + testutil::SharedConstant("testroom")}))); + api::Ordering ordering(std::make_unique("age"), + api::Ordering::DESCENDING); + ppl = ppl.AddingStage( + std::make_shared(std::vector{ordering})); + ppl = ppl.AddingStage(std::make_shared(10)); + + TargetData target_data( + core::TargetOrPipeline(std::move(ppl)), target_id, sequence_number, + 
QueryPurpose::Listen, SnapshotVersion(version), + SnapshotVersion(limbo_free_version), ByteString(resume_token), + /*expected_count=*/absl::nullopt); + + // Construct the expected protobuf + ::firestore::client::Target expected_proto; + expected_proto.set_target_id(target_id); + expected_proto.set_last_listen_sequence_number(sequence_number); + expected_proto.mutable_snapshot_version()->set_nanos(1039000); + expected_proto.mutable_last_limbo_free_snapshot_version()->set_nanos(1000000); + expected_proto.set_resume_token(resume_token.data(), resume_token.size()); + + v1::Target::PipelineQueryTarget* pipeline_query_proto = + expected_proto.mutable_pipeline_query(); + v1::StructuredPipeline* structured_pipeline_proto = + pipeline_query_proto->mutable_structured_pipeline(); + v1::Pipeline* pipeline_proto_obj = + structured_pipeline_proto->mutable_pipeline(); + + // Stage 1: CollectionSource("rooms") + { + google::firestore::v1::Pipeline_Stage* stage1_proto = + pipeline_proto_obj->add_stages(); // Changed type + stage1_proto->set_name("collection"); + v1::Value* stage1_arg1 = stage1_proto->add_args(); + stage1_arg1->set_reference_value("rooms"); + } + + // Stage 2: Where(EqExpr(Field("name"), Value("testroom"))) + { + google::firestore::v1::Pipeline_Stage* stage2_proto = + pipeline_proto_obj->add_stages(); // Changed type + stage2_proto->set_name("where"); + v1::Value* stage2_arg1_expr = stage2_proto->add_args(); // The EqExpr + v1::Function* eq_func = stage2_arg1_expr->mutable_function_value(); + eq_func->set_name("eq"); + + v1::Value* eq_arg1_field = eq_func->add_args(); // Field("name") + eq_arg1_field->set_field_reference_value("name"); + + v1::Value* eq_arg2_value = eq_func->add_args(); // Value("testroom") + eq_arg2_value->set_string_value("testroom"); + } + + // Stage 3: Sort(Field("age").descending(), Field("__name__").ascending()) + { + google::firestore::v1::Pipeline_Stage* stage3_proto = + pipeline_proto_obj->add_stages(); + stage3_proto->set_name("sort"); + + 
// First ordering: age descending + v1::Value* sort_arg1 = stage3_proto->add_args(); + v1::MapValue* sort_arg1_map = sort_arg1->mutable_map_value(); + google::protobuf::Map* sort_arg1_fields = + sort_arg1_map->mutable_fields(); + + v1::Value direction_val_desc; + direction_val_desc.set_string_value("descending"); + (*sort_arg1_fields)["direction"] = direction_val_desc; + + v1::Value expr_val_age; + expr_val_age.set_field_reference_value("age"); + (*sort_arg1_fields)["expression"] = expr_val_age; + + // Second ordering: __name__ ascending + v1::Value* sort_arg2 = stage3_proto->add_args(); + v1::MapValue* sort_arg2_map = sort_arg2->mutable_map_value(); + google::protobuf::Map* sort_arg2_fields = + sort_arg2_map->mutable_fields(); + + v1::Value direction_val_asc; + direction_val_asc.set_string_value("ascending"); + (*sort_arg2_fields)["direction"] = direction_val_asc; + + v1::Value expr_val_name; + expr_val_name.set_field_reference_value("__name__"); + (*sort_arg2_fields)["expression"] = expr_val_name; + } + + // Stage 4: Limit(10) + { + google::firestore::v1::Pipeline_Stage* stage4_proto = + pipeline_proto_obj->add_stages(); + stage4_proto->set_name("limit"); + v1::Value* limit_arg = stage4_proto->add_args(); + limit_arg->set_integer_value(10); + } + + ExpectRoundTrip(target_data, expected_proto); +} + } // namespace } // namespace local } // namespace firestore diff --git a/Firestore/core/test/unit/local/local_store_test.cc b/Firestore/core/test/unit/local/local_store_test.cc index 2c0affe91ee..1292ef536db 100644 --- a/Firestore/core/test/unit/local/local_store_test.cc +++ b/Firestore/core/test/unit/local/local_store_test.cc @@ -262,21 +262,36 @@ void LocalStoreTestBase::ConfigureFieldIndexes( } TargetId LocalStoreTestBase::AllocateQuery(core::Query query) { - TargetData target_data = local_store_.AllocateTarget(query.ToTarget()); + core::QueryOrPipeline query_or_pipeline_to_use = query; + if (should_use_pipeline_) { + query_or_pipeline_to_use = 
ConvertQueryToPipeline(query); + } + + TargetData target_data = local_store_.AllocateTarget( + query_or_pipeline_to_use.ToTargetOrPipeline()); last_target_id_ = target_data.target_id(); return target_data.target_id(); } TargetData LocalStoreTestBase::GetTargetData(const core::Query& query) { return persistence_->Run("GetTargetData", [&] { - return *local_store_.GetTargetData(query.ToTarget()); + core::QueryOrPipeline query_or_pipeline_to_use = query; + if (should_use_pipeline_) { + query_or_pipeline_to_use = ConvertQueryToPipeline(query); + } + return *local_store_.GetTargetData( + query_or_pipeline_to_use.ToTargetOrPipeline()); }); } QueryResult LocalStoreTestBase::ExecuteQuery(const core::Query& query) { ResetPersistenceStats(); - last_query_result_ = - local_store_.ExecuteQuery(query, /* use_previous_results= */ true); + core::QueryOrPipeline query_or_pipeline_to_run = query; + if (should_use_pipeline_) { + query_or_pipeline_to_run = ConvertQueryToPipeline(query); + } + last_query_result_ = local_store_.ExecuteQuery( + query_or_pipeline_to_run, /* use_previous_results= */ true); return last_query_result_; } @@ -306,7 +321,18 @@ void LocalStoreTestBase::ResetPersistenceStats() { query_engine_.ResetCounts(); } -LocalStoreTest::LocalStoreTest() : LocalStoreTestBase(GetParam()()) { +// Helper to convert a Query to a RealtimePipeline. +// This is identical to the one in QueryEngineTestBase. 
+api::RealtimePipeline LocalStoreTestBase::ConvertQueryToPipeline( + const core::Query& query) { + return { + core::ToPipelineStages(query), + std::make_unique(model::DatabaseId("test-project"))}; +} + +LocalStoreTest::LocalStoreTest() + : LocalStoreTestBase(GetParam().local_store_helper_factory()) { + should_use_pipeline_ = GetParam().use_pipeline; } TEST_P(LocalStoreTest, MutationBatchKeys) { diff --git a/Firestore/core/test/unit/local/local_store_test.h b/Firestore/core/test/unit/local/local_store_test.h index 1271bc4fa1b..e5dd028472d 100644 --- a/Firestore/core/test/unit/local/local_store_test.h +++ b/Firestore/core/test/unit/local/local_store_test.h @@ -21,11 +21,14 @@ #include #include +#include "Firestore/core/src/api/realtime_pipeline.h" // Added for RealtimePipeline #include "Firestore/core/src/core/core_fwd.h" +#include "Firestore/core/src/core/pipeline_util.h" // Added for QueryOrPipeline #include "Firestore/core/src/local/local_store.h" #include "Firestore/core/src/local/query_engine.h" #include "Firestore/core/src/local/query_result.h" #include "Firestore/core/src/model/mutation_batch.h" +#include "Firestore/core/src/remote/serializer.h" // Added for Serializer #include "Firestore/core/test/unit/local/counting_query_engine.h" #include "gtest/gtest.h" @@ -59,11 +62,20 @@ class LocalStoreTestHelper { using FactoryFunc = std::unique_ptr (*)(); +// Parameters for LocalStore tests, combining helper factory and pipeline flag. +struct LocalStoreTestParams { + FactoryFunc local_store_helper_factory; + bool use_pipeline; +}; + class LocalStoreTestBase : public testing::Test { protected: explicit LocalStoreTestBase( std::unique_ptr&& test_helper); + // Helper to convert a Query to a RealtimePipeline. 
+ api::RealtimePipeline ConvertQueryToPipeline(const core::Query& query); + bool IsGcEager() const { return test_helper_->IsGcEager(); } @@ -108,6 +120,7 @@ class LocalStoreTestBase : public testing::Test { std::unique_ptr persistence_; CountingQueryEngine query_engine_; LocalStore local_store_; + bool should_use_pipeline_ = false; // Flag for pipeline usage std::vector batches_; model::DocumentMap last_changes_; @@ -126,10 +139,10 @@ class LocalStoreTestBase : public testing::Test { * testing::Values(MyNewLocalStoreTestHelper)); */ -class LocalStoreTest : public LocalStoreTestBase, - public testing::WithParamInterface { +class LocalStoreTest + : public LocalStoreTestBase, + public testing::WithParamInterface { public: - // `GetParam()` must return a factory function. LocalStoreTest(); }; diff --git a/Firestore/core/test/unit/local/lru_garbage_collector_test.cc b/Firestore/core/test/unit/local/lru_garbage_collector_test.cc index eba852e1469..6c5084a084c 100644 --- a/Firestore/core/test/unit/local/lru_garbage_collector_test.cc +++ b/Firestore/core/test/unit/local/lru_garbage_collector_test.cc @@ -382,7 +382,7 @@ TEST_P(LruGarbageCollectorTest, RemoveQueriesUpThroughSequenceNumber) { // Make sure we removed the next 10 even targets. 
persistence_->Run("verify remaining targets", [&] { for (const auto& target : targets) { - auto entry = target_cache_->GetTarget(target.target()); + auto entry = target_cache_->GetTarget(target.target_or_pipeline()); if (live_queries.find(target.target_id()) != live_queries.end()) { ASSERT_TRUE(entry.has_value()); diff --git a/Firestore/core/test/unit/local/memory_local_store_test.cc b/Firestore/core/test/unit/local/memory_local_store_test.cc index f4a8ff24850..a418f0cb028 100644 --- a/Firestore/core/test/unit/local/memory_local_store_test.cc +++ b/Firestore/core/test/unit/local/memory_local_store_test.cc @@ -43,9 +43,11 @@ std::unique_ptr Factory() { } // namespace -INSTANTIATE_TEST_SUITE_P(MemoryLocalStoreTest, - LocalStoreTest, - ::testing::Values(Factory)); +INSTANTIATE_TEST_SUITE_P( + MemoryLocalStoreTest, + LocalStoreTest, + testing::Values(LocalStoreTestParams{Factory, /*use_pipeline=*/false}, + LocalStoreTestParams{Factory, /*use_pipeline=*/true})); } // namespace local } // namespace firestore diff --git a/Firestore/core/test/unit/local/memory_query_engine_test.cc b/Firestore/core/test/unit/local/memory_query_engine_test.cc index 0d2c0a96943..94eae12f66c 100644 --- a/Firestore/core/test/unit/local/memory_query_engine_test.cc +++ b/Firestore/core/test/unit/local/memory_query_engine_test.cc @@ -30,9 +30,12 @@ std::unique_ptr PersistenceFactory() { } // namespace -INSTANTIATE_TEST_SUITE_P(MemoryQueryEngineTest, - QueryEngineTest, - testing::Values(PersistenceFactory)); +INSTANTIATE_TEST_SUITE_P( + MemoryQueryEngineTest, + QueryEngineTest, + testing::Values( + QueryEngineTestParams{PersistenceFactory, /*use_pipeline=*/false}, + QueryEngineTestParams{PersistenceFactory, /*use_pipeline=*/true})); } // namespace local } // namespace firestore diff --git a/Firestore/core/test/unit/local/query_engine_test.cc b/Firestore/core/test/unit/local/query_engine_test.cc index 84363714d4c..0499a2fb4a8 100644 --- a/Firestore/core/test/unit/local/query_engine_test.cc +++ 
b/Firestore/core/test/unit/local/query_engine_test.cc @@ -14,14 +14,19 @@ * limitations under the License. */ +#include "query_engine_test.h" #include "Firestore/core/test/unit/local/query_engine_test.h" #include "Firestore/core/src/local/query_engine.h" #include #include +#include // For std::vector in ConvertQueryToPipeline +#include "Firestore/core/src/api/realtime_pipeline.h" +#include "Firestore/core/src/api/stages.h" #include "Firestore/core/src/core/field_filter.h" +#include "Firestore/core/src/core/pipeline_util.h" #include "Firestore/core/src/core/view.h" #include "Firestore/core/src/credentials/user.h" #include "Firestore/core/src/local/memory_index_manager.h" @@ -37,6 +42,7 @@ #include "Firestore/core/src/model/object_value.h" #include "Firestore/core/src/model/precondition.h" #include "Firestore/core/src/model/snapshot_version.h" +#include "Firestore/core/src/remote/serializer.h" #include "Firestore/core/test/unit/testutil/testutil.h" namespace firebase { @@ -105,7 +111,7 @@ const SnapshotVersion kMissingLastLimboFreeSnapshot = SnapshotVersion::None(); } // namespace DocumentMap TestLocalDocumentsView::GetDocumentsMatchingQuery( - const core::Query& query, const model::IndexOffset& offset) { + const core::QueryOrPipeline& query, const model::IndexOffset& offset) { bool full_collection_scan = offset.read_time() == SnapshotVersion::None(); EXPECT_TRUE(expect_full_collection_scan_.has_value()); @@ -133,6 +139,8 @@ QueryEngineTestBase::QueryEngineTestBase( document_overlay_cache_, index_manager_), target_cache_(persistence_->target_cache()) { + // should_use_pipeline_ is initialized by the derived QueryEngineTest + // constructor remote_document_cache_->SetIndexManager(index_manager_); query_engine_.Initialize(&local_documents_view_); } @@ -181,18 +189,40 @@ T QueryEngineTestBase::ExpectFullCollectionScan( return fun(); } +api::RealtimePipeline QueryEngineTestBase::ConvertQueryToPipeline( + const core::Query& query) { + return {ToPipelineStages(query), + 
std::make_unique( + model::DatabaseId("test-project"))}; +} + DocumentSet QueryEngineTestBase::RunQuery( const core::Query& query, const SnapshotVersion& last_limbo_free_snapshot_version) { + core::QueryOrPipeline query_or_pipeline_to_run = + query; // Default to original query + + if (should_use_pipeline_) { + query_or_pipeline_to_run = + core::QueryOrPipeline(ConvertQueryToPipeline(query)); + } + DocumentKeySet remote_keys = target_cache_->GetMatchingKeys(kTestTargetId); const auto docs = query_engine_.GetDocumentsMatchingQuery( - query, last_limbo_free_snapshot_version, remote_keys); + query_or_pipeline_to_run, last_limbo_free_snapshot_version, remote_keys); + + // The View is always constructed based on the original query's intent, + // regardless of whether it was executed as a query or pipeline. View view(query, DocumentKeySet()); ViewDocumentChanges view_doc_changes = view.ComputeDocumentChanges(docs, {}); return view.ApplyChanges(view_doc_changes).snapshot()->documents(); } -QueryEngineTest::QueryEngineTest() : QueryEngineTestBase(GetParam()()) { +QueryEngineTest::QueryEngineTest() + : QueryEngineTestBase(GetParam().persistence_factory()) { + // Initialize should_use_pipeline_ from the parameter for the specific test + // instance + should_use_pipeline_ = GetParam().use_pipeline; } TEST_P(QueryEngineTest, UsesTargetMappingForInitialView) { diff --git a/Firestore/core/test/unit/local/query_engine_test.h b/Firestore/core/test/unit/local/query_engine_test.h index 98def0df06c..77c552d0aed 100644 --- a/Firestore/core/test/unit/local/query_engine_test.h +++ b/Firestore/core/test/unit/local/query_engine_test.h @@ -25,6 +25,11 @@ #include "Firestore/core/src/local/query_engine.h" #include "Firestore/core/src/model/mutable_document.h" #include "Firestore/core/src/model/patch_mutation.h" +// For QueryOrPipeline, absl::optional +#include "Firestore/core/src/api/realtime_pipeline.h" // Full definition for api::RealtimePipeline +#include 
"Firestore/core/src/core/pipeline_util.h" // Defines QueryOrPipeline +#include "Firestore/core/src/remote/serializer.h" // For remote::Serializer if needed by ConvertQueryToPipeline +#include "absl/types/optional.h" #include "gtest/gtest.h" namespace firebase { @@ -32,6 +37,9 @@ namespace firestore { namespace core { class Query; +// Forward declare RealtimePipeline if its full definition isn't needed here +// yet. However, QueryOrPipeline will bring it in. class RealtimePipeline; // +// from api/realtime_pipeline.h } // namespace core namespace model { @@ -45,6 +53,11 @@ namespace local { class TargetCache; class Persistence; class MemoryRemoteDocumentCache; +// api::RealtimePipeline is now fully included above. +// No need to forward-declare if full header included. +namespace remote { +class Serializer; // Forward declaration +} // namespace remote class DocumentOverlayCache; class MemoryIndexManager; class MutationQueue; @@ -54,7 +67,8 @@ class TestLocalDocumentsView : public LocalDocumentsView { using LocalDocumentsView::LocalDocumentsView; model::DocumentMap GetDocumentsMatchingQuery( - const core::Query& query, const model::IndexOffset& offset) override; + const core::QueryOrPipeline& query, + const model::IndexOffset& offset) override; void ExpectFullCollectionScan(bool full_collection_scan); @@ -64,6 +78,13 @@ class TestLocalDocumentsView : public LocalDocumentsView { using FactoryFunc = std::unique_ptr (*)(); +// Parameters for QueryEngine tests, combining persistence factory and pipeline +// flag. +struct QueryEngineTestParams { + FactoryFunc persistence_factory; + bool use_pipeline; +}; + /** * A test fixture for implementing tests of the QueryEngine interface. * @@ -97,11 +118,16 @@ class QueryEngineTestBase : public testing::Test { template T ExpectFullCollectionScan(const std::function& f); + // RunQuery will now use the should_use_pipeline_ member. 
model::DocumentSet RunQuery( const core::Query& query, const model::SnapshotVersion& last_limbo_free_snapshot_version); + api::RealtimePipeline ConvertQueryToPipeline(const core::Query& query); + std::unique_ptr persistence_; + bool should_use_pipeline_ = + false; // Flag to indicate if pipeline conversion should be attempted. RemoteDocumentCache* remote_document_cache_ = nullptr; DocumentOverlayCache* document_overlay_cache_; IndexManager* index_manager_; @@ -119,13 +145,16 @@ class QueryEngineTestBase : public testing::Test { * + Write a persistence factory function * + Call INSTANTIATE_TEST_SUITE_P(MyNewQueryEngineTest, * QueryEngineTest, - * testing::Values(PersistenceFactory)); + * testing::Values( + * QueryEngineTestParams{&CreateMemoryPersistence, + * false}, QueryEngineTestParams{&CreateMemoryPersistence, true} + * )); */ -class QueryEngineTest : public QueryEngineTestBase, - public testing::WithParamInterface { +class QueryEngineTest + : public QueryEngineTestBase, + public testing::WithParamInterface { public: - // `GetParam()` must return a factory function. 
QueryEngineTest(); }; diff --git a/Firestore/core/test/unit/local/target_cache_test.cc b/Firestore/core/test/unit/local/target_cache_test.cc index 262d46edfca..843d876cd02 100644 --- a/Firestore/core/test/unit/local/target_cache_test.cc +++ b/Firestore/core/test/unit/local/target_cache_test.cc @@ -113,7 +113,7 @@ TEST_P(TargetCacheTest, SetAndReadAQuery) { auto result = cache_->GetTarget(query_rooms_.ToTarget()); ASSERT_NE(result, absl::nullopt); - ASSERT_EQ(result->target(), target_data.target()); + ASSERT_EQ(result->target_or_pipeline(), target_data.target_or_pipeline()); ASSERT_EQ(result->target_id(), target_data.target_id()); ASSERT_EQ(result->resume_token(), target_data.resume_token()); }); @@ -239,9 +239,9 @@ TEST_P(TargetCacheTest, RemoveTargets) { cache_->RemoveTargets(target_data2.sequence_number(), {}); - auto result = cache_->GetTarget(target_data1.target()); + auto result = cache_->GetTarget(target_data1.target_or_pipeline()); ASSERT_EQ(result, absl::nullopt); - result = cache_->GetTarget(target_data2.target()); + result = cache_->GetTarget(target_data2.target_or_pipeline()); ASSERT_EQ(result, absl::nullopt); }); } diff --git a/Firestore/core/test/unit/remote/serializer_test.cc b/Firestore/core/test/unit/remote/serializer_test.cc index 14b08b1e13f..740aa4af976 100644 --- a/Firestore/core/test/unit/remote/serializer_test.cc +++ b/Firestore/core/test/unit/remote/serializer_test.cc @@ -526,7 +526,7 @@ class SerializerTest : public ::testing::Test { std::mem_fn(&Serializer::DecodeQueryTarget), proto.query()); } - EXPECT_EQ(model.target(), actual_model); + EXPECT_EQ(model.target_or_pipeline(), actual_model); } void ExpectSerializationRoundTrip(const Mutation& model,