diff --git a/cpp_src/cluster/replication/updatesqueue.h b/cpp_src/cluster/replication/updatesqueue.h index 0ab455813..4a8c54558 100644 --- a/cpp_src/cluster/replication/updatesqueue.h +++ b/cpp_src/cluster/replication/updatesqueue.h @@ -278,12 +278,12 @@ class UpdatesQueue { logTraceW([&] { rtfmt("Push new sync updates (%d) for %s", localData.dataSize, data[0].GetNsName()); }); std::cout << fmt::sprintf("[cluster:queue] Duplicated: Pushing new sync updates (%d) for %s. Last ID: %d\n", localData.dataSize, - nsName, queue_.size() ? queue_.back()->ID() : 0); + nsName, queue_.size() ? (queue_.back()->ID() + queue_.back()->Count()) : -1); entriesRange = addDataToQueue(std::move(data), &onResult, dropped); std::cout << fmt::sprintf("[cluster:queue] Duplicated: Added new sync updates (%d) for %s. Last ID: %d\n", localData.dataSize, - nsName, queue_.size() ? queue_.back()->ID() : 0); + nsName, queue_.size() ? (queue_.back()->ID() + queue_.back()->Count()) : -1); if (beforeWait) { beforeWait(); // FIXME: Think about better workaround diff --git a/cpp_src/core/clusterproxy.h b/cpp_src/core/clusterproxy.h index e61bd6731..edb5140de 100644 --- a/cpp_src/core/clusterproxy.h +++ b/cpp_src/core/clusterproxy.h @@ -8,6 +8,8 @@ #include "core/reindexer_impl/reindexerimpl.h" #include "tools/clusterproxyloghelper.h" +#include "vendor/spdlog/fmt/fmt.h" + namespace reindexer { #define CallProxyFunction(Fn) proxyCall @@ -127,16 +129,22 @@ class ClusterProxy { return proxyCall(ctx, q.NsName(), action, q, qr); } Error Upsert(std::string_view nsName, Item &item, const RdxContext &ctx) { + std::cout << fmt::sprintf("ClusterProxy::Upsert(...) into '%s' begin\n", nsName); auto action = [this](const RdxContext &ctx, LeaderRefT clientToLeader, std::string_view nsName, Item &item) { return itemFollowerAction<&client::Reindexer::Upsert>(ctx, clientToLeader, nsName, item); }; - return proxyCall(ctx, nsName, action, nsName, item); + auto res = proxyCall(ctx, nsName, action, nsName, item); + std::cout << fmt::sprintf("ClusterProxy::Upsert(...) 
into '%s' done\n", nsName); + return res; } Error Upsert(std::string_view nsName, Item &item, LocalQueryResults &qr, const RdxContext &ctx) { + std::cout << fmt::sprintf("ClusterProxy::Upsert(qr) into '%s' begin\n", nsName); auto action = [this](const RdxContext &ctx, LeaderRefT clientToLeader, std::string_view nsName, Item &item, LocalQueryResults &qr) { return resultItemFollowerAction<&client::Reindexer::Upsert>(ctx, clientToLeader, nsName, item, qr); }; - return proxyCall(ctx, nsName, action, nsName, item, qr); + auto res = proxyCall(ctx, nsName, action, nsName, item, qr); + std::cout << fmt::sprintf("ClusterProxy::Upsert(qr) into '%s' done\n", nsName); + return res; } Error Delete(std::string_view nsName, Item &item, const RdxContext &ctx) { auto action = [this](const RdxContext &ctx, LeaderRefT clientToLeader, std::string_view nsName, Item &item) { diff --git a/cpp_src/core/namespace/namespace.h b/cpp_src/core/namespace/namespace.h index 881b2e52e..70a551d0d 100644 --- a/cpp_src/core/namespace/namespace.h +++ b/cpp_src/core/namespace/namespace.h @@ -54,6 +54,10 @@ class Namespace { CounterGuardAIR32 cg(ns->cancelCommitCnt_); if constexpr (std::is_same_v) { + auto name = GetName(ctx); + if (enumVal == ModeUpsert && !isSystemNamespaceNameFast(name)) { + std::cout << fmt::sprintf("NamespaceImpl::ModifyItem(qr) into '%s' begin\n", name); + } auto wlck = ns->dataWLock(nsCtx.rdxContext); cg.Reset(); qr.AddNamespace(ns, true); @@ -61,6 +65,9 @@ class Namespace { (*ns.*fn)(v, enumVal, pendedRepl, nsCtx); qr.AddItem(v, true, false); ns->replicate(std::move(pendedRepl), std::move(wlck), true, nullptr, nsCtx); + if (enumVal == ModeUpsert && !isSystemNamespaceNameFast(name)) { + std::cout << fmt::sprintf("NamespaceImpl::ModifyItem(qr) into '%s' done\n", name); + } } else { auto params = longUpdDelLoggingParams_.load(std::memory_order_relaxed); const bool isEnabled = params.thresholdUs >= 0 && !isSystemNamespaceNameFast(v.NsName()); @@ -127,9 +134,25 @@ class Namespace { void Update(const Query &query, LocalQueryResults &result, const RdxContext &ctx) { nsFuncWrapper<&NamespaceImpl::doUpdate, QueryType::QueryUpdate>(query, result, ctx); } - void Upsert(Item &item, const RdxContext &ctx) { nsFuncWrapper<&NamespaceImpl::Upsert>(item, ctx); } + void Upsert(Item &item, const RdxContext &ctx) { + auto name = GetName(ctx); + if (!isSystemNamespaceNameFast(name)) { + std::cout << fmt::sprintf("Namespace::Upsert(...) into '%s' begin\n", name); + } + nsFuncWrapper<&NamespaceImpl::Upsert>(item, ctx); + if (!isSystemNamespaceNameFast(name)) { + std::cout << fmt::sprintf("Namespace::Upsert(...) 
into '%s' done\n", name); + } + } void Upsert(Item &item, LocalQueryResults &qr, const RdxContext &ctx) { + auto name = GetName(ctx); + if (!isSystemNamespaceNameFast(name)) { + std::cout << fmt::sprintf("Namespace::Upsert(qr) into '%s' begin\n", name); + } nsFuncWrapper<&NamespaceImpl::modifyItem, ItemModifyMode::ModeUpsert>(item, qr, ctx); + if (!isSystemNamespaceNameFast(name)) { + std::cout << fmt::sprintf("Namespace::Upsert(qr) into '%s' done\n", name); + } } void Delete(Item &item, const RdxContext &ctx) { nsFuncWrapper<&NamespaceImpl::Delete>(item, ctx); } void Delete(Item &item, LocalQueryResults &qr, const RdxContext &ctx) { diff --git a/cpp_src/core/namespace/namespaceimpl.cc b/cpp_src/core/namespace/namespaceimpl.cc index 5f817925c..75de46adc 100644 --- a/cpp_src/core/namespace/namespaceimpl.cc +++ b/cpp_src/core/namespace/namespaceimpl.cc @@ -1478,6 +1478,10 @@ void NamespaceImpl::doTruncate(UpdatesContainer& pendedRepl, const NsContext& ct } void NamespaceImpl::ModifyItem(Item& item, ItemModifyMode mode, const RdxContext& ctx) { + auto name = GetName(ctx); + if (mode == ModeUpsert && !isSystemNamespaceNameFast(name)) { + std::cout << fmt::sprintf("NamespaceImpl::ModifyItem(...) into '%s' begin\n", name); + } PerfStatCalculatorMT calc(updatePerfCounter_, enablePerfCounters_); UpdatesContainer pendedRepl; @@ -1491,6 +1495,9 @@ void NamespaceImpl::ModifyItem(Item& item, ItemModifyMode mode, const RdxContext modifyItem(item, mode, pendedRepl, NsContext(ctx)); replicate(std::move(pendedRepl), std::move(wlck), true, nullptr, ctx); + if (mode == ModeUpsert && !isSystemNamespaceNameFast(name)) { + std::cout << fmt::sprintf("NamespaceImpl::ModifyItem(...) into '%s' done\n", name); + } } void NamespaceImpl::Truncate(const RdxContext& ctx) { @@ -1809,7 +1816,13 @@ void NamespaceImpl::modifyItem(Item& item, ItemModifyMode mode, UpdatesContainer if (mode == ModeDelete) { deleteItem(item, pendedRepl, ctx); } else { + if (mode == ModeUpsert && !isSystemNamespaceNameFast(name_)) { + std::cout << fmt::sprintf("NamespaceImpl::modifyItem() into '%s' begin\n", name_); + } doModifyItem(item, mode, pendedRepl, ctx); + if (mode == ModeUpsert && !isSystemNamespaceNameFast(name_)) { + std::cout << fmt::sprintf("NamespaceImpl::modifyItem() into '%s' done\n", name_); + } } } diff --git a/cpp_src/core/namespace/namespaceimpl.h b/cpp_src/core/namespace/namespaceimpl.h index d58e14509..0c62d9b6b 100644 --- a/cpp_src/core/namespace/namespaceimpl.h +++ b/cpp_src/core/namespace/namespaceimpl.h @@ -587,7 +587,9 @@ class NamespaceImpl : public intrusive_atomic_rc_base { // NOLINT(*performance. QueryStatsCalculatorT &&statCalculator, const NsContext &ctx) { if (!repl_.temporary) { assertrx(!ctx.isCopiedNsRequest); - std::cout << fmt::sprintf("Namespace::'%s' replicating %d records\n", name_, recs.size()); + if (!isSystem()) { + std::cout << fmt::sprintf("Namespace::'%s' replicating %d records\n", name_, recs.size()); + } auto err = clusterizator_.Replicate( std::move(recs), [&wlck]() { diff --git a/cpp_src/core/rdxcontext.h b/cpp_src/core/rdxcontext.h index bbc115af4..989e7708f 100644 --- a/cpp_src/core/rdxcontext.h +++ b/cpp_src/core/rdxcontext.h @@ -40,6 +40,26 @@ void ThrowOnCancel(const Context& ctx, std::string_view errMsg = std::string_vie throw Error(errCanceled, errMsg.empty() ?
kDefaultCancelError : errMsg); } +template +void AssertOnCancel(const Context& ctx, std::string_view errMsg = std::string_view()) { + (void)errMsg; + if (!ctx.IsCancelable()) return; + + const auto cancel = ctx.CheckCancel(); + switch (cancel) { + case CancelType::Explicit: + assertrx(false); + std::abort(); + case CancelType::Timeout: + assertrx(false); + std::abort(); + case CancelType::None: + return; + } + assertrx(false); + std::abort(); +} + class RdxDeadlineContext : public IRdxCancelContext { public: using ClockT = std::chrono::steady_clock; diff --git a/cpp_src/core/reindexer_impl/reindexerimpl.cc b/cpp_src/core/reindexer_impl/reindexerimpl.cc index bd52b68ab..b018eab0d 100644 --- a/cpp_src/core/reindexer_impl/reindexerimpl.cc +++ b/cpp_src/core/reindexer_impl/reindexerimpl.cc @@ -930,6 +930,13 @@ Error ReindexerImpl::applyNsFunction(std::string_view nsName, const RdxContext& return applyNsFunction(nsName, ctx, arg1, arg2) +#define APPLY_NS_FUNCTION11(needUpdateSys, memFn, arg) \ + applyNsFunction(nsName, ctx, arg) + +#define APPLY_NS_FUNCTION22(needUpdateSys, memFn, arg1, arg2) \ + applyNsFunction(nsName, ctx, arg1, arg2) + Error ReindexerImpl::Insert(std::string_view nsName, Item& item, const RdxContext& ctx) { APPLY_NS_FUNCTION1(true, Insert, item); } Error ReindexerImpl::insertDontUpdateSystemNS(std::string_view nsName, Item& item, const RdxContext& ctx) { @@ -965,10 +972,18 @@ Error ReindexerImpl::Update(const Query& q, LocalQueryResults& result, const Rdx return errOK; } -Error ReindexerImpl::Upsert(std::string_view nsName, Item& item, const RdxContext& ctx) { APPLY_NS_FUNCTION1(true, Upsert, item); } +Error ReindexerImpl::Upsert(std::string_view nsName, Item& item, const RdxContext& ctx) { + std::cout << fmt::sprintf("ReindexerImpl::Upsert(...) into '%s' begin\n", nsName); + auto res = APPLY_NS_FUNCTION11(true, Upsert, item); + std::cout << fmt::sprintf("ReindexerImpl::Upsert(...) 
into '%s' done\n", nsName); + return res; +} Error ReindexerImpl::Upsert(std::string_view nsName, Item& item, LocalQueryResults& qr, const RdxContext& ctx) { - APPLY_NS_FUNCTION2(true, Upsert, item, qr); + std::cout << fmt::sprintf("ReindexerImpl::Upsert(qr) into '%s' begin\n", nsName); + auto res = APPLY_NS_FUNCTION22(true, Upsert, item, qr); + std::cout << fmt::sprintf("ReindexerImpl::Upsert(qr) into '%s' done\n", nsName); + return res; } Item ReindexerImpl::NewItem(std::string_view nsName, const RdxContext& rdxCtx) { diff --git a/cpp_src/estl/contexted_cond_var.h b/cpp_src/estl/contexted_cond_var.h index 859f18466..6affd486e 100644 --- a/cpp_src/estl/contexted_cond_var.h +++ b/cpp_src/estl/contexted_cond_var.h @@ -23,7 +23,7 @@ class contexted_cond_var { // const auto lockWard = _M_context->BeforeLock(_Mutex::mark); if (_M_chk_timeout.count() > 0 && __context.IsCancelable()) { while (!_M_cond_var->wait_for(__lock, _M_chk_timeout, __p)) { - ThrowOnCancel(__context, "Context was canceled or timed out (condition variable)"sv); + AssertOnCancel(__context, "Context was canceled or timed out (condition variable)"sv); } } else { _M_cond_var->wait(__lock, std::move(__p)); @@ -37,7 +37,7 @@ class contexted_cond_var { // const auto lockWard = _M_context->BeforeLock(_Mutex::mark); if (_M_chk_timeout.count() > 0 && __context.IsCancelable()) { while (_M_cond_var->wait_for(__lock, _M_chk_timeout) == std::cv_status::timeout) { - ThrowOnCancel(__context, "Context was canceled or timed out (condition variable)"sv); + AssertOnCancel(__context, "Context was canceled or timed out (condition variable)"sv); } } else { _M_cond_var->wait(__lock); diff --git a/cpp_src/gtests/tests/API/base_tests.cc b/cpp_src/gtests/tests/API/base_tests.cc deleted file mode 100644 index 1554aee81..000000000 --- a/cpp_src/gtests/tests/API/base_tests.cc +++ /dev/null @@ -1,2110 +0,0 @@ -#include -#include "reindexer_api.h" -#include "tools/errors.h" - -#include "core/item.h" -#include "core/keyvalue/variant.h" -#include "core/queryresults/joinresults.h" -#include "core/reindexer.h" -#include "tools/fsops.h" -#include "tools/logger.h" -#include "tools/stringstools.h" - -#include -#include - -#include "core/cjson/jsonbuilder.h" -#include "core/keyvalue/p_string.h" -#include "server/loggerwrapper.h" -#include "tools/serializer.h" - -TEST(ReindexerTest, DeleteTemporaryNamespaceOnConnect) { - const auto kStoragePath = reindexer::fs::JoinPath(reindexer::fs::GetTempDir(), "reindex/base_tests/DeleteTemporaryNamespaceOnConnect"); - const std::string kBuiltin = "builtin://" + kStoragePath; - - std::string temporaryNamespacePath; - { - reindexer::Reindexer rt; - Error err = rt.Connect(kBuiltin); - ASSERT_TRUE(err.ok()) << err.what(); - - // Create temporary namespace - std::string temporaryNamespaceOnFSName; - err = rt.CreateTemporaryNamespace("tmp_ns", temporaryNamespaceOnFSName, StorageOpts().Enabled()); - ASSERT_TRUE(err.ok()) << err.what(); - - // Check temporary namespace on Filesysten - temporaryNamespacePath = reindexer::fs::JoinPath(kStoragePath, temporaryNamespaceOnFSName); - ASSERT_TRUE(reindexer::fs::Stat(temporaryNamespacePath) == reindexer::fs::StatDir); - } - - // On second connect we already have tmp namespace, and Connect should delete it. 
- { - reindexer::Reindexer rt; - Error err = rt.Connect(kBuiltin); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_TRUE(reindexer::fs::Stat(temporaryNamespacePath) == reindexer::fs::StatError); - } -} - -TEST_F(ReindexerApi, AddNamespace) { - auto err = rt.reindexer->OpenNamespace(default_namespace, StorageOpts().Enabled(false)); - ASSERT_EQ(true, err.ok()) << err.what(); - - const auto item = getMemStat(*rt.reindexer, default_namespace); - ASSERT_EQ(item["storage_ok"].As(), false); - ASSERT_EQ(item["storage_enabled"].As(), false); - ASSERT_EQ(item["storage_status"].As(), "DISABLED"); -} - -TEST_F(ReindexerApi, AddNamespace_CaseInsensitive) { - Error err = rt.reindexer->OpenNamespace(default_namespace); - ASSERT_TRUE(err.ok()) << err.what(); - - std::string upperNS(default_namespace); - std::transform(default_namespace.begin(), default_namespace.end(), upperNS.begin(), [](int c) { return std::toupper(c); }); - - err = rt.reindexer->AddNamespace(reindexer::NamespaceDef(upperNS)); - ASSERT_FALSE(err.ok()) << "Somehow namespace '" << upperNS << "' was added. But namespace '" << default_namespace << "' already exists"; -} - -TEST_F(ReindexerApi, AddExistingNamespace) { - Error err = rt.reindexer->OpenNamespace(default_namespace); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->AddNamespace(reindexer::NamespaceDef(default_namespace, StorageOpts().Enabled(false))); - ASSERT_FALSE(err.ok()) << err.what(); -} - -TEST_F(ReindexerApi, RenameNamespace) { - Error err = rt.reindexer->OpenNamespace(default_namespace); - ASSERT_TRUE(err.ok()) << err.what(); - err = rt.reindexer->AddIndex(default_namespace, {"id", "hash", "int", IndexOpts().PK()}); - ASSERT_TRUE(err.ok()); - for (int i = 0; i < 10; ++i) { - Item item(rt.reindexer->NewItem(default_namespace)); - ASSERT_TRUE(!!item); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - - item["id"] = i; - item["column1"] = i + 100; - err = rt.reindexer->Upsert(default_namespace, item); - ASSERT_TRUE(err.ok()) << err.what(); - } - - const std::string renameNamespace("rename_namespace"); - const std::string existingNamespace("existing_namespace"); - - err = rt.reindexer->OpenNamespace(existingNamespace); - ASSERT_TRUE(err.ok()) << err.what(); - - auto testInList = [&](std::string_view testNamespaceName, bool inList) { - std::vector namespacesList; - err = rt.reindexer->EnumNamespaces(namespacesList, reindexer::EnumNamespacesOpts()); - ASSERT_TRUE(err.ok()) << err.what(); - auto r = std::find_if(namespacesList.begin(), namespacesList.end(), - [testNamespaceName](const reindexer::NamespaceDef& d) { return d.name == testNamespaceName; }); - if (inList) { - ASSERT_FALSE(r == namespacesList.end()) << testNamespaceName << " not exist"; - } else { - ASSERT_TRUE(r == namespacesList.end()) << testNamespaceName << " exist"; - } - }; - - auto getRowsInJSON = [&](std::string_view namespaceName, std::vector& resStrings) { - QueryResults result; - auto err = rt.reindexer->Select(Query(namespaceName), result); - ASSERT_TRUE(err.ok()) << err.what(); - resStrings.clear(); - for (auto it = result.begin(); it != result.end(); ++it) { - ASSERT_TRUE(it.Status().ok()) << it.Status().what(); - reindexer::WrSerializer sr; - err = it.GetJSON(sr, false); - ASSERT_TRUE(err.ok()) << err.what(); - std::string_view sv = sr.Slice(); - resStrings.emplace_back(sv.data(), sv.size()); - } - }; - - std::vector resStrings; - std::vector resStringsBeforeTest; - getRowsInJSON(default_namespace, resStringsBeforeTest); - - // ok - err = 
rt.reindexer->RenameNamespace(default_namespace, renameNamespace); - ASSERT_TRUE(err.ok()) << err.what(); - testInList(renameNamespace, true); - testInList(default_namespace, false); - getRowsInJSON(renameNamespace, resStrings); - ASSERT_TRUE(resStrings == resStringsBeforeTest) << "Data in namespace changed"; - - // rename to equal name - err = rt.reindexer->RenameNamespace(renameNamespace, renameNamespace); - ASSERT_TRUE(err.ok()) << err.what(); - testInList(renameNamespace, true); - getRowsInJSON(renameNamespace, resStrings); - ASSERT_TRUE(resStrings == resStringsBeforeTest) << "Data in namespace changed"; - - // rename to empty namespace - err = rt.reindexer->RenameNamespace(renameNamespace, ""); - ASSERT_FALSE(err.ok()) << err.what(); - testInList(renameNamespace, true); - getRowsInJSON(renameNamespace, resStrings); - ASSERT_TRUE(resStrings == resStringsBeforeTest) << "Data in namespace changed"; - - // rename to system namespace - err = rt.reindexer->RenameNamespace(renameNamespace, "#rename_namespace"); - ASSERT_FALSE(err.ok()) << err.what(); - testInList(renameNamespace, true); - getRowsInJSON(renameNamespace, resStrings); - ASSERT_TRUE(resStrings == resStringsBeforeTest) << "Data in namespace changed"; - - // rename to existing namespace - err = rt.reindexer->RenameNamespace(renameNamespace, existingNamespace); - ASSERT_TRUE(err.ok()) << err.what(); - testInList(renameNamespace, false); - testInList(existingNamespace, true); - getRowsInJSON(existingNamespace, resStrings); - ASSERT_TRUE(resStrings == resStringsBeforeTest) << "Data in namespace changed"; -} - -TEST_F(ReindexerApi, AddIndex) { - Error err = rt.reindexer->OpenNamespace(default_namespace); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->AddIndex(default_namespace, {"id", "hash", "int", IndexOpts().PK()}); - ASSERT_TRUE(err.ok()) << err.what(); -} - -TEST_F(ReindexerApi, DistinctDiffType) { - reindexer::p_string stringVal("abc"); - std::hash hashStr; - size_t vString = hashStr(stringVal); - std::hash hashInt; - auto vInt = hashInt(vString); - ASSERT_EQ(vString, vInt) << "hash not equals"; - - Error err = rt.reindexer->OpenNamespace(default_namespace); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->AddIndex(default_namespace, {"id", "hash", "int", IndexOpts().PK()}); - ASSERT_TRUE(err.ok()) << err.what(); - - { - Item item(rt.reindexer->NewItem(default_namespace)); - ASSERT_TRUE(!!item); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - - item["id"] = 1; - item["column1"] = int64_t(vInt); - err = rt.reindexer->Upsert(default_namespace, item); - ASSERT_TRUE(err.ok()) << err.what(); - } - { - Item item(rt.reindexer->NewItem(default_namespace)); - ASSERT_TRUE(!!item); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - - item["id"] = 2; - item["column1"] = stringVal; - err = rt.reindexer->Upsert(default_namespace, item); - ASSERT_TRUE(err.ok()) << err.what(); - } - { - Item item(rt.reindexer->NewItem(default_namespace)); - ASSERT_TRUE(!!item); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - - item["id"] = 3; - item["column1"] = stringVal; - err = rt.reindexer->Upsert(default_namespace, item); - ASSERT_TRUE(err.ok()) << err.what(); - } - - QueryResults result; - err = rt.reindexer->Select("select column1, distinct(column1) from test_namespace;", result); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(result.Count(), 2); - std::set BaseVals = {"{\"column1\":" + std::to_string(int64_t(vInt)) + "}", "{\"column1\":\"abc\"}"}; - std::set Vals; - for (auto& r : 
result) { - reindexer::WrSerializer ser; - auto err = r.GetJSON(ser, false); - ASSERT_TRUE(err.ok()) << err.what(); - Vals.insert(ser.c_str()); - } - ASSERT_TRUE(bool(BaseVals == Vals)); -} - -TEST_F(ReindexerApi, DistinctCompositeIndex) { - Error err = rt.reindexer->OpenNamespace(default_namespace); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->AddIndex(default_namespace, {"id", "hash", "int", IndexOpts().PK()}); - ASSERT_TRUE(err.ok()) << err.what(); - err = rt.reindexer->AddIndex(default_namespace, {"v1", "-", "int", IndexOpts()}); - ASSERT_TRUE(err.ok()) << err.what(); - err = rt.reindexer->AddIndex(default_namespace, {"v2", "-", "int", IndexOpts()}); - ASSERT_TRUE(err.ok()) << err.what(); - - reindexer::IndexDef indexDeclr; - indexDeclr.name_ = "v1+v2"; - indexDeclr.indexType_ = "hash"; - indexDeclr.fieldType_ = "composite"; - indexDeclr.opts_ = IndexOpts(); - indexDeclr.jsonPaths_ = reindexer::JsonPaths({"v1", "v2"}); - err = rt.reindexer->AddIndex(default_namespace, indexDeclr); - EXPECT_TRUE(err.ok()) << err.what(); - - { - Item item = NewItem(default_namespace); - item["id"] = 1; - item["v1"] = 2; - item["v2"] = 3; - err = rt.reindexer->Upsert(default_namespace, item); - EXPECT_TRUE(err.ok()) << err.what(); - } - { - Item item = NewItem(default_namespace); - item["id"] = 2; - item["v1"] = 2; - item["v2"] = 3; - err = rt.reindexer->Upsert(default_namespace, item); - EXPECT_TRUE(err.ok()) << err.what(); - } - - Query q{default_namespace}; - q.Distinct("v1+v2"); - { - QueryResults qr; - err = rt.reindexer->Select(q, qr); - EXPECT_TRUE(err.ok()) << err.what(); - EXPECT_EQ(qr.Count(), 1); - } - - { - Item item = NewItem(default_namespace); - item["id"] = 3; - item["v1"] = 3; - item["v2"] = 2; - err = rt.reindexer->Upsert(default_namespace, item); - EXPECT_TRUE(err.ok()) << err.what(); - } - { - QueryResults qr; - err = rt.reindexer->Select(q, qr); - EXPECT_TRUE(err.ok()) << err.what(); - EXPECT_EQ(qr.Count(), 2); - } - { - Item item = NewItem(default_namespace); - item["id"] = 4; - err = rt.reindexer->Upsert(default_namespace, item); - EXPECT_TRUE(err.ok()) << err.what(); - } - - { - Item item = NewItem(default_namespace); - item["id"] = 5; - err = rt.reindexer->Upsert(default_namespace, item); - EXPECT_TRUE(err.ok()) << err.what(); - } - { - QueryResults qr; - err = rt.reindexer->Select(q, qr); - EXPECT_TRUE(err.ok()) << err.what(); - EXPECT_EQ(qr.Count(), 3); - } - { - Item item = NewItem(default_namespace); - item["id"] = 6; - item["v1"] = 3; - err = rt.reindexer->Upsert(default_namespace, item); - EXPECT_TRUE(err.ok()) << err.what(); - } - { - Item item = NewItem(default_namespace); - item["id"] = 7; - item["v1"] = 3; - err = rt.reindexer->Upsert(default_namespace, item); - EXPECT_TRUE(err.ok()) << err.what(); - } - { - QueryResults qr; - err = rt.reindexer->Select(q, qr); - EXPECT_TRUE(err.ok()) << err.what(); - EXPECT_EQ(qr.Count(), 4); - } - { - Item item = NewItem(default_namespace); - item["id"] = 8; - item["v1"] = 4; - err = rt.reindexer->Upsert(default_namespace, item); - EXPECT_TRUE(err.ok()) << err.what(); - } - { - QueryResults qr; - err = rt.reindexer->Select(q, qr); - EXPECT_TRUE(err.ok()) << err.what(); - EXPECT_EQ(qr.Count(), 5); - } -} - -TEST_F(ReindexerApi, CompositeIndexCreationError) { - Error err = rt.reindexer->OpenNamespace(default_namespace); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->AddIndex(default_namespace, {"id", "hash", "int", IndexOpts().PK()}); - ASSERT_TRUE(err.ok()) << err.what(); - err = 
rt.reindexer->AddIndex(default_namespace, {"x", "hash", "int", IndexOpts()}); - ASSERT_TRUE(err.ok()) << err.what(); - - constexpr char kExpectedErrMsgField[] = - "Composite indexes over non-indexed field ('%s') are not supported yet (except for full-text indexes). Create at least column " - "index('-') over each field inside the composite index"; - { - // Attempt to create composite over 2 non-index fields - reindexer::IndexDef indexDeclr{"v1+v2", reindexer::JsonPaths({"v1", "v2"}), "hash", "composite", IndexOpts()}; - err = rt.reindexer->AddIndex(default_namespace, indexDeclr); - EXPECT_EQ(err.code(), errParams) << err.what(); - EXPECT_EQ(err.what(), fmt::sprintf(kExpectedErrMsgField, "v1")); - } - { - // Attempt to create composite over 1 index and 1 non-index fields - reindexer::IndexDef indexDeclr{"id+v2", reindexer::JsonPaths({"id", "v2"}), "hash", "composite", IndexOpts()}; - err = rt.reindexer->AddIndex(default_namespace, indexDeclr); - EXPECT_EQ(err.code(), errParams) << err.what(); - EXPECT_EQ(err.what(), fmt::sprintf(kExpectedErrMsgField, "v2")); - } - { - // Attempt to create composite over 1 index and 1 non-index fields - reindexer::IndexDef indexDeclr{"v2+id", reindexer::JsonPaths({"v2", "id"}), "hash", "composite", IndexOpts()}; - err = rt.reindexer->AddIndex(default_namespace, indexDeclr); - EXPECT_EQ(err.code(), errParams) << err.what(); - EXPECT_EQ(err.what(), fmt::sprintf(kExpectedErrMsgField, "v2")); - } - { - // Attempt to create sparse composite index - reindexer::IndexDef indexDeclr{"id+x", reindexer::JsonPaths({"id", "x"}), "hash", "composite", IndexOpts().Sparse()}; - err = rt.reindexer->AddIndex(default_namespace, indexDeclr); - EXPECT_EQ(err.code(), errParams) << err.what(); - EXPECT_EQ(err.what(), "Composite index cannot be sparse. Use non-sparse composite instead"); - } -} - -TEST_F(ReindexerApi, AddIndex_CaseInsensitive) { - Error err = rt.reindexer->OpenNamespace(default_namespace); - ASSERT_TRUE(err.ok()) << err.what(); - - std::string idxName = "IdEnTiFiCaToR"; - err = rt.reindexer->AddIndex(default_namespace, {idxName, "hash", "int", IndexOpts().PK()}); - ASSERT_TRUE(err.ok()); - - // check adding index named in lower case - idxName = "identificator"; - err = rt.reindexer->AddIndex(default_namespace, {idxName, "hash", "int64", IndexOpts().PK()}); - ASSERT_FALSE(err.ok()) << "Somehow index 'identificator' was added. But index 'IdEnTiFiCaToR' already exists"; - - // check adding index named in upper case - idxName = "IDENTIFICATOR"; - err = rt.reindexer->AddIndex(default_namespace, {idxName, "hash", "int64", IndexOpts().PK()}); - ASSERT_FALSE(err.ok()) << "Somehow index 'IDENTIFICATOR' was added. 
But index 'IdEnTiFiCaToR' already exists"; - - // check case insensitive field access - Item item = rt.reindexer->NewItem(default_namespace); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - ASSERT_NO_THROW(item[idxName] = 1234); -} - -TEST_F(ReindexerApi, AddExistingIndex) { - auto err = rt.reindexer->OpenNamespace(default_namespace, StorageOpts().Enabled(false)); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->AddIndex(default_namespace, {"id", "hash", "int", IndexOpts().PK()}); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->AddIndex(default_namespace, {"id", "hash", "int", IndexOpts().PK()}); - ASSERT_TRUE(err.ok()) << err.what(); -} - -TEST_F(ReindexerApi, AddUnacceptablePKIndex) { - const std::string kIdxName = "id"; - auto err = rt.reindexer->OpenNamespace(default_namespace, StorageOpts().Enabled(false)); - ASSERT_TRUE(err.ok()) << err.what(); - - // Try to add an array as a PK - err = rt.reindexer->AddIndex(default_namespace, {kIdxName, "hash", "int", IndexOpts().PK().Array()}); - ASSERT_EQ(err.code(), errParams) << err.what(); - - // Try to add a store indexes of few types as a PKs - err = rt.reindexer->AddIndex(default_namespace, {kIdxName, "-", "int", IndexOpts().PK()}); - ASSERT_EQ(err.code(), errParams) << err.what(); - - err = rt.reindexer->AddIndex(default_namespace, {kIdxName, "-", "bool", IndexOpts().PK()}); - ASSERT_EQ(err.code(), errParams) << err.what(); - - err = rt.reindexer->AddIndex(default_namespace, {kIdxName, "-", "int64", IndexOpts().PK()}); - ASSERT_EQ(err.code(), errParams) << err.what(); - - err = rt.reindexer->AddIndex(default_namespace, {kIdxName, "-", "double", IndexOpts().PK()}); - ASSERT_EQ(err.code(), errParams) << err.what(); - - err = rt.reindexer->AddIndex(default_namespace, {kIdxName, "-", "string", IndexOpts().PK()}); - ASSERT_EQ(err.code(), errParams) << err.what(); - - err = rt.reindexer->AddIndex(default_namespace, {kIdxName, "text", "string", IndexOpts().PK()}); - ASSERT_EQ(err.code(), errParams) << err.what(); - - err = rt.reindexer->AddIndex(default_namespace, {kIdxName, "fuzzytext", "string", IndexOpts().PK()}); - ASSERT_EQ(err.code(), errParams) << err.what(); - - // Add valid index with the same name - err = rt.reindexer->AddIndex(default_namespace, {kIdxName, "hash", "int", IndexOpts().PK()}); - ASSERT_TRUE(err.ok()) << err.what(); -} - -TEST_F(ReindexerApi, UpdateToUnacceptablePKIndex) { - const std::string kIdxName = "id"; - auto err = rt.reindexer->OpenNamespace(default_namespace, StorageOpts().Enabled(false)); - ASSERT_TRUE(err.ok()) << err.what(); - err = rt.reindexer->AddIndex(default_namespace, {kIdxName, "hash", "int", IndexOpts().PK()}); - ASSERT_TRUE(err.ok()) << err.what(); - - // Try to update to an array as a PK - err = rt.reindexer->UpdateIndex(default_namespace, {kIdxName, "tree", "int", IndexOpts().PK().Array()}); - ASSERT_EQ(err.code(), errParams) << err.what(); - - // Try to update to a store indexes of few types as a PKs - const std::vector kTypes = {"int", "bool", "int64", "double", "string"}; - for (auto& type : kTypes) { - err = rt.reindexer->UpdateIndex(default_namespace, {kIdxName, "-", type, IndexOpts().PK()}); - ASSERT_EQ(err.code(), errParams) << err.what(); - } - - // Update to a valid index with the same name - err = rt.reindexer->UpdateIndex(default_namespace, {kIdxName, "tree", "int", IndexOpts().PK()}); - ASSERT_TRUE(err.ok()) << err.what(); -} - -TEST_F(ReindexerApi, IndexNameValidation) { - auto err = rt.reindexer->OpenNamespace(default_namespace, 
StorageOpts().Enabled(false)); - ASSERT_TRUE(err.ok()) << err.what(); - // Index names with cirillic characters are not allowed - err = rt.reindexer->AddIndex(default_namespace, {"индекс", "hash", "int", IndexOpts().PK()}); - ASSERT_EQ(err.code(), errParams) << err.what(); - err = rt.reindexer->AddIndex(default_namespace, {"idд", "hash", "int", IndexOpts().PK()}); - ASSERT_EQ(err.code(), errParams) << err.what(); - // Index names with special characters are not allowed - const std::string_view kForbiddenChars = "?#№/@!$%^*)+"; - for (auto c : kForbiddenChars) { - auto idxName = std::string("id"); - idxName += c; - err = rt.reindexer->AddIndex(default_namespace, {idxName, "hash", "int", IndexOpts().PK()}); - ASSERT_EQ(err.code(), errParams) << err.what() << "; IdxName: " << idxName; - } -} - -TEST_F(ReindexerApi, AddExistingIndexWithDiffType) { - auto err = rt.reindexer->OpenNamespace(default_namespace, StorageOpts().Enabled(false)); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->AddIndex(default_namespace, {"id", "hash", "int", IndexOpts().PK()}); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->AddIndex(default_namespace, {"id", "hash", "int64", IndexOpts().PK()}); - ASSERT_EQ(err.code(), errConflict) << err.what(); -} - -TEST_F(ReindexerApi, CloseNamespace) { - Error err = rt.reindexer->OpenNamespace(default_namespace); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->AddIndex(default_namespace, {"id", "hash", "int", IndexOpts().PK()}); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->CloseNamespace(default_namespace); - ASSERT_TRUE(err.ok()) << err.what(); - - QueryResults qr; - err = rt.reindexer->Select(Query(default_namespace), qr); - ASSERT_FALSE(err.ok()) << "Namespace '" << default_namespace << "' open. 
But must be closed"; -} - -TEST_F(ReindexerApi, DropStorage) { - const std::string kBaseTestsStoragePath = reindexer::fs::JoinPath(reindexer::fs::GetTempDir(), "reindex/api_drop_storage/"); - auto rx = std::make_unique(); - auto err = rx->Connect("builtin://" + kBaseTestsStoragePath); - ASSERT_TRUE(err.ok()) << err.what(); - auto storagePath = reindexer::fs::JoinPath(kBaseTestsStoragePath, default_namespace); - err = rx->OpenNamespace(default_namespace); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_TRUE(reindexer::fs::Stat(storagePath) == reindexer::fs::StatDir); - - err = rx->AddIndex(default_namespace, {"id", "hash", "int", IndexOpts().PK()}); - ASSERT_TRUE(err.ok()) << err.what(); - - const auto item = getMemStat(*rx, default_namespace); - ASSERT_EQ(item["storage_ok"].As(), true); - ASSERT_EQ(item["storage_enabled"].As(), true); - ASSERT_EQ(item["storage_status"].As(), "OK"); - - err = rx->DropNamespace(default_namespace); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_TRUE(reindexer::fs::Stat(storagePath) == reindexer::fs::StatError); -} - -TEST_F(ReindexerApi, DeleteNonExistingNamespace) { - auto err = rt.reindexer->CloseNamespace(default_namespace); - ASSERT_FALSE(err.ok()) << "Error: unexpected result of delete non-existing namespace."; -} - -TEST_F(ReindexerApi, NewItem) { - auto err = rt.reindexer->OpenNamespace(default_namespace, StorageOpts().Enabled()); - - ASSERT_TRUE(err.ok()) << err.what(); - err = rt.reindexer->AddIndex(default_namespace, {"id", "hash", "int", IndexOpts().PK()}); - ASSERT_TRUE(err.ok()) << err.what(); - err = rt.reindexer->AddIndex(default_namespace, {"value", "text", "string", IndexOpts()}); - ASSERT_TRUE(err.ok()) << err.what(); - Item item(rt.reindexer->NewItem(default_namespace)); - ASSERT_TRUE(!!item); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); -} - -TEST_F(ReindexerApi, GetItemFromQueryResults) { - constexpr size_t kItemsCount = 10; - initializeDefaultNs(); - std::vector> data; - while (data.size() < kItemsCount) { - Item item(rt.reindexer->NewItem(default_namespace)); - ASSERT_TRUE(!!item); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - data.emplace_back(data.size(), RandString()); - item["id"] = data.back().first; - item["value"] = data.back().second; - auto err = rt.reindexer->Insert(default_namespace, item); - ASSERT_TRUE(err.ok()) << err.what(); - } - reindexer::QueryResults qr; - auto err = rt.reindexer->Select(Query(default_namespace).Sort("id", false), qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), kItemsCount); - // items in QueryResults are valid after the ns is destroyed - err = rt.reindexer->TruncateNamespace(default_namespace); - ASSERT_TRUE(err.ok()) << err.what(); - err = rt.reindexer->DropNamespace(default_namespace); - ASSERT_TRUE(err.ok()) << err.what(); - - size_t i = 0; - for (auto it = qr.begin(), end = qr.end(); it != end; ++it, ++i) { - ASSERT_LT(i, data.size()); - auto item = it.GetItem(); - ASSERT_TRUE(!!item); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - EXPECT_EQ(item["id"].As(), data[i].first); - EXPECT_EQ(item["value"].As(), data[i].second); - } - - qr.Clear(); - data.clear(); - initializeDefaultNs(); - { - Item item(rt.reindexer->NewItem(default_namespace)); - ASSERT_TRUE(!!item); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - data.emplace_back(data.size(), RandString()); - item["id"] = data.back().first; - item["value"] = data.back().second; - err = rt.reindexer->Insert(default_namespace, item, qr); - ASSERT_TRUE(err.ok()) << err.what(); 
- ASSERT_EQ(qr.Count(), data.size()); - - item = rt.reindexer->NewItem(default_namespace); - ASSERT_TRUE(!!item); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - data.emplace_back(data.size(), RandString()); - item["id"] = data.back().first; - item["value"] = data.back().second; - err = rt.reindexer->Upsert(default_namespace, item, qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), data.size()); - - item = rt.reindexer->NewItem(default_namespace); - ASSERT_TRUE(!!item); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - data.emplace_back(data.back().first, RandString()); - item["id"] = data.back().first; - item["value"] = data.back().second; - err = rt.reindexer->Upsert(default_namespace, item, qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), data.size()); - - item = rt.reindexer->NewItem(default_namespace); - ASSERT_TRUE(!!item); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - data.emplace_back(data.size(), RandString()); - item["id"] = data.back().first; - item["value"] = data.back().second; - err = rt.reindexer->Insert(default_namespace, item, qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), data.size()); - - item = rt.reindexer->NewItem(default_namespace); - ASSERT_TRUE(!!item); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - data.emplace_back(data.back().first, RandString()); - item["id"] = data.back().first; - item["value"] = data.back().second; - err = rt.reindexer->Update(default_namespace, item, qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), data.size()); - - item = rt.reindexer->NewItem(default_namespace); - ASSERT_TRUE(!!item); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - data.emplace_back(data.back()); - item["id"] = data.back().first; - item["value"] = RandString(); - err = rt.reindexer->Delete(default_namespace, item, qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), data.size()); - - item = rt.reindexer->NewItem(default_namespace); - ASSERT_TRUE(!!item); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - item["id"] = static_cast(data.size()); - item["value"] = RandString(); - err = rt.reindexer->Update(default_namespace, item, qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), data.size()); - - item = rt.reindexer->NewItem(default_namespace); - ASSERT_TRUE(!!item); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - item["id"] = static_cast(data.size()); - item["value"] = RandString(); - err = rt.reindexer->Delete(default_namespace, item, qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), data.size()); - } - err = rt.reindexer->TruncateNamespace(default_namespace); - ASSERT_TRUE(err.ok()) << err.what(); - err = rt.reindexer->DropNamespace(default_namespace); - ASSERT_TRUE(err.ok()) << err.what(); - - ASSERT_EQ(qr.Count(), 6); - ASSERT_EQ(qr.Count(), data.size()); - i = 0; - for (auto it = qr.begin(), end = qr.end(); it != end; ++it, ++i) { - ASSERT_LT(i, data.size()); - auto item = it.GetItem(); - ASSERT_TRUE(!!item); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - EXPECT_EQ(item["id"].As(), data[i].first); - EXPECT_EQ(item["value"].As(), data[i].second); - } -} - -TEST_F(ReindexerApi, NewItem_CaseInsensitiveCheck) { - int idVal = 1000; - std::string valueVal = "value"; - - auto err = rt.reindexer->OpenNamespace(default_namespace, StorageOpts().Enabled()); - - ASSERT_TRUE(err.ok()) << err.what(); - err = rt.reindexer->AddIndex(default_namespace, {"id", 
"hash", "int", IndexOpts().PK()}); - ASSERT_TRUE(err.ok()) << err.what(); - err = rt.reindexer->AddIndex(default_namespace, {"value", "text", "string", IndexOpts()}); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->Commit(default_namespace); - ASSERT_TRUE(err.ok()) << err.what(); - - auto item = rt.reindexer->NewItem(default_namespace); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - ASSERT_NO_THROW(item["ID"] = 1000); - ASSERT_NO_THROW(item["VaLuE"] = "value"); - ASSERT_NO_THROW(ASSERT_EQ(item["id"].As(), idVal)); - ASSERT_NO_THROW(ASSERT_EQ(item["value"].As(), valueVal)); -} - -TEST_F(ReindexerApi, Insert) { - Error err = rt.reindexer->OpenNamespace(default_namespace, StorageOpts().Enabled(false)); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->AddIndex(default_namespace, {"id", "hash", "int", IndexOpts().PK()}); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->AddIndex(default_namespace, {"value", "text", "string", IndexOpts()}); - ASSERT_TRUE(err.ok()) << err.what(); - - Item item(rt.reindexer->NewItem(default_namespace)); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - - err = item.FromJSON(R"_({"id":1234, "value" : "value"})_"); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->Insert(default_namespace, item); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->Commit(default_namespace); - ASSERT_TRUE(err.ok()) << err.what(); - - QueryResults qr; - err = rt.reindexer->Select(Query(default_namespace), qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), 1); - - // check item consist and check case insensitive access to field by name - Item selItem = qr.begin().GetItem(false); - ASSERT_NO_THROW(ASSERT_EQ(selItem["id"].As(), 1234)); - ASSERT_NO_THROW(ASSERT_EQ(selItem["value"].As(), "value")); -} - -TEST_F(ReindexerApi, ItemJSONWithDouble) { - Error err = rt.reindexer->OpenNamespace(default_namespace, StorageOpts().Enabled(false)); - ASSERT_TRUE(err.ok()) << err.what(); - Item item = rt.reindexer->NewItem(default_namespace); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - - { - const std::string kJSON = R"_({"id":1234,"double":0.0})_"; - err = item.FromJSON(kJSON); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(item.GetJSON(), kJSON); - } - - { - const std::string kJSON = R"_({"id":1234,"double":0.1})_"; - err = item.FromJSON(kJSON); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(item.GetJSON(), kJSON); - } -} - -TEST_F(ReindexerApi, WithTimeoutInterface) { - using std::chrono::milliseconds; - - Error err = rt.reindexer->OpenNamespace(default_namespace, StorageOpts().Enabled(false)); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->AddIndex(default_namespace, {"id", "hash", "int", IndexOpts().PK()}); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->AddIndex(default_namespace, {"value", "text", "string", IndexOpts()}); - ASSERT_TRUE(err.ok()) << err.what(); - - Item item(rt.reindexer->NewItem(default_namespace)); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - - err = item.FromJSON(R"_({"id":1234, "value" : "value"})_"); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->WithTimeout(milliseconds(1000)).Insert(default_namespace, item); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->WithTimeout(milliseconds(100)).Commit(default_namespace); - ASSERT_TRUE(err.ok()) << err.what(); - - QueryResults qr; - err = rt.reindexer->WithTimeout(milliseconds(1000)).Select(Query(default_namespace), qr); - 
ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), 1); - - // check item consist and check case insensitive access to field by name - Item selItem = qr.begin().GetItem(false); - ASSERT_NO_THROW(ASSERT_EQ(selItem["id"].As(), 1234)); - ASSERT_NO_THROW(ASSERT_EQ(selItem["value"].As(), "value")); - - qr.Clear(); - err = rt.reindexer->WithTimeout(milliseconds(1000)).Delete(Query(default_namespace), qr); - ASSERT_TRUE(err.ok()) << err.what(); -} - -template -struct CollateComparer { - bool operator()(const std::string& lhs, const std::string& rhs) const { - return reindexer::collateCompare(lhs, rhs, reindexer::SortingPrioritiesTable()) < 0; - } -}; - -TEST_F(ReindexerApi, SortByMultipleColumns) { - auto err = rt.reindexer->OpenNamespace(default_namespace, StorageOpts().Enabled(false)); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->AddIndex(default_namespace, {"id", "hash", "int", IndexOpts().PK()}); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->AddIndex(default_namespace, {"column1", "tree", "int", IndexOpts()}); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->AddIndex(default_namespace, {"column2", "tree", "string", IndexOpts()}); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->AddIndex(default_namespace, {"column3", "hash", "int", IndexOpts()}); - ASSERT_TRUE(err.ok()) << err.what(); - - const std::vector possibleValues = { - "apple", "arrangment", "agreement", "banana", "bull", "beech", "crocodile", "crucifix", "coat", "day", - "dog", "deer", "easter", "ear", "eager", "fair", "fool", "foot", "genes", "genres", - "greatness", "hockey", "homeless", "homocide", "key", "kit", "knockdown", "motion", "monument", "movement"}; - - int sameOldValue = 0; - int stringValuedIdx = 0; - for (int i = 0; i < 100; ++i) { - Item item(rt.reindexer->NewItem(default_namespace)); - ASSERT_TRUE(!!item); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - - item["id"] = i; - item["column1"] = sameOldValue; - item["column2"] = possibleValues[stringValuedIdx]; - item["column3"] = rand() % 30; - - err = rt.reindexer->Upsert(default_namespace, item); - ASSERT_TRUE(err.ok()) << err.what(); - - if (i % 5 == 0) sameOldValue += 5; - if (i % 3 == 0) ++stringValuedIdx; - stringValuedIdx %= possibleValues.size(); - } - - err = rt.reindexer->Commit(default_namespace); - EXPECT_TRUE(err.ok()) << err.what(); - - const size_t offset = 23; - const size_t limit = 61; - - QueryResults qr; - Query query{Query(default_namespace, offset, limit).Sort("column1", true).Sort("column2", false).Sort("column3", false)}; - err = rt.reindexer->Select(query, qr); - EXPECT_TRUE(err.ok()) << err.what(); - EXPECT_TRUE(qr.Count() == limit) << qr.Count(); - - PrintQueryResults(default_namespace, qr); - - std::vector lastValues(query.sortingEntries_.size()); - for (auto& it : qr) { - Item item = it.GetItem(false); - - std::vector cmpRes(query.sortingEntries_.size()); - std::fill(cmpRes.begin(), cmpRes.end(), -1); - - for (size_t j = 0; j < query.sortingEntries_.size(); ++j) { - const reindexer::SortingEntry& sortingEntry(query.sortingEntries_[j]); - Variant sortedValue = item[sortingEntry.expression]; - if (!lastValues[j].Type().Is()) { - cmpRes[j] = lastValues[j].Compare(sortedValue); - bool needToVerify = true; - if (j != 0) { - for (int k = j - 1; k >= 0; --k) - if (cmpRes[k] != 0) { - needToVerify = false; - break; - } - } - needToVerify = (j == 0) || needToVerify; - if (needToVerify) { - bool sortOrderSatisfied = - (sortingEntry.desc && cmpRes[j] >= 0) || 
(!sortingEntry.desc && cmpRes[j] <= 0) || (cmpRes[j] == 0); - EXPECT_TRUE(sortOrderSatisfied) - << "\nSort order is incorrect for column: " << sortingEntry.expression << "; rowID: " << item[1].As(); - } - } - } - } - - // Check sql parser work correctness - QueryResults qrSql; - std::string sqlQuery = ("select * from test_namespace order by column2 asc, column3 desc"); - err = rt.reindexer->Select(sqlQuery, qrSql); - EXPECT_TRUE(err.ok()) << err.what(); -} - -TEST_F(ReindexerApi, SortByMultipleColumnsWithLimits) { - auto err = rt.reindexer->OpenNamespace(default_namespace, StorageOpts().Enabled(false)); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->AddIndex(default_namespace, {"id", "hash", "int", IndexOpts().PK()}); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->AddIndex(default_namespace, {"f1", "tree", "string", IndexOpts()}); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->AddIndex(default_namespace, {"f2", "tree", "int", IndexOpts()}); - ASSERT_TRUE(err.ok()) << err.what(); - - const std::vector srcStrValues = { - "A", "A", "B", "B", "B", "C", "C", - }; - const std::vector srcIntValues = {1, 2, 4, 3, 5, 7, 6}; - - for (size_t i = 0; i < srcIntValues.size(); ++i) { - Item item(rt.reindexer->NewItem(default_namespace)); - ASSERT_TRUE(!!item); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - - item["id"] = static_cast(i); - item["f1"] = srcStrValues[i]; - item["f2"] = srcIntValues[i]; - - err = rt.reindexer->Upsert(default_namespace, item); - ASSERT_TRUE(err.ok()) << err.what(); - } - - err = rt.reindexer->Commit(default_namespace); - EXPECT_TRUE(err.ok()) << err.what(); - - const size_t offset = 4; - const size_t limit = 3; - - QueryResults qr; - Query query{Query(default_namespace, offset, limit).Sort("f1", false).Sort("f2", false)}; - err = rt.reindexer->Select(query, qr); - EXPECT_TRUE(err.ok()) << err.what(); - EXPECT_TRUE(qr.Count() == limit) << qr.Count(); - - const std::vector properRes = {5, 6, 7}; - size_t i = 0; - for (auto& it : qr) { - Item item = it.GetItem(false); - Variant kr = item["f2"]; - EXPECT_TRUE(static_cast(kr) == properRes[i]); - ++i; - } -} - -TEST_F(ReindexerApi, SortByUnorderedIndexes) { - auto err = rt.reindexer->OpenNamespace(default_namespace, StorageOpts().Enabled(false)); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->AddIndex(default_namespace, {"id", "hash", "int", IndexOpts().PK()}); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->AddIndex(default_namespace, {"valueInt", "hash", "int", IndexOpts()}); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->AddIndex(default_namespace, {"valueString", "hash", "string", IndexOpts()}); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->AddIndex(default_namespace, {"valueStringASCII", "hash", "string", IndexOpts().SetCollateMode(CollateASCII)}); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->AddIndex(default_namespace, {"valueStringNumeric", "hash", "string", IndexOpts().SetCollateMode(CollateNumeric)}); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->AddIndex(default_namespace, {"valueStringUTF8", "hash", "string", IndexOpts().SetCollateMode(CollateUTF8)}); - ASSERT_TRUE(err.ok()) << err.what(); - - std::deque allIntValues; - std::set allStrValues; - std::set> allStrValuesASCII; - std::set> allStrValuesNumeric; - std::set> allStrValuesUTF8; - - for (int i = 0; i < 100; ++i) { - Item item(rt.reindexer->NewItem(default_namespace)); - ASSERT_TRUE(!!item); - 
ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - - item["id"] = i; - item["valueInt"] = i; - allIntValues.push_front(i); - - std::string strCollateNone = RandString().c_str(); - allStrValues.insert(strCollateNone); - item["valueString"] = strCollateNone; - - std::string strASCII(strCollateNone + "ASCII"); - allStrValuesASCII.insert(strASCII); - item["valueStringASCII"] = strASCII; - - std::string strNumeric(std::to_string(i + 1)); - allStrValuesNumeric.insert(strNumeric); - item["valueStringNumeric"] = strNumeric; - - allStrValuesUTF8.insert(strCollateNone); - item["valueStringUTF8"] = strCollateNone; - - err = rt.reindexer->Upsert(default_namespace, item); - EXPECT_TRUE(err.ok()) << err.what(); - } - - err = rt.reindexer->Commit(default_namespace); - EXPECT_TRUE(err.ok()) << err.what(); - - bool descending = true; - const unsigned offset = 5; - const unsigned limit = 30; - - QueryResults sortByIntQr; - Query sortByIntQuery{Query(default_namespace, offset, limit).Sort("valueInt", descending)}; - err = rt.reindexer->Select(sortByIntQuery, sortByIntQr); - EXPECT_TRUE(err.ok()) << err.what(); - - std::deque selectedIntValues; - for (auto it : sortByIntQr) { - Item item(it.GetItem(false)); - int value = item["valueInt"].Get(); - selectedIntValues.push_back(value); - } - - EXPECT_TRUE(std::equal(allIntValues.begin() + offset, allIntValues.begin() + offset + limit, selectedIntValues.begin())); - - QueryResults sortByStrQr, sortByASCIIStrQr, sortByNumericStrQr, sortByUTF8StrQr; - Query sortByStrQuery{Query(default_namespace).Sort("valueString", !descending)}; // -V547 - Query sortByASSCIIStrQuery{Query(default_namespace).Sort("valueStringASCII", !descending)}; // -V547 - Query sortByNumericStrQuery{Query(default_namespace).Sort("valueStringNumeric", !descending)}; // -V547 - Query sortByUTF8StrQuery{Query(default_namespace).Sort("valueStringUTF8", !descending)}; // -V547 - - err = rt.reindexer->Select(sortByStrQuery, sortByStrQr); - EXPECT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->Select(sortByASSCIIStrQuery, sortByASCIIStrQr); - EXPECT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->Select(sortByNumericStrQuery, sortByNumericStrQr); - EXPECT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->Select(sortByUTF8StrQuery, sortByUTF8StrQr); - EXPECT_TRUE(err.ok()) << err.what(); - - auto collectQrStringFieldValues = [](const QueryResults& qr, const char* fieldName, std::vector& selectedStrValues) { - selectedStrValues.clear(); - for (auto it : qr) { - Item item(it.GetItem(false)); - selectedStrValues.push_back(item[fieldName].As()); - } - }; - - std::vector selectedStrValues; - { - auto itSortedStr(allStrValues.begin()); - collectQrStringFieldValues(sortByStrQr, "valueString", selectedStrValues); - for (auto it = selectedStrValues.begin(); it != selectedStrValues.end(); ++it) { - EXPECT_EQ(*it, *itSortedStr++); - } - } - - { - auto itSortedStr = allStrValuesASCII.begin(); - collectQrStringFieldValues(sortByASCIIStrQr, "valueStringASCII", selectedStrValues); - for (auto it = selectedStrValues.begin(); it != selectedStrValues.end(); ++it) { - EXPECT_EQ(*it, *itSortedStr++); - } - } - - auto itSortedNumericStr = allStrValuesNumeric.cbegin(); - collectQrStringFieldValues(sortByNumericStrQr, "valueStringNumeric", selectedStrValues); - for (auto it = selectedStrValues.begin(); it != selectedStrValues.end(); ++it) { - EXPECT_EQ(*it, *itSortedNumericStr++); - } - - { - auto itSortedStr = allStrValuesUTF8.cbegin(); - collectQrStringFieldValues(sortByUTF8StrQr, "valueStringUTF8", 
selectedStrValues); - for (auto it = selectedStrValues.begin(); it != selectedStrValues.end(); ++it) { - EXPECT_EQ(*it, *itSortedStr++); - } - } -} - -TEST_F(ReindexerApi, SortByUnorderedIndexWithJoins) { - constexpr std::string_view secondNamespace = "test_namespace_2"; - std::vector secondNamespacePKs; - - auto err = rt.reindexer->OpenNamespace(default_namespace, StorageOpts().Enabled(false)); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->AddIndex(default_namespace, {"id", "hash", "int", IndexOpts().PK()}); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->AddIndex(default_namespace, {"fk", "hash", "int", IndexOpts()}); - ASSERT_TRUE(err.ok()) << err.what(); - - { - err = rt.reindexer->OpenNamespace(secondNamespace, StorageOpts().Enabled(false)); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->AddIndex(secondNamespace, {"pk", "hash", "int", IndexOpts().PK()}); - ASSERT_TRUE(err.ok()) << err.what(); - - for (int i = 0; i < 50; ++i) { - Item item(rt.reindexer->NewItem(secondNamespace)); - ASSERT_TRUE(!!item); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - - secondNamespacePKs.push_back(i); - item["pk"] = i; - - err = rt.reindexer->Upsert(secondNamespace, item); - ASSERT_TRUE(err.ok()) << err.what(); - } - - err = rt.reindexer->Commit(secondNamespace); - EXPECT_TRUE(err.ok()) << err.what(); - } - - for (int i = 0; i < 100; ++i) { - Item item(rt.reindexer->NewItem(default_namespace)); - ASSERT_TRUE(!!item); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - - item["id"] = i; - - int fk = secondNamespacePKs[rand() % (secondNamespacePKs.size() - 1)]; - item["fk"] = fk; - - err = rt.reindexer->Upsert(default_namespace, item); - ASSERT_TRUE(err.ok()) << err.what(); - } - - err = rt.reindexer->Commit(default_namespace); - EXPECT_TRUE(err.ok()) << err.what(); - - bool descending = true; - const unsigned offset = 10; - const unsigned limit = 40; - - Query querySecondNamespace = Query(secondNamespace); - Query joinQuery{Query(default_namespace, offset, limit).Sort("id", descending)}; - joinQuery.InnerJoin("fk", "pk", CondEq, std::move(querySecondNamespace)); - - QueryResults queryResult; - err = rt.reindexer->Select(joinQuery, queryResult); - EXPECT_TRUE(err.ok()) << err.what(); - - for (auto it : queryResult) { - auto itemIt = it.GetJoined(); - EXPECT_TRUE(itemIt.getJoinedItemsCount() > 0); - } -} - -static void TestDSLParseCorrectness(const std::string& testDsl) { - Query query; - Error err = query.FromJSON(testDsl); - EXPECT_TRUE(err.ok()) << err.what(); -} - -TEST_F(ReindexerApi, DslFieldsTest) { - TestDSLParseCorrectness(R"xxx({ - "namespace": "test_ns" - "filters": [ - { - "op": "AND", - "join_query": { - "type": "inner", - "namespace": "test1", - "filters": [ - { - "Op": "OR", - "Field": "id", - "Cond": "EMPTY" - } - ], - "sort": { - "field": "test1", - "desc": true - }, - "limit": 3, - "offset": 0, - "on": [ - { - "left_field": "joined", - "right_field": "joined", - "cond": "lt", - "op": "OR" - }, - { - "left_field": "joined2", - "right_field": "joined2", - "cond": "gt", - "op": "AND" - } - ] - } - }, - { - "op": "OR", - "join_query": { - "type": "left", - "namespace": "test2", - "filters": [ - { - "filters": [ - { - "Op": "And", - "Filters": [ - { - "Op": "Not", - "Field": "id2", - "Cond": "SET", - "Value": [ - 81204872, - 101326571, - 101326882 - ] - }, - { - "Op": "Or", - "Field": "id2", - "Cond": "SET", - "Value": [ - 81204872, - 101326571, - 101326882 - ] - }, - { - "Op": "And", - "filters": [ - { - "Op": "Not", - 
"Field": "id2", - "Cond": "SET", - "Value": [ - 81204872, - 101326571, - 101326882 - ] - }, - { - "Op": "Or", - "Field": "id2", - "Cond": "SET", - "Value": [ - 81204872, - 101326571, - 101326882 - ] - } - ] - } - ] - }, - { - "Op": "Not", - "Field": "id2", - "Cond": "SET", - "Value": [ - 81204872, - 101326571, - 101326882 - ] - } - ] - } - ], - "sort": { - "field": "test2", - "desc": true - }, - "limit": 4, - "offset": 5, - "on": [ - { - "left_field": "joined1", - "right_field": "joined1", - "cond": "le", - "op": "AND" - }, - { - "left_field": "joined2", - "right_field": "joined2", - "cond": "ge", - "op": "OR" - } - ] - } - } - - ] - })xxx"); - - TestDSLParseCorrectness(R"xxx({ - "namespace": "test_ns", - "merge_queries": [{ - "namespace": "services", - "offset": 0, - "limit": 3, - "sort": { - "field": "", - "desc": true - }, - "filters": [{ - "Op": "or", - "Field": "id", - "Cond": "SET", - "Value": [81204872, 101326571, 101326882] - }] - }, - { - "namespace": "services", - "offset": 1, - "limit": 5, - "sort": { - "field": "field1", - "desc": false - }, - "filters": [{ - "Op": "not", - "Field": "id", - "Cond": "ge", - "Value": 81204872 - }] - } - ] - })xxx"); - - TestDSLParseCorrectness(R"xxx({"namespace": "test1","select_filter": ["f1", "f2", "f3", "f4", "f5"]})xxx"); - TestDSLParseCorrectness(R"xxx({"namespace": "test1","select_functions": ["f1()", "f2()", "f3()", "f4()", "f5()"]})xxx"); - TestDSLParseCorrectness(R"xxx({"namespace": "test1","req_total":"cached"})xxx"); - TestDSLParseCorrectness(R"xxx({"namespace": "test1","req_total":"enabled"})xxx"); - TestDSLParseCorrectness(R"xxx({"namespace": "test1","req_total":"disabled"})xxx"); - TestDSLParseCorrectness( - R"xxx({"namespace": "test1","aggregations":[{"fields":["field1"], "type":"sum"}, {"fields":["field2"], "type":"avg"}]})xxx"); - TestDSLParseCorrectness(R"xxx({"namespace": "test1", "strict_mode":"indexes"})xxx"); -} - -TEST_F(ReindexerApi, DistinctQueriesEncodingTest) { - constexpr std::string_view sql = "select distinct(country), distinct(city) from clients;"; - - Query q1 = Query::FromSQL(sql); - EXPECT_EQ(q1.Entries().Size(), 0); - ASSERT_EQ(q1.aggregations_.size(), 2); - EXPECT_EQ(q1.aggregations_[0].Type(), AggDistinct); - ASSERT_EQ(q1.aggregations_[0].Fields().size(), 1); - EXPECT_EQ(q1.aggregations_[0].Fields()[0], "country"); - EXPECT_EQ(q1.aggregations_[1].Type(), AggDistinct); - ASSERT_EQ(q1.aggregations_[1].Fields().size(), 1); - EXPECT_EQ(q1.aggregations_[1].Fields()[0], "city"); - - std::string dsl = q1.GetJSON(); - Query q2; - const auto err = q2.FromJSON(dsl); - ASSERT_TRUE(err.ok()) << err.what(); - EXPECT_EQ(q1, q2) << "q1: " << q1.GetSQL() << "\nq2: " << q2.GetSQL(); - - Query q3{Query(default_namespace).Distinct("name").Distinct("city").Where("id", CondGt, static_cast(10))}; - std::string sql2 = q3.GetSQL(); - - Query q4 = Query::FromSQL(sql2); - EXPECT_EQ(q3, q4) << "q3: " << q3.GetSQL() << "\nq4: " << q4.GetSQL(); - EXPECT_EQ(sql2, q4.GetSQL()); -} - -TEST_F(ReindexerApi, ContextCancelingTest) { - Error err = rt.reindexer->OpenNamespace(default_namespace, StorageOpts().Enabled(false)); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->AddIndex(default_namespace, {"id", "hash", "int", IndexOpts().PK()}); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->AddIndex(default_namespace, {"value", "text", "string", IndexOpts()}); - ASSERT_TRUE(err.ok()) << err.what(); - - Item item(rt.reindexer->NewItem(default_namespace)); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - - err 
= item.FromJSON(R"_({"id":1234, "value" : "value"})_"); - ASSERT_TRUE(err.ok()) << err.what(); - - // Canceled insert - CanceledRdxContext canceledCtx; - err = rt.reindexer->WithContext(&canceledCtx).Insert(default_namespace, item); - ASSERT_TRUE(err.code() == errCanceled); - - err = rt.reindexer->Commit(default_namespace); - ASSERT_TRUE(err.ok()) << err.what(); - - // Canceled delete - std::vector namespaces; - err = rt.reindexer->WithContext(&canceledCtx).EnumNamespaces(namespaces, reindexer::EnumNamespacesOpts()); - ASSERT_TRUE(err.code() == errCanceled); - - // Canceled select - QueryResults qr; - err = rt.reindexer->WithContext(&canceledCtx).Select(Query(default_namespace), qr); - ASSERT_TRUE(err.code() == errCanceled); - qr.Clear(); - - std::string sqlQuery = ("select * from test_namespace"); - err = rt.reindexer->WithContext(&canceledCtx).Select(sqlQuery, qr); - ASSERT_TRUE(err.code() == errCanceled); - qr.Clear(); - - DummyRdxContext dummyCtx; - err = rt.reindexer->WithContext(&dummyCtx).Select(Query(default_namespace), qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), 0); - qr.Clear(); - - FakeRdxContext fakeCtx; - err = rt.reindexer->WithContext(&fakeCtx).Insert(default_namespace, item); - EXPECT_TRUE(err.ok()) << err.what(); - err = rt.reindexer->WithContext(&fakeCtx).Select(Query(default_namespace), qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), 1); - qr.Clear(); - - // Canceled upsert - item["value"] = "value1"; - err = rt.reindexer->WithContext(&canceledCtx).Upsert(default_namespace, item); - ASSERT_TRUE(err.code() == errCanceled); - err = rt.reindexer->Select(Query(default_namespace), qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), 1); - Item selItem = qr.begin().GetItem(false); - ASSERT_NO_THROW(ASSERT_EQ(selItem["id"].As(), 1234)); - ASSERT_NO_THROW(ASSERT_EQ(selItem["value"].As(), "value")); - qr.Clear(); - - // Canceled update - err = rt.reindexer->WithContext(&canceledCtx).Update(default_namespace, item); - ASSERT_TRUE(err.code() == errCanceled); - err = rt.reindexer->Select(Query(default_namespace), qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), 1); - selItem = qr.begin().GetItem(false); - ASSERT_NO_THROW(ASSERT_EQ(selItem["id"].As(), 1234)); - ASSERT_NO_THROW(ASSERT_EQ(selItem["value"].As(), "value")); - qr.Clear(); - - // Canceled delete - err = rt.reindexer->WithContext(&canceledCtx).Delete(default_namespace, item); - ASSERT_TRUE(err.code() == errCanceled); - err = rt.reindexer->Select(Query(default_namespace), qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), 1); - qr.Clear(); - - err = rt.reindexer->WithContext(&canceledCtx).Delete(Query(default_namespace), qr); - ASSERT_TRUE(err.code() == errCanceled); - qr.Clear(); - err = rt.reindexer->Select(Query(default_namespace), qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), 1); - qr.Clear(); - - err = rt.reindexer->WithContext(&fakeCtx).Delete(default_namespace, item); - ASSERT_TRUE(err.ok()) << err.what(); - err = rt.reindexer->Select(Query(default_namespace), qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), 0); -} - -TEST_F(ReindexerApi, JoinConditionsSqlParserTest) { - constexpr std::string_view sql1 = - "SELECT * FROM ns WHERE a > 0 AND INNER JOIN (SELECT * FROM ns2 WHERE b > 10 AND c = 1) ON ns2.id = ns.fk_id"; - const auto q1 = Query::FromSQL(sql1); - ASSERT_EQ(q1.GetSQL(), sql1); - - constexpr std::string_view sql2 = - "SELECT * FROM ns WHERE a > 0 AND INNER JOIN (SELECT * FROM 
ns2 WHERE b > 10 AND c = 1 LIMIT 0) ON ns2.id = ns.fk_id"; - const auto q2 = Query::FromSQL(sql2); - ASSERT_EQ(q2.GetSQL(), sql2); -} - -TEST_F(ReindexerApi, UpdateWithBoolParserTest) { - constexpr std::string_view sql = "UPDATE ns SET flag1 = true,flag2 = false WHERE id > 100"; - Query query = Query::FromSQL(sql); - ASSERT_EQ(query.UpdateFields().size(), 2); - EXPECT_EQ(query.UpdateFields().front().Column(), "flag1"); - EXPECT_EQ(query.UpdateFields().front().Mode(), FieldModeSet); - ASSERT_EQ(query.UpdateFields().front().Values().size(), 1); - EXPECT_TRUE(query.UpdateFields().front().Values().front().Type().Is()); - EXPECT_TRUE(query.UpdateFields().front().Values().front().As()); - EXPECT_EQ(query.UpdateFields().back().Column(), "flag2"); - EXPECT_EQ(query.UpdateFields().back().Mode(), FieldModeSet); - ASSERT_EQ(query.UpdateFields().back().Values().size(), 1); - EXPECT_TRUE(query.UpdateFields().back().Values().front().Type().Is()); - EXPECT_FALSE(query.UpdateFields().back().Values().front().As()); - EXPECT_EQ(query.GetSQL(), sql) << query.GetSQL(); -} - -TEST_F(ReindexerApi, EqualPositionsSqlParserTest) { - constexpr std::string_view sql = - "SELECT * FROM ns WHERE (f1 = 1 AND f2 = 2 OR f3 = 3 equal_position(f1, f2) equal_position(f1, f3)) OR (f4 = 4 AND f5 > 5 " - "equal_position(f4, f5))"; - - Query query = Query::FromSQL(sql); - EXPECT_EQ(query.GetSQL(), sql); - EXPECT_TRUE(query.Entries().equalPositions.empty()); - ASSERT_EQ(query.Entries().Size(), 7); - - ASSERT_TRUE(query.Entries().IsSubTree(0)); - const auto& ep1 = query.Entries().Get(0).equalPositions; - ASSERT_EQ(ep1.size(), 2); - ASSERT_EQ(ep1[0].size(), 2); - EXPECT_EQ(ep1[0][0], "f1"); - EXPECT_EQ(ep1[0][1], "f2"); - ASSERT_EQ(ep1[1].size(), 2); - EXPECT_EQ(ep1[1][0], "f1"); - EXPECT_EQ(ep1[1][1], "f3"); - - ASSERT_TRUE(query.Entries().IsSubTree(4)); - const auto& ep2 = query.Entries().Get(4).equalPositions; - ASSERT_EQ(ep2.size(), 1); - ASSERT_EQ(ep2[0].size(), 2); - EXPECT_EQ(ep2[0][0], "f4"); - EXPECT_EQ(ep2[0][1], "f5"); -} - -TEST_F(ReindexerApi, SchemaSuggestions) { - Error err = rt.reindexer->OpenNamespace(default_namespace); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->OpenNamespace("second_ns"); - ASSERT_TRUE(err.ok()) << err.what(); - - // clang-format off - constexpr std::string_view jsonschema = R"xxx( - { - "required": [ - "Countries", - "Nest_fake", - "nested", - "second_field" - ], - "properties": { - "Countries": { - "items": { - "type": "string" - }, - "type": "array" - }, - "Nest_fake": { - "type": "number" - }, - "nested": { - "required": [ - "Name", - "Naame", - "Age" - ], - "properties": { - "Name": { - "type": "string" - }, - "Naame": { - "type": "string" - }, - "Age": { - "type": "integer" - } - }, - "additionalProperties": false, - "type": "object" - } - "second_field": { - "type": "number" - }, - }, - "additionalProperties": false, - "type": "object" - })xxx"; - // clang-format on - - // clang-format off - constexpr std::string_view jsonschema2 = R"xxx( - { - "required": [ - "id", - "Field", - ], - "properties": { - "id": { - "type": "number" - }, - "Field": { - "type": "number" - } - }, - "additionalProperties": false, - "type": "object" - })xxx"; - // clang-format on - err = rt.reindexer->SetSchema(default_namespace, jsonschema); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->SetSchema("second_ns", jsonschema2); - ASSERT_TRUE(err.ok()) << err.what(); - - auto validateSuggestions = [this](std::string_view sql, const std::unordered_set& expected, size_t position) { - 
std::vector suggestions; - auto err = rt.reindexer->GetSqlSuggestions(sql, position, suggestions); - ASSERT_TRUE(err.ok()) << err.what(); - for (auto& sugg : suggestions) { - EXPECT_TRUE(expected.find(sugg) != expected.end()) << sql << '\n' - << std::string(position, ' ') << "^\nUnexpected suggestion: " << sugg; - } - for (auto& expSugg : expected) { - EXPECT_TRUE(std::find(suggestions.begin(), suggestions.end(), expSugg) != suggestions.end()) - << sql << '\n' - << std::string(position, ' ') << "^\nExpected but not found suggestion: " << expSugg; - } - }; - - struct { - std::string_view sql; - std::unordered_set expected; - size_t position = sql.empty() ? 0 : sql.size() - 1; - } testData[]{ - {"select * from test_namespace where ne", {"Nest_fake", "nested"}}, - {"select * from test_namespace where nested", {}}, - {"select * from test_namespace where nested.", {".Name", ".Naame", ".Age"}}, - {"select * from test_namespace where nested.Na", {".Name", ".Naame"}}, - - {"", {"explain", "local", "select", "delete", "update", "truncate"}}, - {"s", {"select"}}, - {"select", {}}, - {"select ", {"*", "avg", "min", "max", "facet", "sum", "distinct", "rank", "count", "count_cached"}}, - {"select *,", {}}, - {"select *, ", {"*", "avg", "min", "max", "facet", "sum", "distinct", "rank", "count", "count_cached"}}, - {"select *, f", {"facet", "Field"}}, - {"select f", {"facet", "Field"}}, - {"select * ", {"from"}}, - {"select * f", {"from"}}, - {"select * from ", - {"test_namespace", "second_ns", "#memstats", "#activitystats", "#config", "#queriesperfstats", "#namespaces", "#perfstats", - "#clientsstats", "#replicationstats"}}, - {"select * from te", {"test_namespace"}}, - {"select * from test_namespace ", - {"where", ";", "equal_position", "inner", "join", "left", "limit", "merge", "offset", "or", "order"}}, - {"select * from test_namespace w", {"where"}}, - {"select * from test_namespace where ", - {"second_field", "ST_DWithin", "Countries", "nested", "Nest_fake", "inner", "join", "left", "not", "equal_position"}}, - {"select * from test_namespace where s", {"second_field", "ST_DWithin"}}, - {"select * from second_ns where i", {"id", "inner"}}, - {"select * from test_namespace where (", {}}, - {"select * from test_namespace where (s", {"second_field", "ST_DWithin", "select"}}, - {"select * from test_namespace where (select m", {"max", "min"}}, - {"select * from test_namespace where (select i", {"id", "items_count", "ip"}}, - {"select * from test_namespace where (select second_field f", {"from"}}, - {"select * from test_namespace where (select id from s", {"second_ns"}}, - {"select * from test_namespace where (select Field from second_ns where ", {"id", "ST_DWithin", "Field", "not", "equal_position"}}, - {"select * from test_namespace where C", {"Countries"}}, - {"select * from test_namespace where Countries == (", {}}, - {"select * from test_namespace where Countries == (s", {"select"}}, - {"select * from test_namespace where Countries == (select m", {"max", "min"}}, - {"select * from test_namespace where Countries == (select i", {"id", "ip", "items_count"}}, - {"select * from test_namespace where Countries == (select second_field f", {"from"}}, - {"select * from test_namespace where Countries == (select second_field from ", - {"test_namespace", "second_ns", "#memstats", "#activitystats", "#config", "#queriesperfstats", "#namespaces", "#perfstats", - "#clientsstats", "#replicationstats"}}, - {"select * from test_namespace where Countries == (select second_field from s", {"second_ns"}}, - {"select * from 
test_namespace where i", {"inner"}}, - {"select * from test_namespace where inner j", {"join"}}, - {"select * from test_namespace where inner join s", {"second_ns"}}, - {"select * from test_namespace where inner join (s", {"select"}}, - {"select * from test_namespace where inner join (select m", {"min", "max"}}, - {"select * from test_namespace where inner join (select i", {"id", "ip", "items_count"}}, - {"select * from test_namespace where inner join (select second_field f", {"from"}}, - {"select * from test_namespace where inner join (select second_field from s", {"second_ns"}}, - {"SELECT * FROM ns WHERE id = ( ", {"null", "empty", "not", "select"}}, - }; - - for (const auto& [sql, expected, position] : testData) { - if (sql.empty() || sql.back() == ' ') { - validateSuggestions(sql, expected, position); - } else { - for (const auto& td : testData) { - if (reindexer::checkIfStartsWith(sql, td.sql)) { - validateSuggestions(td.sql, expected, position); - } - } - } - } -} - -TEST_F(ReindexerApi, LoggerWriteInterruptTest) { - struct Logger { - Logger() { - spdlog::drop_all(); - spdlog::set_async_mode(16384, spdlog::async_overflow_policy::discard_log_msg, nullptr, std::chrono::seconds(2)); - spdlog::set_level(spdlog::level::trace); - spdlog::set_pattern("[%L%d/%m %T.%e %t] %v"); - - std::remove(logFile.c_str()); - sinkPtr = std::make_shared(logFile); - spdlog::create("log", sinkPtr); - logger = reindexer_server::LoggerWrapper("log"); - } - ~Logger() { - spdlog::drop_all(); - std::remove(logFile.c_str()); - } - const std::string logFile = "/tmp/reindex_logtest.out"; - reindexer_server::LoggerWrapper logger; - std::shared_ptr sinkPtr; - } instance; - - reindexer::logInstallWriter( - [&](int level, char* buf) { - if (level <= LogTrace) { - instance.logger.trace(buf); - } - }, - reindexer::LoggerPolicy::WithLocks); - auto writeThread = std::thread([]() { - for (size_t i = 0; i < 10000; ++i) { - reindexer::logPrintf(LogTrace, "Detailed and amazing description of this error: [%d]!", i); - } - }); - auto reopenThread = std::thread([&instance]() { - for (size_t i = 0; i < 1000; ++i) { - instance.sinkPtr->reopen(); - reindexer::logPrintf(LogTrace, "REOPENED [%d]", i); - std::this_thread::sleep_for(std::chrono::milliseconds(3)); - } - }); - writeThread.join(); - reopenThread.join(); - reindexer::logPrintf(LogTrace, "FINISHED\n"); - reindexer::logInstallWriter(nullptr, reindexer::LoggerPolicy::WithLocks); -} - -TEST_F(ReindexerApi, IntToStringIndexUpdate) { - const std::string kFieldId = "id"; - const std::string kFieldNumeric = "numeric"; - - Error err = rt.reindexer->OpenNamespace(default_namespace); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->AddIndex(default_namespace, {kFieldId, "hash", "int", IndexOpts().PK()}); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->AddIndex(default_namespace, {kFieldNumeric, "tree", "int", IndexOpts()}); - ASSERT_TRUE(err.ok()) << err.what(); - - for (int i = 0; i < 100; ++i) { - Item item(rt.reindexer->NewItem(default_namespace)); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - - item[kFieldId] = i; - item[kFieldNumeric] = i * 2; - - err = rt.reindexer->Upsert(default_namespace, item); - ASSERT_TRUE(err.ok()) << err.what(); - } - - err = rt.reindexer->UpdateIndex(default_namespace, {kFieldNumeric, "tree", "string", IndexOpts()}); - EXPECT_FALSE(err.ok()); - EXPECT_TRUE(err.what() == "Cannot convert key from type int to string") << err.what(); - - QueryResults qr; - err = rt.reindexer->Select(Query(default_namespace), qr); 
- EXPECT_TRUE(err.ok()) << err.what(); - - for (auto it : qr) { - Item item = it.GetItem(false); - Variant v = item[kFieldNumeric]; - EXPECT_TRUE(v.Type().Is()) << v.Type().Name(); - } -} - -TEST_F(ReindexerApi, SelectFilterWithAggregationConstraints) { - Query q; - - std::string sql = "select id, distinct(year) from test_namespace"; - EXPECT_NO_THROW(q = Query::FromSQL(sql)); - Error status = Query().FromJSON(q.GetJSON()); - EXPECT_TRUE(status.ok()) << status.what(); - - q = Query().Select({"id"}); - EXPECT_NO_THROW(q.Aggregate(AggDistinct, {"year"}, {})); - - sql = "select id, max(year) from test_namespace"; - EXPECT_THROW(q = Query::FromSQL(sql), Error); - q = Query(default_namespace).Select({"id"}); - q.aggregations_.emplace_back(reindexer::AggregateEntry{AggMax, {"year"}}); - status = Query().FromJSON(q.GetJSON()); - EXPECT_FALSE(status.ok()); - EXPECT_TRUE(status.what() == std::string(reindexer::kAggregationWithSelectFieldsMsgError)); - EXPECT_THROW(q.Aggregate(AggMax, {"price"}, {}), Error); - - sql = "select facet(year), id, name from test_namespace"; - EXPECT_THROW(q = Query::FromSQL(sql), Error); - q = Query(default_namespace).Select({"id", "name"}); - EXPECT_THROW(q.Aggregate(AggFacet, {"year"}, {}), Error); - status = Query().FromJSON(fmt::sprintf(R"({"namespace":"%s", - "limit":-1, - "offset":0, - "req_total":"disabled", - "explain":false, - "type":"select", - "select_with_rank":false, - "select_filter":[ - "id", - "name" - ], - "select_functions":[], - "sort":[], - "filters":[], - "merge_queries":[], - "aggregations":[ - { - "type":"facet", - "sort":[], - "fields":["year"] - } - ]})", - default_namespace)); - EXPECT_FALSE(status.ok()); - EXPECT_TRUE(status.what() == std::string(reindexer::kAggregationWithSelectFieldsMsgError)); - - EXPECT_THROW((void)Query::FromSQL("select max(id), * from test_namespace"), Error); - EXPECT_THROW((void)Query::FromSQL("select *, max(id) from test_namespace"), Error); - EXPECT_NO_THROW((void)Query::FromSQL("select *, count(*) from test_namespace")); - EXPECT_NO_THROW((void)Query::FromSQL("select count(*), * from test_namespace")); -} - -TEST_F(ReindexerApi, InsertIncorrectItemWithJsonPathsDuplication) { - Error err = rt.reindexer->OpenNamespace(default_namespace, StorageOpts().Enabled(false)); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->AddIndex(default_namespace, {"id", "hash", "int", IndexOpts().PK()}); - ASSERT_TRUE(err.ok()) << err.what(); - - Item oldTagsItemCJSON = rt.reindexer->NewItem(default_namespace); - ASSERT_TRUE(oldTagsItemCJSON.Status().ok()) << oldTagsItemCJSON.Status().what(); - Item oldTagsItemJSON = rt.reindexer->NewItem(default_namespace); - ASSERT_TRUE(oldTagsItemJSON.Status().ok()) << oldTagsItemJSON.Status().what(); - - err = rt.reindexer->AddIndex(default_namespace, {"value", reindexer::JsonPaths{"value1"}, "hash", "string", IndexOpts()}); - ASSERT_TRUE(err.ok()) << err.what(); - - { - // Check item unmarshaled from json - Item item(rt.reindexer->NewItem(default_namespace)); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - err = item.FromJSON(R"_({"id":0,"value1":"v","obj":{"id":11},"value1":"p"})_"); - EXPECT_EQ(err.code(), errLogic) << err.what(); - } - { - // Check item unmarshaled from cjson (with correct tags) - Item item(rt.reindexer->NewItem(default_namespace)); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - constexpr char cjson[] = {0x06, 0x08, 0x00, 0x12, 0x01, 0x70, 0x12, 0x01, 0x70, 0x07}; - err = item.FromCJSON(std::string_view(cjson, sizeof(cjson))); - 
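The two msgpack payloads a few lines below encode the same duplicated-key document as the JSON string above. For reference, here is an annotated copy of the shorter (fixmap) payload, with the byte values taken verbatim from the test and the comments added per the MessagePack format:

#include <cstdint>

constexpr uint8_t msgpackAnnotated[] = {
	0x84,                                      // fixmap, 4 key/value pairs
	0xA2, 0x69, 0x64, 0x00,                    // "id": 0
	0xA6, 0x76, 0x61, 0x6C, 0x75, 0x65, 0x31,  // "value1":
	0xA1, 0x70,                                //   "p"
	0xA3, 0x6F, 0x62, 0x6A,                    // "obj":
	0x81, 0xA2, 0x69, 0x64, 0x0B,              //   fixmap(1) {"id": 11}
	0xA6, 0x76, 0x61, 0x6C, 0x75, 0x65, 0x31,  // "value1" again -> duplicated json path
	0xA1, 0x70};                               //   "p"

The longer payload spells out the same keys with map32 (0xDF) headers and carries "v" and "p" as the two "value1" values, which is exactly the duplication the test expects to be rejected with errLogic.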
ASSERT_EQ(err.code(), errLogic); - } - { - // Check item unmarshaled from msgpack - Item item(rt.reindexer->NewItem(default_namespace)); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - constexpr uint8_t msgpack[] = {0xDF, 0x00, 0x00, 0x00, 0x04, 0xA2, 0x69, 0x64, 0x00, 0xA6, 0x76, 0x61, 0x6C, 0x75, - 0x65, 0x31, 0xA1, 0x76, 0xA3, 0x6F, 0x62, 0x6A, 0xDF, 0x00, 0x00, 0x00, 0x01, 0xA2, - 0x69, 0x64, 0x0B, 0xA6, 0x76, 0x61, 0x6C, 0x75, 0x65, 0x31, 0xA1, 0x70}; - size_t offset = 0; - err = item.FromMsgPack(std::string_view(reinterpret_cast(msgpack), sizeof(msgpack)), offset); - EXPECT_EQ(err.code(), errLogic) << err.what(); - } - { - // Check item unmarshaled from msgpack (different encoding) - Item item(rt.reindexer->NewItem(default_namespace)); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - constexpr uint8_t msgpack[] = {0x84, 0xA2, 0x69, 0x64, 0x00, 0xA6, 0x76, 0x61, 0x6C, 0x75, 0x65, 0x31, 0xA1, 0x70, 0xA3, 0x6F, - 0x62, 0x6A, 0x81, 0xA2, 0x69, 0x64, 0x0B, 0xA6, 0x76, 0x61, 0x6C, 0x75, 0x65, 0x31, 0xA1, 0x70}; - size_t offset = 0; - err = item.FromMsgPack(std::string_view(reinterpret_cast(msgpack), sizeof(msgpack)), offset); - EXPECT_EQ(err.code(), errLogic) << err.what(); - } - - { - // Check item unmarshaled from cjson (with outdated tags) - constexpr char cjson[] = {0x07, 0x13, 0x00, 0x00, 0x00, 0x06, 0x08, 0x00, 0x12, 0x01, 0x76, 0x1E, 0x08, 0x16, 0x07, 0x12, 0x01, - 0x70, 0x07, 0x03, 0x02, 0x69, 0x64, 0x06, 0x76, 0x61, 0x6C, 0x75, 0x65, 0x31, 0x03, 0x6F, 0x62, 0x6A}; - err = oldTagsItemCJSON.FromCJSON(std::string_view(cjson, sizeof(cjson))); - EXPECT_TRUE(err.ok()) << err.what(); - err = rt.reindexer->Insert(default_namespace, oldTagsItemCJSON); - ASSERT_EQ(err.code(), errLogic); - - QueryResults qr; - err = rt.reindexer->Select(Query(default_namespace), qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), 0); - } - { - // Check item unmarshaled from json with outdated tags - err = oldTagsItemJSON.FromJSON(R"_({"id":0,"value1":"v","obj":{"id":11},"value1":"p"})_"); - EXPECT_TRUE(err.ok()) << err.what(); - err = rt.reindexer->Insert(default_namespace, oldTagsItemJSON); - ASSERT_EQ(err.code(), errLogic); - - QueryResults qr; - err = rt.reindexer->Select(Query(default_namespace), qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), 0); - } -} - -TEST_F(ReindexerApi, UpdateDoublesItemByPKIndex) { - rt.SetVerbose(true); - - Error err = rt.reindexer->OpenNamespace(default_namespace); - - err = rt.reindexer->AddIndex(default_namespace, {"id", "tree", "int", IndexOpts().PK()}); - ASSERT_TRUE(err.ok()) << err.what(); - err = rt.reindexer->AddIndex(default_namespace, {"v1", "tree", "int", IndexOpts().Sparse()}); - ASSERT_TRUE(err.ok()) << err.what(); - err = rt.reindexer->AddIndex(default_namespace, {"v2", "tree", "string", IndexOpts()}); - ASSERT_TRUE(err.ok()) << err.what(); - - struct ItemData { - ItemData() = default; - ItemData(unsigned int _id, unsigned int _v1, const std::string& _v2) : id(_id), v1(_v1), v2(_v2) {} - unsigned int id = 0; - unsigned int v1 = 0; - std::string v2; - }; - constexpr size_t kItemsCount = 4; - std::vector data; - std::string checkUuid; - for (unsigned i = 0; i < kItemsCount; i++) { - Item item(rt.reindexer->NewItem(default_namespace)); - ASSERT_TRUE(!!item); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - data.emplace_back(ItemData{i, i + 100, RandString()}); - item["id"] = int(data.back().id); - item["v1"] = int(data.back().v1); - item["v2"] = data.back().v2; - err = 
rt.reindexer->Insert(default_namespace, item); - ASSERT_TRUE(err.ok()) << err.what(); - } - - { - reindexer::QueryResults qr; - constexpr std::string_view sql = "UPDATE test_namespace SET v1=125, id = 3 WHERE id = 2"; - Query query = Query::FromSQL(sql); - err = rt.reindexer->Update(query, qr); - ASSERT_EQ(err.code(), errLogic); - EXPECT_EQ(err.what(), "Duplicate Primary Key {id: {3}} for rows [2, 3]!"); - } - - { - reindexer::QueryResults qr; - err = rt.reindexer->Select(Query(default_namespace).Sort("id", false), qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), kItemsCount); - - unsigned int i = 0; - for (auto it : qr) { - auto item = it.GetItem(); - ASSERT_EQ(item["id"].As(), data[i].id); - ASSERT_EQ(item["v1"].As(), data[i].v1); - ASSERT_EQ(item["v2"].As(), data[i].v2); - i++; - } - } -} -TEST_F(ReindexerApi, IntFieldConvertToStringIndexTest) { - Error err = rt.reindexer->OpenNamespace(default_namespace, StorageOpts().Enabled(false)); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->AddIndex(default_namespace, {"id", "hash", "int", IndexOpts().PK()}); - ASSERT_TRUE(err.ok()) << err.what(); - - static int id = 0; - enum class Order { InsertThenAddIndex, AddIndexThenUpdate }; - - auto testImpl = [this](Order order) { - std::srand(std::time(0)); - int value = std::rand(); - auto indexName = fmt::sprintf("data_%d", id); - auto indexPaths = order == Order::AddIndexThenUpdate ? reindexer::JsonPaths{"n." + indexName} : reindexer::JsonPaths{indexName}; - auto insert = [this](const char* tmplt, auto&&... args) { - Item item(rt.reindexer->NewItem(default_namespace)); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - auto err = item.FromJSON(fmt::sprintf(tmplt, std::forward(args)...)); - ASSERT_TRUE(err.ok()) << err.what(); - err = rt.reindexer->Insert(default_namespace, item); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - ASSERT_TRUE(err.ok()) << err.what(); - }; - - auto update = [&] { - QueryResults qr; - auto err = rt.reindexer->Select( - fmt::sprintf("UPDATE %s SET n = {\"%s\":%d} where id = %d", default_namespace, indexName, value, id), qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), 1); - }; - - auto addIndex = [&] { - auto err = rt.reindexer->AddIndex(default_namespace, {indexName, std::move(indexPaths), "hash", "string", IndexOpts()}); - ASSERT_TRUE(err.ok()) << err.what(); - }; - - auto checkResult = [&](const std::string& searchIndex, const std::string& searchValue) { - QueryResults qr; - auto err = rt.reindexer->Select(Query(default_namespace).Where(searchIndex, CondEq, searchValue), qr); - ASSERT_TRUE(err.ok()) << err.what(); - - ASSERT_EQ(qr.Count(), 1); - - auto item = qr.begin().GetItem(); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - ASSERT_TRUE(Variant(item[indexName]).Type().Is()) << Variant(item[indexName]).Type().Name(); - ASSERT_EQ(item[indexName].As(), std::to_string(value)); - }; - - switch (order) { - case Order::InsertThenAddIndex: { - insert("{\"id\":%d,\"%s\":%d})", id, indexName, value); - addIndex(); - break; - } - case Order::AddIndexThenUpdate: { - addIndex(); - insert("{\"id\":%d}", id); - update(); - break; - } - } - checkResult("id", std::to_string(id)); - checkResult(indexName, std::to_string(value)); - id++; - }; - - testImpl(Order::InsertThenAddIndex); - testImpl(Order::AddIndexThenUpdate); -} - -TEST_F(ReindexerApi, Meta) { - const std::string kMetaKey = "key"; - const std::string kMetaVal = "value"; - auto& rx = *rt.reindexer; - std::vector meta; - - Error err = 
rx.OpenNamespace(default_namespace); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rx.EnumMeta(default_namespace, meta); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(meta.size(), 0); - - err = rx.PutMeta(default_namespace, kMetaKey, kMetaVal); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rx.EnumMeta(default_namespace, meta); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(meta.size(), 1); - ASSERT_EQ(meta[0], kMetaKey); - - { - std::string data; - err = rx.GetMeta(default_namespace, kMetaKey, data); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(data, kMetaVal); - } - - { - std::vector data; - err = rx.GetMeta(default_namespace, kMetaKey, data); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(data.size(), 1); - ASSERT_EQ(data[0].data, kMetaVal); - ASSERT_EQ(data[0].shardId, ShardingKeyType::NotSetShard); - } -} diff --git a/cpp_src/gtests/tests/API/collate_custom_test.cc b/cpp_src/gtests/tests/API/collate_custom_test.cc deleted file mode 100644 index 6780b066c..000000000 --- a/cpp_src/gtests/tests/API/collate_custom_test.cc +++ /dev/null @@ -1,151 +0,0 @@ -#include "collate_custom_mode_api.h" -#include "core/indexopts.h" - -const std::vector sourceTable = {u8"Вася", u8"Johny", u8"Mary", u8"Иван", u8"Петр", u8"Emmarose", - u8"Gabriela", u8"Антон", u8"1й Петр", u8"2й Петр", u8"3й Петр", u8"Maxwell", - u8"Anthony", u8"1й Павел", u8"Jane", u8"2й Павел", u8"3й Павел"}; - -// clang-format off -const std::vector cyrillicNames = { - u8"Антон", - u8"Вася", - u8"Иван", - u8"Петр", -}; - -const std::vector numericNames = { - u8"1й Павел", - u8"1й Петр", - u8"2й Павел", - u8"2й Петр", - u8"3й Павел", - u8"3й Петр", -}; - -const std::vector ansiNames = { - u8"Anthony", - u8"Emmarose", - u8"Gabriela", - u8"Jane", - u8"Johny", - u8"Mary", - u8"Maxwell", -}; -// clang-format on - -TEST_F(CollateCustomModeAPI, CollateCustomTest1) { - PrepareNs(rt.reindexer, default_namespace, u8"А-Я0-9A-Z", sourceTable); - - QueryResults qr; - SortByName(qr); - PrintQueryResults(default_namespace, qr); - - std::vector sortedTable; - sortedTable.insert(sortedTable.end(), cyrillicNames.begin(), cyrillicNames.end()); - sortedTable.insert(sortedTable.end(), numericNames.begin(), numericNames.end()); - sortedTable.insert(sortedTable.end(), ansiNames.begin(), ansiNames.end()); - - CompareResults(qr, sortedTable); -} - -TEST_F(CollateCustomModeAPI, CollateCustomTest2) { - PrepareNs(rt.reindexer, default_namespace, u8"A-ZА-Я0-9", sourceTable); - - QueryResults qr; - SortByName(qr); - PrintQueryResults(default_namespace, qr); - - std::vector sortedTable; - sortedTable.insert(sortedTable.end(), ansiNames.begin(), ansiNames.end()); - sortedTable.insert(sortedTable.end(), cyrillicNames.begin(), cyrillicNames.end()); - sortedTable.insert(sortedTable.end(), numericNames.begin(), numericNames.end()); - - CompareResults(qr, sortedTable); -} - -TEST_F(CollateCustomModeAPI, CollateCustomTest3) { - PrepareNs(rt.reindexer, default_namespace, u8"0-9A-ZА-Я", sourceTable); - - QueryResults qr; - SortByName(qr); - PrintQueryResults(default_namespace, qr); - - std::vector sortedTable; - sortedTable.insert(sortedTable.end(), numericNames.begin(), numericNames.end()); - sortedTable.insert(sortedTable.end(), ansiNames.begin(), ansiNames.end()); - sortedTable.insert(sortedTable.end(), cyrillicNames.begin(), cyrillicNames.end()); - - CompareResults(qr, sortedTable); -} - -TEST_F(CollateCustomModeAPI, CollateCustomTest4) { - const std::vector sourceData = {u8"вампир", - u8"Johny", - u8"яблоко", - u8"Carrick Michael", - 
u8"Валенсия", - u8"Петрозаводск", - u8"jumper", - u8"carry on, please", - u8"петля", - u8"1й Петр", - u8"2й Петр", - u8"электричка", - u8"йод", - u8"3й Петр", - u8"Minnesota Timberwolves", - u8"1й Павел", - u8"mindblowing shit!", - u8"Арсенал Лондон", - u8"Houston Rockets", - u8"ананас", - u8"ёжик", - u8"Ёрохин", - u8"2й Павел", - u8"чоткий парень", - u8"3й Павел", - u8"Элтон Джон", - u8"humble person", - u8"Чебоксары", - u8"Я даже и не знаю, почему это все работает"}; - - const std::vector correctlyOrderedData = {u8"Арсенал Лондон", - u8"ананас", - u8"Валенсия", - u8"вампир", - u8"Петрозаводск", - u8"петля", - u8"Чебоксары", - u8"чоткий парень", - u8"Элтон Джон", - u8"Я даже и не знаю, почему это все работает", - u8"Carrick Michael", - u8"carry on, please", - u8"Houston Rockets", - u8"humble person", - u8"Johny", - u8"jumper", - u8"Minnesota Timberwolves", - u8"mindblowing shit!", - u8"1й Павел", - u8"1й Петр", - u8"2й Павел", - u8"2й Петр", - u8"3й Павел", - u8"3й Петр", - u8"Ёрохин", - u8"ёжик", - u8"йод", - u8"электричка", - u8"яблоко"}; - - PrepareNs(rt.reindexer, default_namespace, - u8"АаБбВвГгДдЕеЖжЗзИиКкЛлМмНнОоПпРрСсТтУуФфХхЦцЧчШшЩщЪъЫыЬьЭ-ЯAaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZz0-9ЁёЙйэ-я", - sourceData); - - QueryResults qr; - SortByName(qr); - PrintQueryResults(default_namespace, qr); - - CompareResults(qr, correctlyOrderedData); -} diff --git a/cpp_src/gtests/tests/fixtures/reindexertestapi.h b/cpp_src/gtests/tests/fixtures/reindexertestapi.h index 5128e64c2..0c100dc6c 100644 --- a/cpp_src/gtests/tests/fixtures/reindexertestapi.h +++ b/cpp_src/gtests/tests/fixtures/reindexertestapi.h @@ -13,6 +13,9 @@ #include "tools/stringstools.h" #include "vendor/utf8cpp/utf8.h" +#include +#include "vendor/spdlog/fmt/fmt.h" + struct IndexDeclaration { std::string_view indexName; std::string_view fieldType; @@ -77,7 +80,9 @@ class ReindexerTestApi { reindexer::Error Commit(std::string_view ns) { return reindexer->Commit(ns); } void Upsert(std::string_view ns, ItemType &item) { assertrx(!!item); + std::cout << fmt::sprintf("Upsert to '%s' begin\n", ns); auto err = reindexer->WithTimeout(kBasicTimeout).Upsert(ns, item); + std::cout << fmt::sprintf("Upsert to '%s' end with result: %s\n", ns, err.ok() ? 
"OK" : err.what()); ASSERT_TRUE(err.ok()) << err.what(); ASSERT_TRUE(item.Status().ok()) << item.Status().what(); } diff --git a/cpp_src/gtests/tests/unit/btree_idsets_tests.cc b/cpp_src/gtests/tests/unit/btree_idsets_tests.cc deleted file mode 100644 index 039c1b191..000000000 --- a/cpp_src/gtests/tests/unit/btree_idsets_tests.cc +++ /dev/null @@ -1,161 +0,0 @@ -#include "btree_idsets_api.h" -#include "core/index/index.h" -#include "core/index/string_map.h" -#include "core/indexopts.h" -#include "core/nsselecter/btreeindexiterator.h" -#include "core/queryresults/joinresults.h" - -TEST_F(BtreeIdsetsApi, SelectByStringField) { - QueryResults qr; - std::string strValueToCheck = lastStrValue; - Error err = rt.reindexer->Select(Query(default_namespace).Not().Where(kFieldOne, CondEq, strValueToCheck), qr); - EXPECT_TRUE(err.ok()) << err.what(); - for (auto& it : qr) { - Item item = it.GetItem(false); - Variant kr = item[kFieldOne]; - EXPECT_TRUE(kr.Type().Is()); - EXPECT_TRUE(kr.As() != strValueToCheck); - } -} - -TEST_F(BtreeIdsetsApi, SelectByIntField) { - const int boundaryValue = 5000; - - QueryResults qr; - Error err = rt.reindexer->Select(Query(default_namespace).Where(kFieldTwo, CondGe, Variant(static_cast(boundaryValue))), qr); - EXPECT_TRUE(err.ok()) << err.what(); - for (auto& it : qr) { - Item item = it.GetItem(false); - Variant kr = item[kFieldTwo]; - EXPECT_TRUE(kr.Type().Is()); - EXPECT_TRUE(static_cast(kr) >= boundaryValue); - } -} - -TEST_F(BtreeIdsetsApi, SelectByBothFields) { - QueryResults qr; - const int boundaryValue = 50000; - const std::string strValueToCheck = lastStrValue; - const std::string strValueToCheck2 = "reindexer is fast"; - Error err = rt.reindexer->Select(Query(default_namespace) - .Where(kFieldOne, CondLe, strValueToCheck2) - .Not() - .Where(kFieldOne, CondEq, strValueToCheck) - .Where(kFieldTwo, CondGe, Variant(static_cast(boundaryValue))), - qr); - EXPECT_TRUE(err.ok()) << err.what(); - for (auto& it : qr) { - Item item = it.GetItem(false); - Variant krOne = item[kFieldOne]; - EXPECT_TRUE(krOne.Type().Is()); - EXPECT_TRUE(strValueToCheck2.compare(krOne.As()) > 0); - EXPECT_TRUE(krOne.As() != strValueToCheck); - Variant krTwo = item[kFieldTwo]; - EXPECT_TRUE(krTwo.Type().Is()); - EXPECT_TRUE(static_cast(krTwo) >= boundaryValue); - } -} - -TEST_F(BtreeIdsetsApi, SortByStringField) { - QueryResults qr; - Error err = rt.reindexer->Select(Query(default_namespace).Sort(kFieldOne, true), qr); - EXPECT_TRUE(err.ok()) << err.what(); - - Variant prev; - for (auto& it : qr) { - Item item = it.GetItem(false); - Variant curr = item[kFieldOne]; - if (it != qr.begin()) { - EXPECT_TRUE(prev >= curr); - } - prev = curr; - } -} - -TEST_F(BtreeIdsetsApi, SortByIntField) { - QueryResults qr; - Error err = rt.reindexer->Select(Query(default_namespace).Sort(kFieldTwo, false), qr); - EXPECT_TRUE(err.ok()) << err.what(); - - Variant prev; - for (auto& it : qr) { - Item item = it.GetItem(false); - Variant curr = item[kFieldTwo]; - if (it != qr.begin()) { - EXPECT_TRUE(prev.As() <= curr.As()); - } - prev = curr; - } -} - -TEST_F(BtreeIdsetsApi, JoinSimpleNs) { - QueryResults qr; - Query joinedNs{Query(joinedNsName).Where(kFieldThree, CondGt, Variant(static_cast(9000))).Sort(kFieldThree, false)}; - Error err = rt.reindexer->Select( - Query(default_namespace, 0, 3000).InnerJoin(kFieldId, kFieldIdFk, CondEq, std::move(joinedNs)).Sort(kFieldTwo, false), qr); - EXPECT_TRUE(err.ok()) << err.what(); - - Variant prevFieldTwo; - for (auto& it : qr) { - Item item = it.GetItem(false); - 
Variant currFieldTwo = item[kFieldTwo]; - if (it != qr.begin()) { - EXPECT_TRUE(currFieldTwo.As() >= prevFieldTwo.As()); - } - prevFieldTwo = currFieldTwo; - - Variant prevJoinedFk; - auto itemIt = it.GetJoined(); - reindexer::joins::JoinedFieldIterator joinedFieldIt = itemIt.begin(); - EXPECT_TRUE(joinedFieldIt.ItemsCount() > 0); - for (int j = 0; j < joinedFieldIt.ItemsCount(); ++j) { - reindexer::ItemImpl joinedItem = joinedFieldIt.GetItem(j, qr.GetPayloadType(1), qr.GetTagsMatcher(1)); - Variant joinedFkCurr = joinedItem.GetField(qr.GetPayloadType(1).FieldByName(kFieldIdFk)); - EXPECT_TRUE(joinedFkCurr == item[kFieldId]); - if (j != 0) { - EXPECT_TRUE(joinedFkCurr >= prevJoinedFk); - } - prevJoinedFk = joinedFkCurr; - } - } -} - -TEST_F(ReindexerApi, BtreeUnbuiltIndexIteratorsTest) { - reindexer::number_map m1; - reindexer::number_map m2; - - std::vector ids1, ids2; - for (size_t i = 0; i < 10000; ++i) { - auto it1 = m1.insert({i, reindexer::KeyEntry()}); - for (int i = 0; i < rand() % 100 + 50; ++i) { - it1.first->second.Unsorted().Add(IdType(i), reindexer::IdSet::Unordered, 1); - ids1.push_back(i); - } - auto it2 = m2.insert({i, reindexer::KeyEntry()}); - for (int i = 0; i < rand() % 100 + 50; ++i) { - it2.first->second.Unsorted().Add(IdType(i), reindexer::IdSet::Unordered, 1); - ids2.push_back(i); - } - } - - size_t pos = 0; - - reindexer::BtreeIndexIterator bIt1(m1); - bIt1.Start(false); - while (bIt1.Next()) { - EXPECT_TRUE(bIt1.Value() == ids1[pos]) << "iterator value = " << bIt1.Value() << "; expected value = " << ids1[pos]; - ++pos; - } - EXPECT_TRUE(pos == ids1.size()); - EXPECT_TRUE(!bIt1.Next()); - - reindexer::BtreeIndexIterator bIt2(m2); - bIt2.Start(true); - pos = ids2.size() - 1; - while (bIt2.Next()) { - EXPECT_TRUE(bIt2.Value() == ids2[pos]) << "iterator value = " << bIt2.Value() << "; expected value = " << ids2[pos]; - if (pos) --pos; - } - EXPECT_TRUE(pos == 0); - EXPECT_TRUE(!bIt2.Next()); -} diff --git a/cpp_src/gtests/tests/unit/cascade_replication_test.cc b/cpp_src/gtests/tests/unit/cascade_replication_test.cc deleted file mode 100644 index 802659d25..000000000 --- a/cpp_src/gtests/tests/unit/cascade_replication_test.cc +++ /dev/null @@ -1,1149 +0,0 @@ -#include -#include "cascade_replication_api.h" -#include "core/cjson/jsonbuilder.h" -#include "core/queryresults/queryresults.h" -#include "vendor/gason/gason.h" - -using namespace reindexer; - -TEST_F(CascadeReplicationApi, MasterSlaveSyncByWalAddRow) { - // Check WAL synchronization on a single row - const std::string kBaseDbPath(fs::JoinPath(kBaseTestsetDbPath, "MasterSlaveSyncByWalAddRow")); - const std::string kDbPathMaster(kBaseDbPath + "/test_"); - const int port = 9999; - - std::vector clusterConfig = {-1, 0}; - Cluster cluster = CreateConfiguration(clusterConfig, port, 10, kDbPathMaster); - - TestNamespace1 ns1(cluster.Get(0)); - cluster.Get(0)->SetWALSize(100000, ns1.nsName_); - int n1 = 1000; - ns1.AddRows(cluster.Get(0), 0, n1); - - WaitSync(cluster.Get(0), cluster.Get(1), ns1.nsName_); - - const auto replState = cluster.Get(1)->GetState(ns1.nsName_); - ASSERT_EQ(replState.role, ClusterizationStatus::Role::SimpleReplica); - - cluster.ShutdownServer(1); - - const int startId = 10000; - const unsigned int n2 = 2000; - auto master = cluster.Get(0); - auto ThreadAdd = [&master, &ns1]() { - master->SetWALSize(50000, ns1.nsName_); - ns1.AddRows(master, startId, n2); - }; - - std::thread insertThread(ThreadAdd); - - std::this_thread::sleep_for(std::chrono::milliseconds(10)); - - cluster.InitServer(1, port + 
1, port + 1000 + 1, kDbPathMaster + std::to_string(1), "db", true); - - insertThread.join(); - - WaitSync(cluster.Get(0), cluster.Get(1), "ns1"); - - std::vector ids0; - ns1.GetData(cluster.Get(0), ids0); - std::vector ids1; - ns1.GetData(cluster.Get(1), ids1); - - EXPECT_TRUE(ids1.size() == (n1 + n2)); - EXPECT_TRUE(ids0 == ids1); -} - -TEST_F(CascadeReplicationApi, MasterSlaveStart) { - // Check WAL/force sync on multiple rows - const std::string kBaseDbPath(fs::JoinPath(kBaseTestsetDbPath, "MasterSlaveStart")); - const std::string kDbPathMaster(kBaseDbPath + "/test_"); - const int port = 9999; - - std::vector clusterConfig = {-1, 0}; - auto cluster = CreateConfiguration(clusterConfig, port, 10, kDbPathMaster); - - // Insert 100 rows - std::string nsName("ns1"); - TestNamespace1 ns1(cluster.Get(0), nsName); - - unsigned int n1 = 100; - ns1.AddRows(cluster.Get(0), 0, n1); - cluster.Get(0)->SetWALSize(1000, "ns1"); - - WaitSync(cluster.Get(0), cluster.Get(1), nsName); - // restart Slave - cluster.RestartServer(1, port, kDbPathMaster); - WaitSync(cluster.Get(0), cluster.Get(1), nsName); - - // shutdown slave - cluster.ShutdownServer(1); - // insert another 100 rows (200 total) - ns1.AddRows(cluster.Get(0), n1 + 1, n1); - - // run slave - cluster.InitServer(1, port + 1, port + 1000 + 1, kDbPathMaster + std::to_string(1), "db", true); - WaitSync(cluster.Get(0), cluster.Get(1), nsName); - - std::vector ids0; - ns1.GetData(cluster.Get(0), ids0); - std::vector ids1; - ns1.GetData(cluster.Get(1), ids1); - - EXPECT_TRUE(ids1.size() == (n1 + n1)); - EXPECT_TRUE(ids0 == ids1); -} - -TEST_F(CascadeReplicationApi, InterceptingSeparateSlaveNsLists) { - // Check replication with intercepting separate nodes namespace lists - /* - leader - / | \ - 1 2 3 - (ns1,ns2) (ns1) (*-ns1,ns2,ns3) - */ - const std::string kBaseDbPath(fs::JoinPath(kBaseTestsetDbPath, "InterceptingSeparateSlaveNsLists")); - const std::string kDbPathMaster(kBaseDbPath + "/test_"); - const int port = 9999; - const std::string kNs1 = "ns1"; - const std::string kNs2 = "ns2"; - const std::string kNs3 = "ns3"; - const std::vector kFollowerNsList1 = {kNs1, kNs2}; - const std::vector kFollowerNsList2 = {kNs1}; - const std::vector kFollowerNsList3 = {}; - - std::vector clusterConfig = {FollowerConfig{-1}, FollowerConfig{0, kFollowerNsList1}, - FollowerConfig{0, kFollowerNsList2}, FollowerConfig{0}}; - auto cluster = CreateConfiguration(clusterConfig, port, 10, kDbPathMaster, {}); - - // Insert few rows to each namespace - auto leader = cluster.Get(0); - std::vector testNss = {TestNamespace1{leader, kNs1}, TestNamespace1{leader, kNs2}, TestNamespace1{leader, kNs3}}; - const unsigned int n1 = 20; - for (auto& ns : testNss) { - ns.AddRows(leader, 0, n1); - } - - WaitSync(leader, cluster.Get(1), kNs1); - WaitSync(leader, cluster.Get(1), kNs2); - WaitSync(leader, cluster.Get(2), kNs1); - WaitSync(leader, cluster.Get(3), kNs1); - WaitSync(leader, cluster.Get(3), kNs2); - WaitSync(leader, cluster.Get(3), kNs3); - ValidateNsList(cluster.Get(1), clusterConfig[1].nsList.value()); // NOLINT(bugprone-unchecked-optional-access) - ValidateNsList(cluster.Get(2), clusterConfig[2].nsList.value()); // NOLINT(bugprone-unchecked-optional-access) - ValidateNsList(cluster.Get(3), {kNs1, kNs2, kNs3}); - - auto stats = leader->GetReplicationStats(cluster::kAsyncReplStatsType); - WrSerializer wser; - stats.GetJSON(wser); - ASSERT_EQ(stats.nodeStats.size(), 3) << wser.Slice(); - ASSERT_EQ(stats.nodeStats[0].namespaces, kFollowerNsList1) << wser.Slice(); - 
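In these cascade tests the cluster layout is described by a config vector in which element i holds the index of the node that node i replicates from, and -1 marks the leader; the ASCII diagrams in MasterSlaveSlave2 below draw exactly these vectors. A standalone sketch that prints the topology encoded this way (helper name and output format are illustrative):

#include <cstdio>
#include <vector>

// Prints the replication tree encoded by a cascade config vector:
// element i is the index of the node that node i follows, -1 means leader.
void printTopology(const std::vector<int>& clusterConfig) {
	for (size_t i = 0; i < clusterConfig.size(); ++i) {
		if (clusterConfig[i] < 0) {
			std::printf("node %zu: leader\n", i);
		} else {
			std::printf("node %zu: follows node %d\n", i, clusterConfig[i]);
		}
	}
}

int main() {
	printTopology({-1, 0, 0, 1, 2, 2});  // the two-level tree used in MasterSlaveSlave2
	return 0;
}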
ASSERT_EQ(stats.nodeStats[1].namespaces, kFollowerNsList2) << wser.Slice(); - ASSERT_EQ(stats.nodeStats[2].namespaces, kFollowerNsList3) << wser.Slice(); -} - -TEST_F(CascadeReplicationApi, NonInterceptingSeparateSlaveNsLists) { - // Check replication with non-intercepting separate nodes namespace lists - /* - leader - / | \ - 1 2 3 - (ns1) (ns2) (*-ns3) - */ - const std::string kBaseDbPath(fs::JoinPath(kBaseTestsetDbPath, "NonInterceptingSeparateSlaveNsLists")); - const std::string kDbPathMaster(kBaseDbPath + "/test_"); - const int port = 9999; - const std::string kNs1 = "ns1"; - const std::string kNs2 = "ns2"; - const std::string kNs3 = "ns3"; - - std::vector clusterConfig = {FollowerConfig{-1}, FollowerConfig{0, {{kNs1}}}, FollowerConfig{0, {{kNs2}}}, - FollowerConfig{0}}; - auto cluster = CreateConfiguration(clusterConfig, port, 10, kDbPathMaster, {kNs3}); - - // Insert few rows to each namespace - auto leader = cluster.Get(0); - std::vector testNss = {TestNamespace1{leader, kNs1}, TestNamespace1{leader, kNs2}, TestNamespace1{leader, kNs3}}; - const unsigned int n1 = 20; - for (auto& ns : testNss) { - ns.AddRows(leader, 0, n1); - } - - WaitSync(leader, cluster.Get(1), kNs1); - WaitSync(leader, cluster.Get(2), kNs2); - WaitSync(leader, cluster.Get(3), kNs3); - ValidateNsList(cluster.Get(1), clusterConfig[1].nsList.value()); // NOLINT(bugprone-unchecked-optional-access) - ValidateNsList(cluster.Get(2), clusterConfig[2].nsList.value()); // NOLINT(bugprone-unchecked-optional-access) - ValidateNsList(cluster.Get(3), {kNs3}); -} - -TEST_F(CascadeReplicationApi, MasterSlaveSlave2) { - // Check WAL/force sync on cascade setups - auto SimpleTest = [this](int port, const std::vector& clusterConfig) { - const std::string kBaseDbPath(fs::JoinPath(kBaseTestsetDbPath, "MasterSlaveSlave2")); - const std::string kDbPathMaster(kBaseDbPath + "/test_"); - const int serverId = 5; - auto cluster = CreateConfiguration(clusterConfig, port, serverId, kDbPathMaster); - - auto master = cluster.Get(0); - TestNamespace1 ns1(master); - - const int count = 1000; - ns1.AddRows(master, 0, count); - - for (size_t i = 1; i < cluster.Size(); i++) { - WaitSync(master, cluster.Get(i), ns1.nsName_); - } - - std::vector> results; - for (size_t i = 0; i < clusterConfig.size(); i++) { - results.push_back(std::vector()); - ns1.GetData(cluster.Get(i), results.back()); - } - - for (size_t i = 1; i < results.size(); ++i) { - EXPECT_TRUE((results[0] == results[i])); - } - }; - - const int port = 9999; - { - /* - m - | - 1 - | - 2 - */ - std::vector clusterConfig = {-1, 0, 1}; - SimpleTest(port, clusterConfig); - } - { - /* - m - / \ - 1 2 - | | \ - 3 4 5 - */ - - std::vector clusterConfig = {-1, 0, 0, 1, 2, 2}; - SimpleTest(port, clusterConfig); - } -} - -#if !defined(REINDEX_WITH_TSAN) -TEST_F(CascadeReplicationApi, MasterSlaveSlaveReload) { - // Check synchronization continous nodes' restarting - const int port = 9999; - const std::string kBaseDbPath(fs::JoinPath(kBaseTestsetDbPath, "MasterSlaveSlaveReload")); - const std::string kDbPathMaster(kBaseDbPath + "/test_"); - const int serverId = 5; - std::atomic_bool stopRestartServerThread(false); - - /* - m - / \ - 1 2 - | | \ - 3 4 5 - */ - const std::vector clusterConfig = {-1, 0, 0, 1, 2, 2}; - auto cluster = CreateConfiguration(clusterConfig, port, serverId, kDbPathMaster); - auto leader = cluster.Get(0); - TestNamespace1 ns1(leader); - const int startId = 1000; - const int n2 = 20000; - - auto AddThread = [&leader, &ns1]() { ns1.AddRows(leader, startId, n2); }; - - auto 
restartServer = [&cluster, &kDbPathMaster, &stopRestartServerThread]() { - while (!stopRestartServerThread) { - std::this_thread::sleep_for(std::chrono::milliseconds(500)); - int N = rand() % 3; - cluster.RestartServer(N + 1, port, kDbPathMaster); - } - }; - - std::thread insertThread(AddThread); - std::thread restartThread(restartServer); - - insertThread.join(); - stopRestartServerThread = true; - restartThread.join(); - //--------------------------- - - for (size_t i = 1; i < cluster.Size(); ++i) { - TestCout() << "Awaiting sync with " << i << std::endl; - WaitSync(leader, cluster.Get(i), ns1.nsName_); - } - - std::vector> results; - - Query qr = Query(ns1.nsName_).Sort("id", true); - - for (size_t i = 0; i < cluster.Size(); ++i) { - results.push_back(std::vector()); - ns1.GetData(cluster.Get(i), results.back()); - } - - for (size_t i = 1; i < results.size(); ++i) { - EXPECT_TRUE((results[0] == results[i])) << i << "; size[0]: " << results[0].size() << "; size[i]: " << results[i].size(); - } -} -#endif - -TEST_F(CascadeReplicationApi, TransactionTest) { - // Check transactions replication for cascade setup - /* - m - | - 1 - | - 2 - | - 3 - | - 4 - */ - const int port = 9999; - const std::string kDbPathMaster(fs::JoinPath(fs::JoinPath(kBaseTestsetDbPath, "TransactionTest"), "test_")); - const int serverId = 5; - const std::vector clusterConfig = {-1, 0, 1, 2, 3}; - auto cluster = CreateConfiguration(clusterConfig, port, serverId, kDbPathMaster); - const size_t kRows = 100; - - auto master = cluster.Get(0); - TestNamespace1 ns1(master); - - ns1.AddRows(master, 0, kRows); - - for (size_t i = 1; i < cluster.Size(); i++) { - WaitSync(master, cluster.Get(i), ns1.nsName_); - } - - auto tr = master->api.reindexer->NewTransaction(ns1.nsName_); - for (unsigned int i = 0; i < kRows; i++) { - reindexer::client::Item item = tr.NewItem(); - auto err = item.FromJSON("{\"id\":" + std::to_string(i + kRows * 10) + "}"); - tr.Upsert(std::move(item)); - } - BaseApi::QueryResultsType qr; - master->api.reindexer->CommitTransaction(tr, qr); - - for (size_t i = 1; i < cluster.Size(); i++) { - WaitSync(master, cluster.Get(i), ns1.nsName_); - } - - std::vector> results; - for (size_t i = 0; i < cluster.Size(); i++) { - results.push_back(std::vector()); - ns1.GetData(cluster.Get(i), results.back()); - } - - for (size_t i = 1; i < results.size(); ++i) { - EXPECT_TRUE((results[0] == results[i])); - } -} - -TEST_F(CascadeReplicationApi, TransactionCopyPolicyForceSync) { - // Check transactions copy policy after force sync - /* - l - | - 1 - | - 2 - */ - constexpr std::string_view kJsonCfgNss = R"=({ - "namespaces": [ - { - "namespace": "*", - "start_copy_policy_tx_size": 10000, - "copy_policy_multiplier": 5, - "tx_size_to_always_copy": 100000 - }, - { - "namespace": "ns1", - "start_copy_policy_tx_size": 10000, - "copy_policy_multiplier": 5, - "tx_size_to_always_copy": 1 - } - ], - "type": "namespaces" - })="; - constexpr int port = 9999; - const std::string kDbPathMaster(fs::JoinPath(fs::JoinPath(kBaseTestsetDbPath, "TransactionCopyPolicyForceSync"), "test_")); - constexpr int serverId = 5; - constexpr size_t kRows = 100; - const std::string nsName("ns1"); - - auto nodes = CreateConfiguration({-1, 0, 1}, port, serverId, kDbPathMaster); - for (size_t i = 0; i < nodes.Size(); ++i) { - nodes.Get(i)->EnableAllProfilings(); - } - - // Set tx copy policy for the node '2' to 'always copy' - ApplyConfig(nodes.Get(2), kJsonCfgNss); - - nodes.ShutdownServer(2); - - auto leader = nodes.Get(0); - TestNamespace1 ns1(leader, 
nsName); - WaitSync(leader, nodes.Get(1), nsName); - - // Restart node '2' - nodes.InitServer(2, port + 2, port + 1000 + 2, kDbPathMaster + std::to_string(2), "db", true); - auto follower = nodes.Get(2); - WaitSync(leader, follower, nsName); - - // Check copy tx events in the perfstats before tx - CheckTxCopyEventsCount(follower, 0); - - // Apply tx - ns1.AddRowsTx(leader, 0, kRows); - WaitSync(leader, follower, nsName); - - // Check copy tx events in the perfstats after tx - CheckTxCopyEventsCount(follower, 1); -} - -TEST_F(CascadeReplicationApi, TransactionCopyPolicyWalSync) { - // Check transactions copy policy during the wal sync - /* - m - | - 1 - */ - constexpr std::string_view kJsonCfgNss = R"=({ - "namespaces": [ - { - "namespace": "*", - "start_copy_policy_tx_size": 10000, - "copy_policy_multiplier": 5, - "tx_size_to_always_copy": 100000 - }, - { - "namespace": "ns1", - "start_copy_policy_tx_size": 10000, - "copy_policy_multiplier": 5, - "tx_size_to_always_copy": 1 - } - ], - "type": "namespaces" - })="; - constexpr int port = 9999; - const std::string kDbPathMaster(fs::JoinPath(fs::JoinPath(kBaseTestsetDbPath, "TransactionCopyPolicyWalSync"), "/test_")); - constexpr int serverId = 5; - constexpr size_t kRows = 100; - const std::string nsName("ns1"); - - auto nodes = CreateConfiguration({-1, 0}, port, serverId, kDbPathMaster); - for (size_t i = 0; i < nodes.Size(); ++i) { - nodes.Get(i)->EnableAllProfilings(); - } - - // Set tx copy policy for the node '1' to 'always copy' - ApplyConfig(nodes.Get(1), kJsonCfgNss); - - auto leader = nodes.Get(0); - TestNamespace1 ns1(leader, nsName); - WaitSync(leader, nodes.Get(1), nsName); - - nodes.ShutdownServer(1); - // Apply tx - ns1.AddRowsTx(leader, 0, kRows); - - // Restart node '1' - nodes.InitServer(1, port + 1, port + 1000 + 1, kDbPathMaster + std::to_string(1), "db", true); - WaitSync(leader, nodes.Get(1), nsName); - - // Check copy tx event in the perfstats - CheckTxCopyEventsCount(nodes.Get(1), 1); -} - -TEST_F(CascadeReplicationApi, ForceSync3Node) { - // Check force-sync for cascade setup - /* - m - | - 1 - | - 2 - | - 3 - */ - const std::string kBaseDbPath(fs::JoinPath(kBaseTestsetDbPath, "ForceSync3Node")); - ServerControl masterSc; - - masterSc.InitServer(ServerControlConfig(0, 7770, 7880, kBaseDbPath + "/master", "db")); - auto master = masterSc.Get(); - TestNamespace1 testns(master); - testns.AddRows(master, 10, 1000); - master->MakeLeader(); - - ServerControl slave1; - slave1.InitServer(ServerControlConfig(1, 7771, 7881, kBaseDbPath + "/slave1", "db")); - slave1.Get()->MakeFollower(); - master->AddFollower(fmt::format("cproto://127.0.0.1:{}/db", slave1.Get()->RpcPort())); - - ServerControl slave2; - slave2.InitServer(ServerControlConfig(2, 7772, 7882, kBaseDbPath + "/slave2", "db")); - slave2.Get()->MakeFollower(); - slave1.Get()->AddFollower(fmt::format("cproto://127.0.0.1:{}/db", slave2.Get()->RpcPort())); - - ServerControl slave3; - slave3.InitServer(ServerControlConfig(3, 7773, 7883, kBaseDbPath + "/slave3", "db")); - slave3.Get()->MakeFollower(); - slave2.Get()->AddFollower(fmt::format("cproto://127.0.0.1:{}/db", slave3.Get()->RpcPort())); - - WaitSync(master, slave1.Get(), testns.nsName_); - WaitSync(master, slave2.Get(), testns.nsName_); - WaitSync(master, slave3.Get(), testns.nsName_); - - std::vector results_m; - testns.GetData(master, results_m); - - std::vector results_s1; - testns.GetData(slave1.Get(), results_s1); - - std::vector results_s2; - testns.GetData(slave2.Get(), results_s2); - - std::vector results_s3; 
- testns.GetData(slave3.Get(), results_s3); - - EXPECT_TRUE(results_m == results_s1); - EXPECT_TRUE(results_m == results_s2); - EXPECT_TRUE(results_m == results_s3); -} - -TEST_F(CascadeReplicationApi, NodeWithMasterAndSlaveNs1) { - // Check syncing namespaces filtering and writable namespaces on slave - const std::string kBaseDbPath(fs::JoinPath(kBaseTestsetDbPath, "NodeWithMasterAndSlaveNs1")); - ServerControl masterSc; - masterSc.InitServer(ServerControlConfig(0, 7770, 7880, kBaseDbPath + "/master", "db")); - auto master = masterSc.Get(); - master->MakeLeader(); - TestNamespace1 testns1(master, "ns1"); - testns1.AddRows(master, 11, 113); - TestNamespace1 testns2(master, "ns2"); - testns2.AddRows(master, 11, 113); - - const unsigned int c1 = 5011; - const unsigned int c2 = 6013; - const unsigned int n = 121; - ServerControl slaveSc; - slaveSc.InitServer(ServerControlConfig(1, 7771, 7881, kBaseDbPath + "/slave", "db")); - auto slave = slaveSc.Get(); - slave->MakeFollower(); - TestNamespace1 testns3(slave, "ns3"); - testns3.AddRows(slave, c1, n); - master->AddFollower(fmt::format("cproto://127.0.0.1:{}/db", slave->RpcPort())); - testns3.AddRows(slave, c2, n); - - WaitSync(master, slave, testns1.nsName_); - WaitSync(master, slave, testns2.nsName_); - { - std::vector results_m; - testns1.GetData(master, results_m); - - std::vector results_s1; - testns1.GetData(slave, results_s1); - EXPECT_TRUE(results_m == results_s1); - } - { - std::vector results_m; - testns2.GetData(master, results_m); - - std::vector results_s1; - testns2.GetData(slave, results_s1); - EXPECT_TRUE(results_m == results_s1); - } - { - std::vector results_data; - for (unsigned int i = 0; i < n; i++) results_data.push_back(c1 + i); - for (unsigned int i = 0; i < n; i++) results_data.push_back(c2 + i); - - std::vector results_3; - testns3.GetData(slave, results_3); - EXPECT_TRUE(results_data == results_3); - } -} - -TEST_F(CascadeReplicationApi, NodeWithMasterAndSlaveNs2) { - // Check existing namespace resync - const std::string kBaseDbPath(fs::JoinPath(kBaseTestsetDbPath, "NodeWithMasterAndSlaveNs2")); - const unsigned int cm1 = 11; - const unsigned int cm2 = 999; - const unsigned int cm3 = 1999; - const unsigned int nm = 113; - - ServerControl masterSc; - masterSc.InitServer(ServerControlConfig(0, 7770, 7880, kBaseDbPath + "/master", "db")); - auto master = masterSc.Get(); - TestNamespace1 testns1(master, "ns1"); - testns1.AddRows(master, cm1, nm); - TestNamespace1 testns2(master, "ns2"); - testns2.AddRows(master, cm2, nm); - - const unsigned int c1 = 5001; - const unsigned int c2 = 6007; - const unsigned int n = 101; - ServerControl slaveSc; - slaveSc.InitServer(ServerControlConfig(0, 7771, 7881, kBaseDbPath + "/slave", "db")); - auto slave = slaveSc.Get(); - slave->MakeFollower(); - TestNamespace1 testns3(slave, "ns3"); - testns3.AddRows(slave, c1, n); - TestNamespace1 testns4(slave, "ns1"); - testns4.AddRows(slave, c1, n); - master->MakeLeader(AsyncReplicationConfigTest("leader", {}, true, true, 0, "node0")); - master->AddFollower(fmt::format("cproto://127.0.0.1:{}/db", slave->RpcPort()), {{"ns1"}}); - testns3.AddRows(slave, c2, n); - - WaitSync(master, slave, testns1.nsName_); - - testns1.AddRows(master, cm3, nm); - testns2.AddRows(master, cm2, nm); - - ASSERT_EQ(testns1.nsName_, testns4.nsName_); - WaitSync(master, slave, testns1.nsName_); - - { - std::vector results_m; - testns1.GetData(master, results_m); - - std::vector results_s1; - testns1.GetData(slave, results_s1); - EXPECT_TRUE(results_m == results_s1); - } - { 
- std::vector results_data; - results_data.reserve(2 * n); - for (unsigned int i = 0; i < n; i++) results_data.push_back(c1 + i); - for (unsigned int i = 0; i < n; i++) results_data.push_back(c2 + i); - - std::vector results_3; - results_3.reserve(results_data.size()); - testns3.GetData(slave, results_3); - EXPECT_TRUE(results_data == results_3); - } -} - -TEST_F(CascadeReplicationApi, NodeWithMasterAndSlaveNs3) { - // Check syncing namespaces filtering and writable namespaces on slave after role switch - const std::string kBaseDbPath(fs::JoinPath(kBaseTestsetDbPath, "NodeWithMasterAndSlaveNs3")); - const unsigned int c1 = 5001; - const unsigned int c2 = 6001; - const unsigned int n = 101; - ServerControl masterSc; - masterSc.InitServer(ServerControlConfig(0, 7770, 7880, kBaseDbPath + "/master", "db")); - auto master = masterSc.Get(); - master->MakeLeader(); - TestNamespace1 testns1(master, "ns1"); - testns1.AddRows(master, 11, n); - TestNamespace1 testns2(master, "ns2"); - testns2.AddRows(master, 11, n); - - ServerControl slaveSc; - slaveSc.InitServer(ServerControlConfig(0, 7771, 7881, kBaseDbPath + "/slave", "db")); - auto slave = slaveSc.Get(); - slave->MakeFollower(); - TestNamespace1 testns3(slave, "ns3"); - testns3.AddRows(slave, c1, n); - TestNamespace1 testns4(slave, "ns1"); - testns4.AddRows(slave, c1, n); - master->SetReplicationConfig(AsyncReplicationConfigTest("leader", {}, true, true, 0, "node0")); - master->AddFollower(fmt::format("cproto://127.0.0.1:{}/db", slave->RpcPort()), {{"ns1"}}); - testns3.AddRows(slave, c2, n); - - ASSERT_EQ(testns1.nsName_, testns4.nsName_); - WaitSync(master, slave, testns1.nsName_); - - slave->MakeLeader(); - master->SetReplicationConfig(AsyncReplicationConfigTest("leader", {}, true, true, 0, "node0", {})); - slave->ResetReplicationRole(); - testns4.AddRows(slave, c1 + c2, n); - - std::vector results_m; - testns4.GetData(slave, results_m); - ASSERT_TRUE(results_m.size() == n * 2); - ValidateNsList(master, {testns1.nsName_, testns2.nsName_}); - ValidateNsList(slave, {testns3.nsName_, testns4.nsName_}); -} - -TEST_F(CascadeReplicationApi, RenameError) { - // Check if rename still returns error - const std::string kBaseDbPath(fs::JoinPath(kBaseTestsetDbPath, "ForceSync3Node")); - ServerControl masterSc; - - masterSc.InitServer(ServerControlConfig(0, 7770, 7880, kBaseDbPath + "/master", "db")); - auto master = masterSc.Get(); - TestNamespace1 testns(master); - testns.AddRows(master, 10, 10); - master->MakeLeader(); - - ServerControl slave1; - slave1.InitServer(ServerControlConfig(1, 7771, 7881, kBaseDbPath + "/slave1", "db")); - slave1.Get()->MakeFollower(); - master->AddFollower(fmt::format("cproto://127.0.0.1:{}/db", slave1.Get()->RpcPort())); - - WaitSync(master, slave1.Get(), testns.nsName_); - - // Check if ns renaming is not posible in this config - auto err = master->api.reindexer->RenameNamespace(testns.nsName_, "new_ns"); - ASSERT_EQ(err.code(), errParams) << err.what(); - std::vector defs; - err = master->api.reindexer->EnumNamespaces(defs, EnumNamespacesOpts().OnlyNames().HideSystem()); - ASSERT_EQ(defs.size(), 1); - ASSERT_EQ(defs[0].name, testns.nsName_); -} - -// TODO: Enable this test, when new repliation will support namesapce rename -TEST_F(CascadeReplicationApi, DISABLED_RenameSlaveNs) { - // create on master ns1 and ns2 - // create on slave ns1 and ns3 ,ns1 sync whith master - // 1. check on slave rename ns3 to ns3Rename ok - // 2. 
check on slave rename ns1 to ns1RenameSlave fail - // create on master temporary ns (tmpNsName) - // 3. check on master rename tmpNsName to tmpNsNameRename fail - const std::string kBaseDbPath(fs::JoinPath(kBaseTestsetDbPath, "RenameSlaveNs")); - ServerControl masterSc; - masterSc.InitServer(ServerControlConfig(0, 7770, 7880, kBaseDbPath + "/master", "db")); - auto master = masterSc.Get(); - TestNamespace1 testns1(master, "ns1"); - const unsigned int n = 101; - testns1.AddRows(master, 11, n); - master->MakeLeader(); - TestNamespace1 testns2(master, "ns2"); - testns1.AddRows(master, 10015, n); - Error err = master->api.reindexer->RenameNamespace("ns2", "ns2Rename"); - ASSERT_TRUE(err.ok()) << err.what(); - - ServerControl slaveSc; - slaveSc.InitServer(ServerControlConfig(0, 7771, 7881, kBaseDbPath + "/slave", "db")); - auto slave = slaveSc.Get(); - TestNamespace1 testns3(slave, "ns3"); - unsigned int n3 = 1234; - testns3.AddRows(slave, 5015, n3); - TestNamespace1 testns4(slave, "ns1"); - std::string upDsn = "cproto://127.0.0.1:7770/db"; - AsyncReplicationConfigTest::NsSet nsSet = {"ns1"}; - // ReplicationConfigTest configSlave("slave", false, true, 0, upDsn, "slave", nsSet); - // slave->MakeFollower(0, configSlave); - - WaitSync(master, slave, testns1.nsName_); - - err = slave->api.reindexer->RenameNamespace("ns3", "ns3Rename"); - ASSERT_TRUE(err.ok()) << err.what(); - - Query qr = Query("ns3Rename").Sort("id", false); - BaseApi::QueryResultsType res; - err = slave->api.reindexer->Select(qr, res); - EXPECT_TRUE(err.ok()) << err.what(); - std::vector results_m; - for (auto it : res) { - WrSerializer ser; - auto err = it.GetJSON(ser, false); - EXPECT_TRUE(err.ok()) << err.what(); - gason::JsonParser parser; - auto root = parser.Parse(ser.Slice()); - results_m.push_back(root["id"].As()); - } - ASSERT_TRUE(results_m.size() == n3); - - err = slave->api.reindexer->RenameNamespace("ns1", "ns1RenameSlave"); - ASSERT_FALSE(err.ok()); - - std::string tmpNsName("tmpNsName"); - NamespaceDef tmpNsDef = NamespaceDef(tmpNsName, StorageOpts().Enabled().CreateIfMissing()); - tmpNsDef.AddIndex("id", "hash", "int", IndexOpts().PK()); - tmpNsDef.isTemporary = true; - err = master->api.reindexer->AddNamespace(tmpNsDef); - ASSERT_TRUE(err.ok()) << err.what(); - reindexer::client::Item item = master->api.NewItem(tmpNsName); - err = item.FromJSON("{\"id\":" + std::to_string(10) + "}"); - ASSERT_TRUE(err.ok()) << err.what(); - err = master->api.reindexer->Upsert(tmpNsName, item); - ASSERT_TRUE(err.ok()) << err.what(); - err = master->api.reindexer->RenameNamespace(tmpNsName, tmpNsName + "Rename"); - ASSERT_FALSE(err.ok()); - - BaseApi::QueryResultsType r1; - err = master->api.reindexer->Select("Select * from " + tmpNsName, r1); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_TRUE(r1.Count() == 1); -} - -TEST_F(CascadeReplicationApi, Node3ApplyWal) { - // Node configuration: - // master - // | - // slave1 - // | - // slave2 - // Checks applying syncNamespaceByWAL on slave1 and slave2 node. 
- - const std::string kBaseDbPath(fs::JoinPath(kBaseTestsetDbPath, "Node3ApplyWal")); - const std::string kNsName = "ns1"; - const unsigned int n = 2; - { - ServerControl masterSc; - ServerControl slave1Sc; - ServerControl slave2Sc; - masterSc.InitServer(ServerControlConfig(0, 7770, 7880, kBaseDbPath + "/master", "db")); - auto master = masterSc.Get(); - master->MakeLeader(); - TestNamespace1 testns1(master, kNsName); - testns1.AddRows(master, 3000, n); - // start init of slave - { - slave1Sc.InitServer(ServerControlConfig(1, 7771, 7881, kBaseDbPath + "/slave1", "db")); - slave2Sc.InitServer(ServerControlConfig(2, 7772, 7882, kBaseDbPath + "/slave2", "db")); - auto slave1 = slave1Sc.Get(); - auto slave2 = slave2Sc.Get(); - slave1->MakeFollower(); - master->AddFollower(fmt::format("cproto://127.0.0.1:{}/db", slave1->RpcPort())); - slave2->MakeFollower(); - slave1->AddFollower(fmt::format("cproto://127.0.0.1:{}/db", slave2->RpcPort())); - WaitSync(master, slave1, kNsName); - WaitSync(master, slave2, kNsName); - } - } - - { - ServerControl masterSc; - masterSc.InitServer(ServerControlConfig(0, 7770, 7880, kBaseDbPath + "/master", "db")); - auto master = masterSc.Get(); - TestNamespace1 testns1(master, kNsName); - testns1.AddRows(master, 30000, n); - } - ServerControl masterSc; - masterSc.InitServer(ServerControlConfig(0, 7770, 7880, kBaseDbPath + "/master", "db")); - - ServerControl slave1Sc; - slave1Sc.InitServer(ServerControlConfig(1, 7771, 7881, kBaseDbPath + "/slave1", "db")); - - ServerControl slave2Sc; - slave2Sc.InitServer(ServerControlConfig(2, 7772, 7882, kBaseDbPath + "/slave2", "db")); - - WaitSync(masterSc.Get(), slave1Sc.Get(), kNsName); - WaitSync(masterSc.Get(), slave2Sc.Get(), kNsName); -} - -static int64_t AwaitUpdatesReplication(const ServerControl::Interface::Ptr& node) { - auto awaitTime = std::chrono::milliseconds(10000); - constexpr auto step = std::chrono::milliseconds(100); - cluster::ReplicationStats stats; - WrSerializer ser; - for (; awaitTime.count() > 0; awaitTime -= step) { - stats = node->GetReplicationStats("async"); - assert(stats.nodeStats.size() == 1); - ser.Reset(); - stats.GetJSON(ser); - if (stats.pendingUpdatesCount == 0 && stats.nodeStats[0].updatesCount == 0) { - return stats.updateDrops; - } - std::this_thread::sleep_for(step); - } - assertf(false, "Stats: %s", ser.Slice()); - return 0; -} - -TEST_F(CascadeReplicationApi, RestrictUpdates) { - // 1. create master node, - // 2. set max updates size 1024 * 5 (actual size will be 1024 * 1024) - // 3. add 5000 rows - // 4. start slave node - // 5. insert more (updates will be pended in queue due to force sync) - // 6. 
wait sync
- const std::string upDsn = "cproto://127.0.0.1:7770/db";
- const std::string kBaseDbPath(fs::JoinPath(kBaseTestsetDbPath, "RestrictUpdates"));
- ServerControl masterSc;
- masterSc.InitServer(ServerControlConfig(0, 7770, 7880, kBaseDbPath + "/master", "db", true, 1024 * 5));
- auto master = masterSc.Get();
- master->MakeLeader();
-
- ServerControl slaveSc;
- slaveSc.InitServer(ServerControlConfig(1, 7771, 7881, kBaseDbPath + "/slave", "db"));
- auto slave = slaveSc.Get();
- slave->MakeFollower();
-
- TestNamespace1 testns1(master, "ns1");
- testns1.AddRows(master, 0, 2000, 10000);
-
- const int count = 400;
- const int from = 1000000;
- const std::string nsName("ns1");
- std::string dataString;
- for (size_t i = 0; i < 10000; ++i) {
- dataString.append("xxx");
- }
-
- master->AddFollower(fmt::format("cproto://127.0.0.1:{}/db", slave->RpcPort()));
-
- for (unsigned int i = 0; i < count; i++) {
- reindexer::client::Item item = master->api.NewItem("ns1");
- std::string itemJson = fmt::sprintf(R"json({"id": %d, "data": "%s" })json", i + from, dataString);
- auto err = item.Unsafe().FromJSON(itemJson);
- ASSERT_TRUE(err.ok()) << err.what();
- master->api.Upsert(nsName, item);
- ASSERT_TRUE(err.ok()) << err.what();
- }
-
- WaitSync(master, slave, nsName);
-
- const auto updatesDrops1 = AwaitUpdatesReplication(master);
-
- // Make sure that replication works fine after the updates drop
- testns1.AddRows(master, 0, 100, 2000);
- WaitSync(master, slave, nsName);
-
- const auto updatesDrops2 = AwaitUpdatesReplication(master);
- if (!updatesDrops2 || !updatesDrops1) {
- // Mark the test as skipped, because we didn't get any updates drops
- GTEST_SKIP();
- }
-}
-
-#if !defined(REINDEX_WITH_TSAN)
-TEST_F(CascadeReplicationApi, ConcurrentForceSync) {
- /*
- * Check concurrent force sync and updates subscription on nodes 1, 2 and 3
- m
- |
- 1
- / \
- 2 3
- */
- const int kBasePort = 9999;
- const std::string kBaseDbPath(fs::JoinPath(kBaseTestsetDbPath, "ConcurrentForceSync"));
- const std::string kDbName("db");
- const std::vector<std::string> kNsList = {"ns1", "ns2", "ns3", "ns4"};
- const size_t kNsSyncCount = 3;
-
- std::vector<ServerControl> nodes;
- auto createFollower = [&kBaseDbPath, &kDbName, &nodes, &kNsList](const ServerPtr& leader) {
- size_t id = nodes.size();
- nodes.push_back(ServerControl());
- nodes.back().InitServer(
- ServerControlConfig(id, kBasePort + id, kBasePort + 1000 + id, kBaseDbPath + "/slave" + std::to_string(id), kDbName));
- AsyncReplicationConfigTest::NsSet nsSet;
- for (size_t i = 0; i < kNsSyncCount; ++i) {
- nsSet.emplace(kNsList[i]);
- }
- auto follower = nodes.back().Get();
- follower->SetReplicationConfig(
- AsyncReplicationConfigTest{"follower", {}, false, true, int(id), "node_" + std::to_string(id), std::move(nsSet)});
- leader->AddFollower(fmt::format("cproto://127.0.0.1:{}/{}", follower->RpcPort(), kDbName));
- return follower;
- };
-
- // Create leader
- ServerPtr leader;
- {
- nodes.push_back(ServerControl());
- nodes.back().InitServer(ServerControlConfig(0, kBasePort, kBasePort + 1000, kBaseDbPath + "/master", kDbName));
- AsyncReplicationConfigTest::NsSet nsSet;
- for (size_t i = 0; i < kNsSyncCount; ++i) {
- nsSet.emplace(kNsList[i]);
- }
- leader = nodes.back().Get();
- leader->SetReplicationConfig(AsyncReplicationConfigTest{"leader", {}, false, true, 0, "node_0", std::move(nsSet)});
- }
-
- // Fill leader's data
- const size_t kRows = 10000;
- const size_t kDataBytes = 1000;
- std::vector<TestNamespace1> testNsList;
- for (auto& ns : kNsList) {
- testNsList.emplace_back(nodes[0].Get(), ns);
-
testNsList.back().AddRows(nodes[0].Get(), 0, kRows, kDataBytes); - } - - // Create semileader - auto semiNode = createFollower(leader); - - // Create slaves - createFollower(semiNode); - createFollower(semiNode); - - for (size_t i = 1; i < nodes.size(); i++) { - for (size_t j = 0; j < kNsSyncCount; ++j) { - WaitSync(leader, nodes[i].Get(), kNsList[j]); - } - } - - // Add one more row to master - for (auto& ns : testNsList) { - ns.AddRows(nodes[0].Get(), kRows, 1, kDataBytes); - } - - for (size_t i = 0; i < kNsSyncCount; ++i) { - std::vector> results; - for (size_t j = 0; j < nodes.size(); j++) { - results.push_back(std::vector()); - WaitSync(nodes[0].Get(), nodes[j].Get(), kNsList[i]); - testNsList[i].GetData(nodes[j].Get(), results.back()); - } - - for (size_t j = 1; j < results.size(); ++j) { - EXPECT_TRUE((results[0] == results[j])); - } - } - - // Allow server to handle disconnects and remove temporary namespaces - std::this_thread::sleep_for(std::chrono::seconds(2)); - - std::vector syncNsList(kNsSyncCount); - std::copy(kNsList.begin(), kNsList.begin() + kNsSyncCount, syncNsList.begin()); - for (size_t i = 0; i < nodes.size(); ++i) { - if (i == 0) { - ValidateNsList(nodes[i].Get(), kNsList); - } else { - ValidateNsList(nodes[i].Get(), syncNsList); - } - } -} -#endif - -TEST_F(CascadeReplicationApi, WriteIntoSlaveNsAfterReconfiguration) { - // Check if it is possible to write in slave's ns after removing this ns from replication ns list - const std::string kBaseDbPath(fs::JoinPath(kBaseTestsetDbPath, "WriteIntoSlaveNsAfterReconfiguration/node_")); - const unsigned int n = 5; - const int kBasePort = 7770; - const int kServerId = 5; - const std::string kNs1 = "ns1"; - const std::string kNs2 = "ns2"; - int manualItemId = 5; - auto cluster = CreateConfiguration({-1, 0}, kBasePort, kServerId, kBaseDbPath); - TestNamespace1 testns1(cluster.Get(0), kNs1); - testns1.AddRows(cluster.Get(0), 0, n); - TestNamespace1 testns2(cluster.Get(0), kNs2); - testns2.AddRows(cluster.Get(0), 1, n); - - WaitSync(cluster.Get(0), cluster.Get(1), kNs1); - WaitSync(cluster.Get(0), cluster.Get(1), kNs2); - - auto createItem = [](const ServerPtr& node, const std::string& ns, int itemId) -> reindexer::client::Item { - reindexer::client::Item item = node->api.NewItem(ns); - auto err = item.FromJSON("{\"id\":" + std::to_string(itemId) + "}"); - EXPECT_TRUE(err.ok()) << err.what(); - return item; - }; - - auto item = createItem(cluster.Get(1), kNs1, manualItemId); - auto err = cluster.Get(1)->api.reindexer->Upsert(kNs1, item); - ASSERT_EQ(err.code(), errWrongReplicationData) << err.what(); - item = createItem(cluster.Get(1), kNs2, manualItemId); - err = cluster.Get(1)->api.reindexer->Upsert(kNs1, item); - ASSERT_EQ(err.code(), errWrongReplicationData) << err.what(); - - // Remove ns1 from replication config - { - auto config = cluster.Get(0)->GetServerConfig(ServerControl::ConfigType::Namespace); - config.namespaces = {kNs2}; - cluster.Get(0)->SetReplicationConfig(config); - cluster.Get(1)->ResetReplicationRole(kNs1); - // Await for replicator startup - testns1.AddRows(cluster.Get(0), 100, n); - testns2.AddRows(cluster.Get(0), 100, n); - WaitSync(cluster.Get(0), cluster.Get(1), kNs2); - } - - item = createItem(cluster.Get(1), kNs1, manualItemId); - err = cluster.Get(1)->api.reindexer->Upsert(kNs1, item); - ASSERT_TRUE(err.ok()) << err.what(); - item = createItem(cluster.Get(1), kNs2, manualItemId++); - err = cluster.Get(1)->api.reindexer->Upsert(kNs2, item); - ASSERT_EQ(err.code(), errWrongReplicationData) << 
err.what(); - WaitSync(cluster.Get(0), cluster.Get(1), kNs2); - - // Restart slave - cluster.RestartServer(1, kBasePort, kBaseDbPath); - - item = createItem(cluster.Get(1), kNs1, manualItemId); - err = cluster.Get(1)->api.reindexer->Upsert(kNs1, item); - ASSERT_TRUE(err.ok()) << err.what(); - item = createItem(cluster.Get(1), kNs2, manualItemId++); - err = cluster.Get(1)->api.reindexer->Upsert(kNs2, item); - ASSERT_EQ(err.code(), errWrongReplicationData) << err.what(); - - auto validateItemsCount = [](const ServerPtr& node, const std::string& nsName, size_t expectedCnt) { - BaseApi::QueryResultsType qr; - auto err = node->api.reindexer->Select(Query(nsName), qr); - EXPECT_TRUE(err.ok()) << err.what(); - EXPECT_EQ(qr.Count(), expectedCnt); - }; - validateItemsCount(cluster.Get(0), kNs1, 2 * n); - validateItemsCount(cluster.Get(0), kNs2, 2 * n); - validateItemsCount(cluster.Get(1), kNs1, n + 2); - validateItemsCount(cluster.Get(1), kNs2, 2 * n); - - // Enable slave mode for ns1 - { - AsyncReplicationConfigTest::NsSet nsSet = {kNs1, kNs2}; - auto config = cluster.Get(0)->GetServerConfig(ServerControl::ConfigType::Namespace); - config.namespaces = {kNs1, kNs2}; - cluster.Get(0)->SetReplicationConfig(config); - WaitSync(cluster.Get(0), cluster.Get(1), kNs1); - WaitSync(cluster.Get(0), cluster.Get(1), kNs2); - } - - item = createItem(cluster.Get(1), kNs1, manualItemId); - err = cluster.Get(1)->api.reindexer->Upsert(kNs1, item); - ASSERT_EQ(err.code(), errWrongReplicationData) << err.what(); - item = createItem(cluster.Get(1), kNs2, manualItemId); - err = cluster.Get(1)->api.reindexer->Upsert(kNs1, item); - ASSERT_EQ(err.code(), errWrongReplicationData) << err.what(); - - testns1.AddRows(cluster.Get(0), 200, n); - testns2.AddRows(cluster.Get(0), 200, n); - WaitSync(cluster.Get(0), cluster.Get(1), kNs1); - WaitSync(cluster.Get(0), cluster.Get(1), kNs2); - validateItemsCount(cluster.Get(0), kNs1, 3 * n); - validateItemsCount(cluster.Get(0), kNs2, 3 * n); - validateItemsCount(cluster.Get(1), kNs1, 3 * n); - validateItemsCount(cluster.Get(1), kNs2, 3 * n); -} - -static void AwaitFollowersState(const ServerControl::Interface::Ptr& node, cluster::NodeStats::Status expectedStatus, - cluster::NodeStats::SyncState expectedSyncState) { - constexpr std::chrono::milliseconds step{100}; - std::chrono::milliseconds awaitTime{10000}; - WrSerializer wser; - while (awaitTime.count() > 0) { - auto stats = node->GetReplicationStats(cluster::kAsyncReplStatsType); - wser.Reset(); - stats.GetJSON(wser); - ASSERT_EQ(stats.nodeStats.size(), 1) << wser.Slice(); - ASSERT_EQ(stats.nodeStats[0].role, cluster::RaftInfo::Role::Follower) << wser.Slice(); - if (stats.nodeStats[0].status == expectedStatus && stats.nodeStats[0].syncState == expectedSyncState) { - return; - } - - std::this_thread::sleep_for(step); - awaitTime -= step; - } - ASSERT_TRUE(false) << "Timeout: " << wser.Slice(); -} - -TEST_F(CascadeReplicationApi, FollowerNetworkAndSyncStatus) { - // Check if network and sync status of the follower depends on actual follower's state - const std::string kBaseDbPath(fs::JoinPath(kBaseTestsetDbPath, "WriteIntoSlaveNsAfterReconfiguration/node_")); - const unsigned int n = 5; - const int kBasePort = 7770; - const int kServerId = 5; - const std::string kNs1 = "ns1"; - auto cluster = CreateConfiguration({-1, 0}, kBasePort, kServerId, kBaseDbPath); - TestNamespace1 testns1(cluster.Get(0), kNs1); - testns1.AddRows(cluster.Get(0), 0, n); - WaitSync(cluster.Get(0), cluster.Get(1), kNs1); - - AwaitFollowersState(cluster.Get(0), 
cluster::NodeStats::Status::Online, cluster::NodeStats::SyncState::OnlineReplication); - - cluster.ShutdownServer(1); - AwaitFollowersState(cluster.Get(0), cluster::NodeStats::Status::Offline, cluster::NodeStats::SyncState::AwaitingResync); -} diff --git a/cpp_src/gtests/tests/unit/cgoctxpool_tests.cc b/cpp_src/gtests/tests/unit/cgoctxpool_tests.cc deleted file mode 100644 index 649b25f1b..000000000 --- a/cpp_src/gtests/tests/unit/cgoctxpool_tests.cc +++ /dev/null @@ -1,379 +0,0 @@ -#include "cgoctxpool_api.h" - -#include - -namespace CGOCtxPoolTests { - -using reindexer::CancelType; - -static const size_t kCtxPoolSize = 4096; -static const size_t kCtxPoolTestsRepeatCount = 40; - -TEST_F(CGOCtxPoolApi, SingleThread) { - static const size_t kFirstIterCount = kCtxPoolSize + 1; - static const size_t kSecondIterCount = kCtxPoolSize * 3 / 2 + 1; - static const size_t kThirdIterCount = kCtxPoolSize * 5 / 2 + 1; - - auto pool = createCtxPool(kCtxPoolSize); - std::vector ctxPtrs(kCtxPoolSize); - - EXPECT_TRUE(getAndValidateCtx(0, *pool) == nullptr); - - for (uint64_t i = 1; i < kFirstIterCount; ++i) { - ctxPtrs[i - 1] = getAndValidateCtx(i, *pool); - ASSERT_TRUE(ctxPtrs[i - 1] != nullptr); - } - - // Trying to get the same contexts - for (uint64_t i = 1; i < kFirstIterCount; ++i) { - ASSERT_TRUE(getAndValidateCtx(i, *pool) == nullptr); - } - - // Get few more - for (uint64_t i = kFirstIterCount; i < kSecondIterCount; ++i) { - ASSERT_TRUE(getAndValidateCtx(i, *pool) != nullptr); - } - - auto& contexts = pool->contexts(); - ASSERT_EQ(contexts.size(), kCtxPoolSize); - for (size_t i = 0; i < kCtxPoolSize; ++i) { - EXPECT_TRUE(ContextsPoolImpl::isInitialized(contexts[i].ctxID)); - EXPECT_FALSE(ContextsPoolImpl::isCanceling(contexts[i].ctxID)); - EXPECT_FALSE(ContextsPoolImpl::isCanceled(contexts[i].ctxID)); - if (i < kCtxPoolSize / 2 + 1 && i != 0) { - ASSERT_TRUE(contexts[i].next != nullptr); - EXPECT_TRUE(ContextsPoolImpl::isInitialized(contexts[i].next->ctxID)); - EXPECT_FALSE(ContextsPoolImpl::isCanceling(contexts[i].next->ctxID)); - EXPECT_FALSE(ContextsPoolImpl::isCanceled(contexts[i].next->ctxID)); - ASSERT_TRUE(contexts[i].next->next == nullptr); - } else { - ASSERT_TRUE(contexts[i].next == nullptr); - } - } - - // Cancel some of the ctx - EXPECT_FALSE(pool->cancelContext(0, CancelType::Explicit)); - for (uint64_t i = 1; i < kCtxPoolSize / 2 + 1; ++i) { - ASSERT_TRUE(pool->cancelContext(i, CancelType::Explicit)); - EXPECT_EQ(ctxPtrs[i - 1]->GetCancelType(), CancelType::Explicit); - } - - auto validateAfterCancel = [&]() { - for (size_t i = 0; i < kCtxPoolSize; ++i) { - if (i < kCtxPoolSize / 2 + 1 && i != 0) { - EXPECT_TRUE(ContextsPoolImpl::isInitialized(contexts[i].ctxID)); - EXPECT_FALSE(ContextsPoolImpl::isCanceling(contexts[i].ctxID)); - EXPECT_TRUE(ContextsPoolImpl::isCanceled(contexts[i].ctxID)); - - ASSERT_TRUE(contexts[i].next != nullptr); - EXPECT_TRUE(ContextsPoolImpl::isInitialized(contexts[i].next->ctxID)); - EXPECT_FALSE(ContextsPoolImpl::isCanceling(contexts[i].next->ctxID)); - EXPECT_FALSE(ContextsPoolImpl::isCanceled(contexts[i].next->ctxID)); - ASSERT_TRUE(contexts[i].next->next == nullptr); - } else { - EXPECT_TRUE(ContextsPoolImpl::isInitialized(contexts[i].ctxID)); - EXPECT_FALSE(ContextsPoolImpl::isCanceling(contexts[i].ctxID)); - EXPECT_FALSE(ContextsPoolImpl::isCanceled(contexts[i].ctxID)); - ASSERT_TRUE(contexts[i].next == nullptr); - } - } - }; - validateAfterCancel(); - - // Cancel the same contexts - for (uint64_t i = 1; i < kCtxPoolSize / 2 + 1; ++i) { - 
ASSERT_TRUE(pool->cancelContext(i, CancelType::Explicit)); - } - validateAfterCancel(); - - // Get even more contexts - for (uint64_t i = kSecondIterCount; i < kThirdIterCount; ++i) { - ASSERT_TRUE(getAndValidateCtx(i, *pool) != nullptr); - } - - for (size_t i = 0; i < kCtxPoolSize; ++i) { - EXPECT_TRUE(ContextsPoolImpl::isInitialized(contexts[i].ctxID)); - EXPECT_FALSE(ContextsPoolImpl::isCanceling(contexts[i].ctxID)); - ASSERT_TRUE(contexts[i].next != nullptr); - EXPECT_TRUE(ContextsPoolImpl::isInitialized(contexts[i].next->ctxID)); - EXPECT_FALSE(ContextsPoolImpl::isCanceling(contexts[i].next->ctxID)); - EXPECT_FALSE(ContextsPoolImpl::isCanceled(contexts[i].next->ctxID)); - if (i < kCtxPoolSize / 2 + 1 && i != 0) { - EXPECT_TRUE(ContextsPoolImpl::isCanceled(contexts[i].ctxID)); - ASSERT_TRUE(contexts[i].next->next != nullptr); - EXPECT_TRUE(ContextsPoolImpl::isInitialized(contexts[i].next->next->ctxID)); - EXPECT_FALSE(ContextsPoolImpl::isCanceling(contexts[i].next->next->ctxID)); - EXPECT_FALSE(ContextsPoolImpl::isCanceled(contexts[i].next->next->ctxID)); - ASSERT_TRUE(contexts[i].next->next->next == nullptr); - } else { - EXPECT_FALSE(ContextsPoolImpl::isCanceled(contexts[i].ctxID)); - ASSERT_TRUE(contexts[i].next->next == nullptr); - } - } - - // Remove some of the contexts - EXPECT_FALSE(pool->removeContext(0)); - for (uint64_t i = 1; i < kFirstIterCount; ++i) { - ASSERT_TRUE(pool->removeContext(i)); - } - - auto validateAfterRemove = [&]() { - for (size_t i = 0; i < kCtxPoolSize; ++i) { - EXPECT_EQ(contexts[i].ctxID, 0); - ASSERT_TRUE(contexts[i].next != nullptr); - EXPECT_TRUE(ContextsPoolImpl::isInitialized(contexts[i].next->ctxID)); - EXPECT_FALSE(ContextsPoolImpl::isCanceling(contexts[i].next->ctxID)); - EXPECT_FALSE(ContextsPoolImpl::isCanceled(contexts[i].next->ctxID)); - if (i < kCtxPoolSize / 2 + 1 && i != 0) { - ASSERT_TRUE(contexts[i].next->next != nullptr); - EXPECT_TRUE(ContextsPoolImpl::isInitialized(contexts[i].next->next->ctxID)); - EXPECT_FALSE(ContextsPoolImpl::isCanceling(contexts[i].next->next->ctxID)); - EXPECT_FALSE(ContextsPoolImpl::isCanceled(contexts[i].next->next->ctxID)); - ASSERT_TRUE(contexts[i].next->next->next == nullptr); - } else { - ASSERT_TRUE(contexts[i].next->next == nullptr); - } - } - }; - validateAfterRemove(); - - // Remove the same of the contexts - for (uint64_t i = 1; i < kFirstIterCount; ++i) { - ASSERT_FALSE(pool->removeContext(i)); - } - validateAfterRemove(); - - // Remove the rest of the contexts - for (uint64_t i = kFirstIterCount; i < kThirdIterCount; ++i) { - ASSERT_TRUE(pool->removeContext(i)); - } - for (size_t i = 0; i < kCtxPoolSize; ++i) { - EXPECT_EQ(contexts[i].ctxID, 0); - ASSERT_TRUE(contexts[i].next != nullptr); - EXPECT_EQ(contexts[i].next->ctxID, 0); - if (i < kCtxPoolSize / 2 + 1 && i != 0) { - ASSERT_TRUE(contexts[i].next->next != nullptr); - EXPECT_EQ(contexts[i].next->next->ctxID, 0); - ASSERT_TRUE(contexts[i].next->next->next == nullptr); - } else { - ASSERT_TRUE(contexts[i].next->next == nullptr); - } - } -} - -void CGOCtxPoolApi::multiThreadTest(size_t threadsCount, MultiThreadTestMode mode) { - auto pool = createCtxPool(kCtxPoolSize); - - std::vector threads; - threads.reserve(threadsCount); - std::condition_variable cond; - std::mutex mtx; - std::atomic awaitCount{0}; - for (size_t i = 0; i < threadsCount; ++i) { - threads.emplace_back(std::thread([this, &pool, &cond, &mtx, &awaitCount, mode]() { - static const size_t kCtxCount = kCtxPoolSize * 2; - std::vector ctxPtrs(kCtxCount); - std::unique_lock lck(mtx); - 
awaitCount.fetch_add(1, std::memory_order_relaxed); - cond.wait(lck); - lck.unlock(); - for (uint64_t i = 0; i < kCtxCount; ++i) { - ctxPtrs[i] = getAndValidateCtx(i + 1, *pool); - } - - if (mode == MultiThreadTestMode::Synced) { - awaitCount.fetch_add(-1, std::memory_order_acq_rel); - while (awaitCount.load(std::memory_order_acquire) > 0) { - std::this_thread::yield(); - } - } - - for (uint64_t i = 0; i < kCtxCount; ++i) { - if (ctxPtrs[i] && i % 2 == 0) { - EXPECT_TRUE(pool->cancelContext(i + 1, CancelType::Explicit)); - if (mode == MultiThreadTestMode::Synced) { - EXPECT_EQ(ctxPtrs[i]->GetCancelType(), CancelType::Explicit); - } - } - } - for (uint64_t i = 0; i < kCtxCount; ++i) { - if (ctxPtrs[i]) { - EXPECT_TRUE(pool->removeContext(i + 1)); - } - } - })); - } - - while (awaitCount.load(std::memory_order_relaxed) < threadsCount) { - std::this_thread::yield(); - } - std::unique_lock lck(mtx); - cond.notify_all(); - lck.unlock(); - for (auto& thread : threads) { - thread.join(); - } - - auto& contexts = pool->contexts(); - for (size_t i = 0; i < kCtxPoolSize; ++i) { - auto node = &contexts[i]; - ASSERT_TRUE(node->next != nullptr); - do { - EXPECT_EQ(node->ctxID, 0); - node = node->next; - } while (node); - } -} - -TEST_F(CGOCtxPoolApi, MultiThread) { - const size_t kThreadsCount = 16; - for (size_t testNum = 0; testNum < kCtxPoolTestsRepeatCount; ++testNum) { - multiThreadTest(kThreadsCount, MultiThreadTestMode::Simple); - } -} - -TEST_F(CGOCtxPoolApi, MultiThreadSynced) { - const size_t kThreadsCount = 16; - for (size_t testNum = 0; testNum < kCtxPoolTestsRepeatCount; ++testNum) { - multiThreadTest(kThreadsCount, MultiThreadTestMode::Synced); - } -} - -TEST_F(CGOCtxPoolApi, ConcurrentCancel) { - static const size_t kGetThreadsCount = 8; - static const size_t kCancelThreadsCount = 8; - - auto pool = createCtxPool(kCtxPoolSize); - - for (size_t testNum = 0; testNum < kCtxPoolTestsRepeatCount; ++testNum) { - std::vector threads; - threads.reserve(kGetThreadsCount + kCancelThreadsCount); - std::condition_variable cond; - std::mutex mtx; - std::atomic awaitCount{0}; - for (size_t i = 0; i < kGetThreadsCount; ++i) { - threads.emplace_back(std::thread([i, &pool, &cond, &mtx, &awaitCount]() { - size_t threadID = i; - std::unique_lock lck(mtx); - awaitCount.fetch_add(1, std::memory_order_relaxed); - cond.wait(lck); - lck.unlock(); - static const size_t kCtxCount = 2 * kCtxPoolSize / kGetThreadsCount; - std::vector ctxPtrs(kCtxCount); - for (uint64_t i = kCtxCount * threadID, j = 0; i < kCtxCount * (threadID + 1); ++i, ++j) { - ctxPtrs[j] = pool->getContext(i + 1); - ASSERT_TRUE(ctxPtrs[j] != nullptr); - } - - for (uint64_t i = kCtxCount * threadID, j = 0; i < kCtxCount * (threadID + 1); ++i, ++j) { - while (ctxPtrs[j]->GetCancelType() == CancelType::None) { - std::this_thread::yield(); - } - EXPECT_TRUE(pool->removeContext(i + 1)); - } - })); - } - - for (size_t i = 0; i < kCancelThreadsCount; ++i) { - threads.emplace_back(std::thread([i, &pool, &cond, &mtx, &awaitCount]() { - size_t threadID = i; - std::unique_lock lck(mtx); - awaitCount.fetch_add(1, std::memory_order_acq_rel); - cond.wait(lck); - lck.unlock(); - static const size_t kCtxCount = 2 * kCtxPoolSize / kCancelThreadsCount; - for (uint64_t i = kCtxCount * threadID; i < kCtxCount * (threadID + 1); ++i) { - while (!pool->cancelContext(i + 1, CancelType::Explicit)) { - std::this_thread::yield(); - } - } - })); - } - - while (awaitCount.load(std::memory_order_acquire) < kCancelThreadsCount + kGetThreadsCount) { - 
std::this_thread::yield(); - } - std::unique_lock lck(mtx); - cond.notify_all(); - lck.unlock(); - for (auto& thread : threads) { - thread.join(); - } - - auto& contexts = pool->contexts(); - for (size_t i = 0; i < kCtxPoolSize; ++i) { - auto node = &contexts[i]; - do { - EXPECT_EQ(node->ctxID, 0); - node = node->next; - } while (node); - } - } -} - -// Just for tsan check -TEST_F(CGOCtxPoolApi, GeneralConcurrencyCheck) { - static const size_t kGetThreadsCount = 8; - static const size_t kRemoveThreadsCount = 8; - static const size_t kCancelThreadsCount = 8; - - for (size_t testNum = 0; testNum < kCtxPoolTestsRepeatCount; ++testNum) { - auto pool = createCtxPool(kCtxPoolSize); - - std::vector threads; - threads.reserve(kGetThreadsCount + kCancelThreadsCount + kRemoveThreadsCount); - std::condition_variable cond; - std::mutex mtx; - std::atomic awaitCount{0}; - for (size_t i = 0; i < kGetThreadsCount; ++i) { - threads.emplace_back(std::thread([&pool, &cond, &mtx, &awaitCount]() { - std::unique_lock lck(mtx); - awaitCount.fetch_add(1, std::memory_order_relaxed); - cond.wait(lck); - lck.unlock(); - static const size_t kCtxCount = 2 * kCtxPoolSize; - for (uint64_t i = 1; i <= kCtxCount; ++i) { - pool->getContext(i); - } - })); - } - - for (size_t i = 0; i < kCancelThreadsCount; ++i) { - threads.emplace_back(std::thread([&pool, &cond, &mtx, &awaitCount]() { - std::unique_lock lck(mtx); - awaitCount.fetch_add(1, std::memory_order_relaxed); - cond.wait(lck); - lck.unlock(); - static const size_t kCtxCount = 2 * kCtxPoolSize; - for (uint64_t i = 1; i <= kCtxCount; ++i) { - pool->cancelContext(i, CancelType::Explicit); - } - })); - } - - for (size_t i = 0; i < kRemoveThreadsCount; ++i) { - threads.emplace_back(std::thread([&pool, &cond, &mtx, &awaitCount]() { - std::unique_lock lck(mtx); - awaitCount.fetch_add(1, std::memory_order_relaxed); - cond.wait(lck); - lck.unlock(); - static const size_t kCtxCount = 2 * kCtxPoolSize; - for (uint64_t i = 1; i <= kCtxCount; ++i) { - pool->removeContext(i); - } - })); - } - - while (awaitCount.load(std::memory_order_acquire) < kRemoveThreadsCount + kCancelThreadsCount + kGetThreadsCount) { - std::this_thread::yield(); - } - std::unique_lock lck(mtx); - cond.notify_all(); - lck.unlock(); - for (auto& thread : threads) { - thread.join(); - } - } -} - -} // namespace CGOCtxPoolTests diff --git a/cpp_src/gtests/tests/unit/clientsstats_test.cc b/cpp_src/gtests/tests/unit/clientsstats_test.cc deleted file mode 100644 index e92769a92..000000000 --- a/cpp_src/gtests/tests/unit/clientsstats_test.cc +++ /dev/null @@ -1,250 +0,0 @@ -#include "clientsstats_api.h" -#include "coroutine/waitgroup.h" -#include "gason/gason.h" -#include "reindexer_version.h" -#include "tools/semversion.h" -#include "tools/stringstools.h" - -using reindexer::net::ev::dynamic_loop; -using reindexer::client::CoroReindexer; -using reindexer::client::CoroQueryResults; -using reindexer::client::CoroTransaction; -using reindexer::coroutine::wait_group; - -TEST_F(ClientsStatsApi, ClientsStatsConcurrent) { - // ClientsStats should work without races in concurrent environment - RunServerInThread(true); - dynamic_loop loop; - bool finished = false; - loop.spawn([this, &loop, &finished] { - CoroReindexer reindexer; - reindexer::client::ConnectOpts opts; - auto err = reindexer.Connect(GetConnectionString(), loop, opts.CreateDBIfMissing()); - ASSERT_TRUE(err.ok()) << err.what(); - SetProfilingFlag(true, "profiling.activitystats", reindexer); - - RunNSelectThread(5, 5); - RunNReconnectThread(10); - 
loop.sleep(std::chrono::seconds(5)); - StopThreads(); - finished = true; - }); - loop.run(); - ASSERT_TRUE(finished); -} - -TEST_F(ClientsStatsApi, ClientsStatsData) { - // Should be able to get data from #clientsstats for each connection - RunServerInThread(true); - dynamic_loop loop; - bool finished = false; - loop.spawn([this, &loop, &finished] { - const size_t kConnectionCount = 10; - std::vector> nClients; - nClients.reserve(kConnectionCount); - wait_group wg; - for (size_t i = 0; i < kConnectionCount; i++) { - loop.spawn(wg, [this, &loop, &nClients] { - std::unique_ptr clientPtr(new CoroReindexer); - reindexer::client::ConnectOpts opts; - auto err = clientPtr->Connect(GetConnectionString(), loop, opts.CreateDBIfMissing()); - ASSERT_TRUE(err.ok()) << err.what(); - CoroQueryResults result; - err = clientPtr->Select(reindexer::Query("#namespaces"), result); - ASSERT_TRUE(err.ok()) << err.what(); - nClients.emplace_back(std::move(clientPtr)); - }); - } - wg.wait(); - CoroQueryResults result; - auto err = nClients[0]->Select(reindexer::Query("#clientsstats"), result); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(result.Count(), kConnectionCount); - finished = true; - }); - loop.run(); - ASSERT_TRUE(finished); -} - -TEST_F(ClientsStatsApi, ClientsStatsOff) { - // Should get empty result if #clientsstats are disabled - RunServerInThread(false); - dynamic_loop loop; - bool finished = false; - loop.spawn([this, &loop, &finished] { - CoroReindexer reindexer; - reindexer::client::ConnectOpts opts; - auto err = reindexer.Connect(GetConnectionString(), loop, opts.CreateDBIfMissing()); - ASSERT_TRUE(err.ok()) << err.what(); - CoroQueryResults resultNs; - err = reindexer.Select(reindexer::Query("#namespaces"), resultNs); - ASSERT_TRUE(err.ok()) << err.what(); - CoroQueryResults resultCs; - err = reindexer.Select(reindexer::Query("#clientsstats"), resultCs); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(resultCs.Count(), 0); - finished = true; - }); - loop.run(); - ASSERT_TRUE(finished); -} - -TEST_F(ClientsStatsApi, ClientsStatsValues) { - // Should get correct data for specific connection - RunServerInThread(true); - dynamic_loop loop; - bool finished = false; - - loop.spawn([this, &loop, &finished] { - reindexer::client::ReindexerConfig config; - config.AppName = kAppName; - CoroReindexer reindexer(config); - reindexer::client::ConnectOpts opts; - auto err = reindexer.Connect(GetConnectionString(), loop, opts.CreateDBIfMissing()); - ASSERT_TRUE(err.ok()) << err.what(); - - std::string nsName("ns1"); - err = reindexer.OpenNamespace(nsName); - ASSERT_TRUE(err.ok()) << err.what(); - - auto tx1 = reindexer.NewTransaction(nsName); - ASSERT_FALSE(tx1.IsFree()); - auto tx2 = reindexer.NewTransaction(nsName); - ASSERT_FALSE(tx2.IsFree()); - - auto beginTs = std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()).count(); - loop.sleep(std::chrono::milliseconds(2000)); // Timeout to update send/recv rate - CoroQueryResults resultNs; - err = reindexer.Select(reindexer::Query("#namespaces"), resultNs); - SetProfilingFlag(true, "profiling.activitystats", reindexer); - - CoroQueryResults resultCs; - const std::string selectClientsStats = "SELECT * FROM #clientsstats"; - err = reindexer.Select(selectClientsStats, resultCs); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(resultCs.Count(), 1); - auto it = resultCs.begin(); - reindexer::WrSerializer wrser; - err = it.GetJSON(wrser, false); - ASSERT_TRUE(err.ok()) << err.what(); - auto endTs = 
std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()).count(); - - gason::JsonParser parser; - gason::JsonNode clientsStats = parser.Parse(wrser.Slice()); - std::string curActivity = clientsStats["current_activity"].As(); - EXPECT_TRUE(curActivity == selectClientsStats) << "curActivity = [" << curActivity << "]"; - std::string curIP = clientsStats["ip"].As(); - std::vector addrParts; - reindexer::split(curIP, ":", false, addrParts); - EXPECT_EQ(addrParts.size(), 2); - EXPECT_EQ(addrParts[0], kipaddress) << curIP; - int port = std::atoi(addrParts[1].c_str()); - EXPECT_GT(port, 0) << curIP; - EXPECT_NE(port, kPortI) << curIP; - int64_t sentBytes = clientsStats["sent_bytes"].As(); - EXPECT_GT(sentBytes, 0); - int64_t recvBytes = clientsStats["recv_bytes"].As(); - EXPECT_GT(recvBytes, 0); - std::string userName = clientsStats["user_name"].As(); - EXPECT_EQ(userName, kUserName); - std::string dbName = clientsStats["db_name"].As(); - EXPECT_EQ(dbName, kdbName); - std::string appName = clientsStats["app_name"].As(); - EXPECT_EQ(appName, kAppName); - std::string userRights = clientsStats["user_rights"].As(); - EXPECT_EQ(userRights, "owner"); - std::string clientVersion = clientsStats["client_version"].As(); - EXPECT_EQ(clientVersion, reindexer::SemVersion(REINDEX_VERSION).StrippedString()); - uint32_t txCount = clientsStats["tx_count"].As(); - EXPECT_EQ(txCount, 2); - int64_t sendBufBytes = clientsStats["send_buf_bytes"].As(-1); - EXPECT_EQ(sendBufBytes, 0); - int64_t sendRate = clientsStats["send_rate"].As(); - EXPECT_GT(sendRate, 0); - int64_t recvRate = clientsStats["recv_rate"].As(); - EXPECT_GT(recvRate, 0); - int64_t lastSendTs = clientsStats["last_send_ts"].As(); - EXPECT_GT(lastSendTs, beginTs); - EXPECT_LE(lastSendTs, endTs); - int64_t lastRecvTs = clientsStats["last_recv_ts"].As(); - EXPECT_GT(lastRecvTs, beginTs); - EXPECT_LE(lastRecvTs, endTs); - - CoroQueryResults qr1; - err = reindexer.CommitTransaction(tx1, qr1); - ASSERT_TRUE(err.ok()) << err.what(); - CoroQueryResults qr2; - err = reindexer.CommitTransaction(tx2, qr2); - ASSERT_TRUE(err.ok()) << err.what(); - - finished = true; - }); - - loop.run(); - ASSERT_TRUE(finished); -} - -TEST_F(ClientsStatsApi, TxCountLimitation) { - // Should get correct data about running client's transactions - RunServerInThread(true); - dynamic_loop loop; - bool finished = false; - loop.spawn([this, &loop, &finished] { - const size_t kMaxTxCount = 1024; - CoroReindexer reindexer; - reindexer::client::ConnectOpts opts; - auto err = reindexer.Connect(GetConnectionString(), loop, opts.CreateDBIfMissing()); - ASSERT_TRUE(err.ok()) << err.what(); - - std::string nsName("ns1"); - err = reindexer.OpenNamespace(nsName); - ASSERT_TRUE(err.ok()) << err.what(); - - std::vector txs; - txs.reserve(kMaxTxCount); - for (size_t i = 0; i < 2 * kMaxTxCount; ++i) { - auto tx = reindexer.NewTransaction(nsName); - if (tx.Status().ok()) { - ASSERT_FALSE(tx.IsFree()); - txs.emplace_back(std::move(tx)); - } - } - - ASSERT_EQ(txs.size(), kMaxTxCount); - ASSERT_EQ(StatsTxCount(reindexer), kMaxTxCount); - - for (size_t i = 0; i < kMaxTxCount / 2; ++i) { - if (i % 2) { - CoroQueryResults qr; - err = reindexer.CommitTransaction(txs[i], qr); - } else { - err = reindexer.RollBackTransaction(txs[i]); - } - ASSERT_TRUE(err.ok()) << err.what() << "; i = " << i; - } - - for (size_t i = 0; i < kMaxTxCount / 4; ++i) { - auto tx = reindexer.NewTransaction(nsName); - ASSERT_FALSE(tx.IsFree()); - ASSERT_TRUE(tx.Status().ok()); - txs[i] = std::move(tx); - } - 
ASSERT_EQ(StatsTxCount(reindexer), kMaxTxCount / 2 + kMaxTxCount / 4); - for (size_t i = 0; i < txs.size(); ++i) { - if (!txs[i].IsFree() && txs[i].Status().ok()) { - if (i % 2) { - CoroQueryResults qr; - err = reindexer.CommitTransaction(txs[i], qr); - } else { - err = reindexer.RollBackTransaction(txs[i]); - } - ASSERT_TRUE(err.ok()) << err.what() << "; i = " << i; - } - } - ASSERT_EQ(StatsTxCount(reindexer), 0); - finished = true; - }); - - loop.run(); - ASSERT_TRUE(finished); -} diff --git a/cpp_src/gtests/tests/unit/composite_indexes_test.cc b/cpp_src/gtests/tests/unit/composite_indexes_test.cc deleted file mode 100644 index e754bf7cf..000000000 --- a/cpp_src/gtests/tests/unit/composite_indexes_test.cc +++ /dev/null @@ -1,293 +0,0 @@ -#include "composite_indexes_api.h" - -using QueryResults = ReindexerApi::QueryResults; -using Item = ReindexerApi::Item; -using Reindexer = ReindexerApi::Reindexer; - -TEST_F(CompositeIndexesApi, CompositeIndexesAddTest) { - addCompositeIndex({kFieldNameBookid, kFieldNameBookid2}, CompositeIndexHash, IndexOpts().PK()); - fillNamespace(0, 100); - addCompositeIndex({kFieldNameTitle, kFieldNamePages}, CompositeIndexHash, IndexOpts()); - fillNamespace(101, 200); - addCompositeIndex({kFieldNameTitle, kFieldNamePrice}, CompositeIndexBTree, IndexOpts()); - fillNamespace(201, 300); -} - -TEST_F(CompositeIndexesApi, AddIndexWithExistingCompositeIndex) { - static constexpr const char* namespaceName = "test_ns_add_index"; - static const std::string kFieldNameComposite = std::string{kFieldNameName} + compositePlus + kFieldNameTitle; - static const std::vector suffixes = {"Vol1", "Vol2", "Vol3"}; - static constexpr int size = 10; - std::vector names; - names.reserve(size); - for (int i = 0; i < size; ++i) { - names.push_back(kFieldNameName + suffixes[i % suffixes.size()]); - } - - Error err = rt.reindexer->OpenNamespace(namespaceName); - ASSERT_TRUE(err.ok()) << err.what(); - DefineNamespaceDataset(namespaceName, {IndexDeclaration{kFieldNameBookid, "hash", "int", IndexOpts().PK(), 0}, - IndexDeclaration{kFieldNameBookid2, "hash", "int", IndexOpts(), 0}, - IndexDeclaration{kFieldNamePages, "hash", "int", IndexOpts(), 0}, - IndexDeclaration{kFieldNamePrice, "hash", "int", IndexOpts(), 0}, - IndexDeclaration{kFieldNameComposite.c_str(), "text", "composite", IndexOpts(), 0}}); - for (int i = 0; i < size; ++i) { - Item item = NewItem(namespaceName); - item[this->kFieldNameBookid] = i; - item[this->kFieldNameBookid2] = 777; - item[this->kFieldNamePages] = 1010; - item[this->kFieldNamePrice] = 1200; - item[this->kFieldNameName] = names[i]; - item[this->kFieldNameTitle] = kFieldNameTitle; - Upsert(namespaceName, item); - err = Commit(namespaceName); - ASSERT_TRUE(err.ok()) << err.what(); - } - err = rt.reindexer->AddIndex(namespaceName, {kFieldNameName, {kFieldNameName}, "text", "string", IndexOpts()}); - ASSERT_TRUE(err.ok()) << err.what(); -} - -static void selectAll(reindexer::Reindexer* reindexer, const std::string& ns) { - QueryResults qr; - Error err = reindexer->Select(Query(ns, 0, 1000, ModeAccurateTotal), qr); - ASSERT_TRUE(err.ok()) << err.what(); - - for (auto it : qr) { - ASSERT_TRUE(it.Status().ok()) << it.Status().what(); - reindexer::WrSerializer wrser; - err = it.GetJSON(wrser, false); - ASSERT_TRUE(err.ok()) << err.what(); - } -} - -TEST_F(CompositeIndexesApi, DropTest2) { - const std::string test_ns = "weird_namespace"; - auto err = rt.reindexer->OpenNamespace(test_ns, StorageOpts().Enabled(false)); - EXPECT_TRUE(err.ok()) << err.what(); - - err = 
rt.reindexer->AddIndex(test_ns, {"id", "hash", "int", IndexOpts().PK().Dense()}); - EXPECT_TRUE(err.ok()) << err.what(); - - for (int i = 0; i < 1000; ++i) { - Item item = NewItem(test_ns); - EXPECT_FALSE(!item); - EXPECT_TRUE(item.Status().ok()) << item.Status().what(); - - item["id"] = i + 1; - - err = rt.reindexer->Upsert(test_ns, item); - EXPECT_TRUE(err.ok()) << err.what(); - } - - err = rt.reindexer->Commit(test_ns); - EXPECT_TRUE(err.ok()) << err.what(); - - selectAll(rt.reindexer.get(), test_ns); - - reindexer::IndexDef idef("id"); - err = rt.reindexer->DropIndex(test_ns, idef); - EXPECT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->Commit(test_ns); - EXPECT_TRUE(err.ok()) << err.what(); - - selectAll(rt.reindexer.get(), test_ns); -} - -TEST_F(CompositeIndexesApi, CompositeIndexesDropTest) { - addCompositeIndex({kFieldNameBookid, kFieldNameBookid2}, CompositeIndexHash, IndexOpts().PK()); - fillNamespace(0, 100); - addCompositeIndex({kFieldNameTitle, kFieldNamePages}, CompositeIndexHash, IndexOpts()); - fillNamespace(101, 200); - addCompositeIndex({kFieldNameTitle, kFieldNamePrice}, CompositeIndexBTree, IndexOpts()); - fillNamespace(201, 300); - - dropIndex(getCompositeIndexName({kFieldNameTitle, kFieldNamePrice})); - fillNamespace(401, 500); - dropIndex(getCompositeIndexName({kFieldNameTitle, kFieldNamePages})); - fillNamespace(601, 700); -} - -TEST_F(CompositeIndexesApi, CompositeIndexesSelectTest) { - int priceValue = 77777, pagesValue = 88888; - const char* titleValue = "test book1 title"; - const char* nameValue = "test book1 name"; - - addCompositeIndex({kFieldNameBookid, kFieldNameBookid2}, CompositeIndexHash, IndexOpts().PK()); - fillNamespace(0, 100); - - std::string compositeIndexName(getCompositeIndexName({kFieldNamePrice, kFieldNamePages})); - addCompositeIndex({kFieldNamePrice, kFieldNamePages}, CompositeIndexHash, IndexOpts()); - - addOneRow(300, 3000, titleValue, pagesValue, priceValue, nameValue); - fillNamespace(101, 200); - - auto qr = execAndCompareQuery( - Query(default_namespace).WhereComposite(compositeIndexName.c_str(), CondEq, {{Variant(priceValue), Variant(pagesValue)}})); - ASSERT_EQ(qr.Count(), 1); - - Item pricePageRow = qr.begin().GetItem(false); - Variant selectedPrice = pricePageRow[kFieldNamePrice]; - Variant selectedPages = pricePageRow[kFieldNamePages]; - EXPECT_EQ(static_cast(selectedPrice), priceValue); - EXPECT_EQ(static_cast(selectedPages), pagesValue); - - Item titleNameRow = qr.begin().GetItem(false); - Variant selectedTitle = titleNameRow[kFieldNameTitle]; - Variant selectedName = titleNameRow[kFieldNameName]; - EXPECT_EQ(static_cast(selectedTitle)->compare(std::string(titleValue)), 0); - EXPECT_EQ(static_cast(selectedName)->compare(std::string(nameValue)), 0); - - execAndCompareQuery(Query(default_namespace).WhereComposite(compositeIndexName, CondLt, {{Variant(priceValue), Variant(pagesValue)}})); - execAndCompareQuery(Query(default_namespace).WhereComposite(compositeIndexName, CondLe, {{Variant(priceValue), Variant(pagesValue)}})); - execAndCompareQuery(Query(default_namespace).WhereComposite(compositeIndexName, CondGt, {{Variant(priceValue), Variant(pagesValue)}})); - execAndCompareQuery(Query(default_namespace).WhereComposite(compositeIndexName, CondGe, {{Variant(priceValue), Variant(pagesValue)}})); - - fillNamespace(301, 400); - - execAndCompareQuery( - Query(default_namespace) - .WhereComposite(compositeIndexName, CondRange, {{Variant(1), Variant(1)}, {Variant(priceValue), Variant(pagesValue)}})); - - std::vector intKeys; - 
intKeys.reserve(10); - for (int i = 0; i < 10; ++i) { - intKeys.emplace_back(VariantArray{Variant(i), Variant(i * 5)}); - } - execAndCompareQuery(Query(default_namespace).WhereComposite(compositeIndexName.c_str(), CondSet, intKeys)); - - dropIndex(compositeIndexName); - fillNamespace(401, 500); - - std::string compositeIndexName2(getCompositeIndexName({kFieldNameTitle, kFieldNameName})); - addCompositeIndex({kFieldNameTitle, kFieldNameName}, CompositeIndexBTree, IndexOpts()); - - fillNamespace(701, 900); - - execAndCompareQuery( - Query(default_namespace) - .WhereComposite(compositeIndexName2.c_str(), CondEq, {{Variant(std::string(titleValue)), Variant(std::string(nameValue))}})); - execAndCompareQuery( - Query(default_namespace) - .WhereComposite(compositeIndexName2.c_str(), CondGe, {{Variant(std::string(titleValue)), Variant(std::string(nameValue))}})); - execAndCompareQuery( - Query(default_namespace) - .WhereComposite(compositeIndexName2.c_str(), CondLt, {{Variant(std::string(titleValue)), Variant(std::string(nameValue))}})); - execAndCompareQuery( - Query(default_namespace) - .WhereComposite(compositeIndexName2.c_str(), CondLe, {{Variant(std::string(titleValue)), Variant(std::string(nameValue))}})); - - fillNamespace(1201, 2000); - - std::vector stringKeys; - for (size_t i = 0; i < 1010; ++i) { - stringKeys.emplace_back(VariantArray{Variant(RandString()), Variant(RandString())}); - } - execAndCompareQuery(Query(default_namespace).WhereComposite(compositeIndexName2.c_str(), CondSet, stringKeys)); - execAndCompareQuery( - Query(default_namespace) - .Where(kFieldNameName, CondEq, nameValue) - .WhereComposite(compositeIndexName2.c_str(), CondEq, {{Variant(std::string(titleValue)), Variant(std::string(nameValue))}})); - - dropIndex(compositeIndexName2); - fillNamespace(201, 300); - - execAndCompareQuery(Query(default_namespace)); -} - -TEST_F(CompositeIndexesApi, SelectsBySubIndexes) { - // Check if selects work for composite index parts - struct Case { - std::string name; - const std::vector idxs; - }; - - const std::vector caseSet = {Case{"hash+store", - {IndexDeclaration{CompositeIndexesApi::kFieldNameBookid, "hash", "int", IndexOpts().PK(), 0}, - IndexDeclaration{CompositeIndexesApi::kFieldNamePrice, "hash", "int", IndexOpts(), 0}, - IndexDeclaration{CompositeIndexesApi::kFieldNamePages, "-", "int", IndexOpts(), 0}}}, - Case{"store+hash", - {IndexDeclaration{CompositeIndexesApi::kFieldNameBookid, "hash", "int", IndexOpts().PK(), 0}, - IndexDeclaration{CompositeIndexesApi::kFieldNamePrice, "-", "int", IndexOpts(), 0}, - IndexDeclaration{CompositeIndexesApi::kFieldNamePages, "hash", "int", IndexOpts(), 0}}}, - Case{"store+store", - {IndexDeclaration{CompositeIndexesApi::kFieldNameBookid, "hash", "int", IndexOpts().PK(), 0}, - IndexDeclaration{CompositeIndexesApi::kFieldNamePrice, "-", "int", IndexOpts(), 0}, - IndexDeclaration{CompositeIndexesApi::kFieldNamePages, "-", "int", IndexOpts(), 0}}}, - Case{"hash+tree", - {IndexDeclaration{CompositeIndexesApi::kFieldNameBookid, "hash", "int", IndexOpts().PK(), 0}, - IndexDeclaration{CompositeIndexesApi::kFieldNamePrice, "hash", "int", IndexOpts(), 0}, - IndexDeclaration{CompositeIndexesApi::kFieldNamePages, "tree", "int", IndexOpts(), 0}}}}; - - for (const auto& c : caseSet) { - auto err = rt.reindexer->DropNamespace(default_namespace); - ASSERT_TRUE(err.ok()) << c.name; - err = rt.reindexer->OpenNamespace(default_namespace); - ASSERT_TRUE(err.ok()) << c.name; - DefineNamespaceDataset(default_namespace, c.idxs); - std::string 
compositeIndexName(getCompositeIndexName({kFieldNamePrice, kFieldNamePages})); - addCompositeIndex({kFieldNamePrice, kFieldNamePages}, CompositeIndexHash, IndexOpts()); - - int priceValue = 77777, pagesValue = 88888, bookid = 300; - const char* titleValue = "test book1 title"; - const char* nameValue = "test book1 name"; - for (int i = -5; i < 10; ++i) { - addOneRow(bookid + i, 3000 + i, titleValue + std::to_string(i), pagesValue, priceValue + i, nameValue + std::to_string(i)); - } - auto qr = execAndCompareQuery( - Query(default_namespace).Explain().Where(kFieldNamePrice, CondEq, {priceValue}).Where(kFieldNameBookid, CondEq, {bookid})); - ASSERT_EQ(qr.Count(), 1) << c.name; - auto item = qr.begin().GetItem(); - EXPECT_EQ(item[kFieldNameBookid].As(), bookid) << c.name; - EXPECT_EQ(item[kFieldNamePrice].As(), priceValue) << c.name; - EXPECT_EQ(item[kFieldNamePages].As(), pagesValue) << c.name; - } -} - -TEST_F(CompositeIndexesApi, CompositeOverCompositeTest) { - constexpr char kExpectedErrorPattern[] = "Cannot create composite index '%s' over the other composite '%s'"; - constexpr size_t stepSize = 10; - size_t from = 0, to = stepSize; - auto addData = [this, &from, &to] { - fillNamespace(from, to); - from += stepSize; - to += stepSize; - }; - auto checkError = [this, &addData, &kExpectedErrorPattern](std::initializer_list compositeFields, - const std::string& singleField, CompositeIndexType type) { - auto compositeName = getCompositeIndexName(std::move(compositeFields)); - auto err = tryAddCompositeIndex({compositeName, singleField}, type, IndexOpts()); - EXPECT_EQ(err.code(), errParams) << compositeName; - EXPECT_EQ(err.what(), fmt::sprintf(kExpectedErrorPattern, getCompositeIndexName({compositeName, singleField}), compositeName)) - << compositeName; - addData(); - - err = tryAddCompositeIndex({singleField, compositeName}, type, IndexOpts()); - EXPECT_EQ(err.code(), errParams) << compositeName; - EXPECT_EQ(err.what(), fmt::sprintf(kExpectedErrorPattern, getCompositeIndexName({singleField, compositeName}), compositeName)) - << compositeName; - addData(); - }; - - addCompositeIndex({kFieldNameBookid, kFieldNameBookid2}, CompositeIndexHash, IndexOpts().PK()); - addData(); - - const std::string kNewIdxName = "new_idx"; - auto err = rt.reindexer->AddIndex(default_namespace, {kNewIdxName, {kNewIdxName}, "-", "int", IndexOpts()}); - ASSERT_TRUE(err.ok()) << err.what(); - addData(); - - addCompositeIndex({kFieldNameTitle, kNewIdxName}, CompositeIndexHash, IndexOpts()); - addData(); - - checkError({kFieldNameBookid, kFieldNameBookid2}, kNewIdxName, CompositeIndexBTree); - checkError({kFieldNameTitle, kNewIdxName}, kFieldNamePrice, CompositeIndexBTree); - checkError({kFieldNameBookid, kFieldNameBookid2}, kFieldNamePrice, CompositeIndexHash); - - const auto kComposite1 = getCompositeIndexName({kFieldNameBookid, kFieldNameBookid2}); - const auto kComposite2 = getCompositeIndexName({kFieldNameTitle, kNewIdxName}); - err = tryAddCompositeIndex({kComposite1, kComposite2}, CompositeIndexHash, IndexOpts()); - EXPECT_EQ(err.code(), errParams); - EXPECT_EQ(err.what(), fmt::sprintf(kExpectedErrorPattern, getCompositeIndexName({kComposite1, kComposite2}), kComposite1)); - addData(); -} diff --git a/cpp_src/gtests/tests/unit/coroutines_test.cc b/cpp_src/gtests/tests/unit/coroutines_test.cc deleted file mode 100644 index 2a891eeb5..000000000 --- a/cpp_src/gtests/tests/unit/coroutines_test.cc +++ /dev/null @@ -1,358 +0,0 @@ -#include -#include -#include - -#include -#include "gtests/tests/gtest_cout.h" -#include 
"net/ev/ev.h" - -using reindexer::net::ev::dynamic_loop; -using std::chrono::duration_cast; -using std::chrono::high_resolution_clock; -using std::chrono::seconds; -using std::chrono::milliseconds; -using reindexer::coroutine::channel; - -template -static void OutputVector(const std::vector& vec) { - for (auto it = vec.begin(); it != vec.end(); ++it) { - std::cerr << *it; - if (it + 1 == vec.end()) { - std::cerr << " }" << std::endl; - } else { - std::cerr << ", "; - } - } -} - -TEST(Coroutines, Timers) { - // Should be able to handle multiple timers in concurrent coroutines - constexpr auto kSleepTime = milliseconds(500); -#ifdef REINDEX_WITH_TSAN - constexpr auto kCoroCount = 100; -#else - constexpr auto kCoroCount = 500; -#endif - dynamic_loop loop; - size_t counter = 0; - for (size_t i = 0; i < kCoroCount; ++i) { - loop.spawn([&loop, &counter, kSleepTime] { - std::vector v = {1, 2, 3}; // Check if destructor was called - (void)v; - loop.sleep(kSleepTime); - ++counter; - }); - } - auto beg = high_resolution_clock::now(); - loop.run(); - auto diff = high_resolution_clock::now() - beg; - ASSERT_TRUE(diff > kSleepTime) << "Diff: " << duration_cast(diff).count() << " milliseconds"; - ASSERT_TRUE(diff < 6 * kSleepTime) << "Diff: " << duration_cast(diff).count() << " milliseconds"; - ASSERT_EQ(counter, kCoroCount); -} - -TEST(Coroutines, LoopDestructor) { - // Loop should await coroutines's completions on dectrution - constexpr auto kSleepTime = milliseconds(100); - constexpr auto kCoroCount = 1000; - size_t counter = 0; - { - dynamic_loop loop; - for (size_t i = 0; i < kCoroCount; ++i) { - loop.spawn([&loop, &counter, kSleepTime] { - std::vector v = {1, 2, 3}; // Check if destructor was called - (void)v; - loop.sleep(kSleepTime); - ++counter; - }); - } - } - ASSERT_EQ(counter, kCoroCount); -} - -TEST(Coroutines, StressTest) { - // Any number of concurrent coroutines and channels should work properly with sanitizers - size_t counter = 0; - dynamic_loop loop; - std::vector>> vec; - auto storage_size = reindexer::coroutine::shrink_storage(); - ASSERT_EQ(storage_size, 0); - size_t finishedCoroutines = 0; - int64_t userCallbackId = - reindexer::coroutine::add_completion_callback([&finishedCoroutines](reindexer::coroutine::routine_t) { ++finishedCoroutines; }); - for (size_t i = 0; i < 50; ++i) { - loop.spawn([&loop, &counter, &vec] { - for (size_t i = 0; i < 100; ++i) { - constexpr size_t kCnt = 5; - auto chPtr = std::unique_ptr>(new channel(kCnt)); - auto& ch = *chPtr; - vec.emplace_back(std::move(chPtr)); - loop.spawn([&ch, &counter] { - for (size_t i = 0; i < 2 * kCnt; ++i) { - auto res = ch.pop(); - ASSERT_TRUE(res.second); - (void)res; - } - ++counter; - }); - for (size_t i = 0; i < kCnt; ++i) { - loop.spawn([&ch, &counter] { - for (size_t i = 0; i < 2; ++i) { - ch.push(int(i)); - } - ++counter; - }); - } - } - ++counter; - }); - } - loop.run(); - constexpr size_t kExpectedTotal = 30050; - ASSERT_EQ(counter, kExpectedTotal); - ASSERT_EQ(finishedCoroutines, kExpectedTotal); - - int res = reindexer::coroutine::remove_completion_callback(userCallbackId); - ASSERT_EQ(res, 0); - res = reindexer::coroutine::remove_completion_callback(userCallbackId); - ASSERT_NE(res, 0); -} - -TEST(Coroutines, ClosedChannelWriting) { - // Closed channel should throw exception on write - dynamic_loop loop; - channel ch(10); - size_t exceptions = 0; - loop.spawn([&ch, &exceptions] { - for (size_t i = 0; i < ch.capacity(); ++i) { - try { - if (i == ch.capacity() / 2) { - ch.close(); - } - ch.push(int(i)); - } catch 
(std::exception&) { - ++exceptions; - } - } - }); - loop.run(); - ASSERT_EQ(exceptions, ch.capacity() / 2); -} - -TEST(Coroutines, ClosedChannelReading) { - // Closed channel should allow to read data on read (if there are any) and return error (if there are none) - dynamic_loop loop; - std::vector wData = {5, 3, 7, 15, 99, 22, 53, 44}; - std::vector rData; - rData.reserve(wData.size()); - channel ch(wData.size()); - loop.spawn([&ch, &wData] { - for (auto d : wData) { - try { - ch.push(d); - // We will get ASAN warning on exception, but this doesn't matter - } catch (std::exception&) { - ASSERT_TRUE(false); - } - } - ASSERT_EQ(ch.size(), wData.size()); - ch.close(); - }); - loop.spawn([&ch, &rData] { - auto dp = ch.pop(); - while (dp.second) { - rData.emplace_back(dp.first); - dp = ch.pop(); - } - }); - loop.run(); - if (wData != rData) { - std::cerr << "Expected data is:\n{ "; - OutputVector(wData); - std::cerr << "Actual data is:\n{ "; - OutputVector(rData); - ASSERT_TRUE(false); - } -} - -TEST(Coroutines, SchedulingOrder) { - // Coroutines should be scheduled in specified order - using reindexer::coroutine::create; - using reindexer::coroutine::current; - using reindexer::coroutine::resume; - using reindexer::coroutine::suspend; - using reindexer::coroutine::routine_t; - - TestCout() << "Expecting unhandled exception (and non-critical ASAN warning) for coroutine \"10\" here..." << std::endl; - - auto storage_size = reindexer::coroutine::shrink_storage(); - ASSERT_EQ(storage_size, 0); - std::vector order; - const std::vector kExpectedOrder = {0, 0, 1, 1, 2, 1, 1, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 7, 6, 5, 4, 3, 8, - 3, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 7, 2, 9, 2, 3, 7, 3, 2, 8, 9, 8, 2, 4, - 4, 2, 5, 5, 2, 6, 6, 2, 2, 2, 10, 10, 2, 2, 2, 2, 2, 10, 2, 2, 1, 1, 0}; - - auto testFn = [&order] { - order.emplace_back(current()); - auto fn1 = [&order] { - order.emplace_back(current()); - suspend(); - order.emplace_back(current()); - channel ch(5); - auto wrFn = [&order, &ch] { - order.emplace_back(current()); - ch.push(1); - order.emplace_back(current()); - }; - std::vector wVec; - for (size_t i = 0; i < 5; ++i) { - auto wId = create(wrFn); - ASSERT_TRUE(wId > 0) << size_t(wId); - order.emplace_back(current()); - wVec.emplace_back(wId); - } - auto rdFn = [&order, &ch] { - order.emplace_back(current()); - auto dp = ch.pop(); - ASSERT_TRUE(dp.second); - order.emplace_back(current()); - }; - std::vector rVec; - for (size_t i = 0; i < 3; ++i) { - auto rId = create(rdFn); - ASSERT_TRUE(rId > 0) << size_t(rId); - order.emplace_back(current()); - rVec.emplace_back(rId); - } - - int res = resume(rVec[0]); - ASSERT_EQ(res, 0); - order.emplace_back(current()); - res = resume(rVec[1]); - ASSERT_EQ(res, 0); - order.emplace_back(current()); - for (auto wId : wVec) { - res = resume(wId); - ASSERT_EQ(res, 0); - order.emplace_back(current()); - } - res = resume(rVec[0]); - ASSERT_TRUE(res < 0) << res; - order.emplace_back(current()); - - res = resume(rVec[1]); - ASSERT_TRUE(res < 0) << res; - order.emplace_back(current()); - - res = resume(rVec[2]); - ASSERT_EQ(res, 0); - order.emplace_back(current()); - - while (ch.size() < ch.capacity()) { - ch.push(0); - order.emplace_back(current()); - } - auto wId = create(wrFn); - ASSERT_TRUE(wId > 0) << size_t(wId); - order.emplace_back(current()); - res = resume(wId); - ASSERT_EQ(res, 0); - order.emplace_back(current()); - ch.close(); // We will get an unhandled exception in writing routine - // We will also get ASAN warning on exception, but this doesn't matter - 
order.emplace_back(current()); - ASSERT_EQ(ch.size(), ch.capacity()); - }; - auto coId1 = create(fn1); - ASSERT_TRUE(coId1 > 0) << size_t(coId1); - order.emplace_back(current()); - int res = resume(coId1); - ASSERT_EQ(res, 0); - order.emplace_back(current()); - - auto fn2 = [&order] { - order.emplace_back(current()); - channel ch(2); - auto coId = create([&order, &ch] { - order.emplace_back(current()); - auto coId = create([&order, &ch] { - order.emplace_back(current()); - auto coId = create([&order, &ch] { - order.emplace_back(current()); - auto coId = create([&order, &ch] { - order.emplace_back(current()); - auto coId = create([&order, &ch] { - order.emplace_back(current()); - auto dp = ch.pop(); - ASSERT_FALSE(dp.second); - order.emplace_back(current()); - }); - - ASSERT_TRUE(coId > 0) << size_t(coId); - order.emplace_back(current()); - int res = resume(coId); - ASSERT_EQ(res, 0); - order.emplace_back(current()); - }); - - ASSERT_TRUE(coId > 0) << size_t(coId); - order.emplace_back(current()); - int res = resume(coId); - ASSERT_EQ(res, 0); - order.emplace_back(current()); - }); - - ASSERT_TRUE(coId > 0) << size_t(coId); - order.emplace_back(current()); - int res = resume(coId); - ASSERT_EQ(res, 0); - order.emplace_back(current()); - }); - - ASSERT_TRUE(coId > 0) << coId; - order.emplace_back(current()); - int res = resume(coId); - ASSERT_EQ(res, 0); - order.emplace_back(current()); - }); - - ASSERT_TRUE(coId > 0) << size_t(coId); - order.emplace_back(current()); - int res = resume(coId); - ASSERT_EQ(res, 0); - order.emplace_back(current()); - ch.close(); - order.emplace_back(current()); - }; - auto coId2 = create(fn2); - ASSERT_TRUE(coId2 > 0) << size_t(coId2); - order.emplace_back(current()); - res = resume(coId2); - ASSERT_EQ(res, 0); - order.emplace_back(current()); - - res = resume(coId1); - ASSERT_EQ(res, 0); - order.emplace_back(current()); - - res = resume(coId1); - ASSERT_TRUE(res < 0) << res; - order.emplace_back(current()); - }; - - order.emplace_back(current()); - auto coId = create(testFn); - ASSERT_TRUE(coId > 0) << size_t(coId); - order.emplace_back(current()); - int res = resume(coId); - ASSERT_EQ(res, 0); - order.emplace_back(current()); - - if (order != kExpectedOrder) { - std::cerr << "Expected order is:\n{ "; - OutputVector(kExpectedOrder); - std::cerr << "Actual order is:\n{ "; - OutputVector(order); - ASSERT_TRUE(false); - } -} diff --git a/cpp_src/gtests/tests/unit/dsl_parser_test.cc b/cpp_src/gtests/tests/unit/dsl_parser_test.cc deleted file mode 100644 index 8ca312173..000000000 --- a/cpp_src/gtests/tests/unit/dsl_parser_test.cc +++ /dev/null @@ -1,104 +0,0 @@ -#include "join_selects_api.h" - -static void checkQueryDslParse(const reindexer::Query& q) { - const std::string dsl = q.GetJSON(); - Query parsedQuery; - Error err = parsedQuery.FromJSON(dsl); - ASSERT_TRUE(err.ok()) << err.what() << "\nDSL:\n" << dsl; - ASSERT_EQ(q, parsedQuery) << "DSL:\n" << dsl << "\nOriginal query:\n" << q.GetSQL() << "\nParsed query:\n" << parsedQuery.GetSQL(); -} - -TEST_F(JoinSelectsApi, JoinsDSLTest) { - Query queryGenres(genres_namespace); - Query queryAuthors(authors_namespace); - Query queryBooks{Query(books_namespace, 0, 10).Where(price, CondGe, 500)}; - queryBooks.OrInnerJoin(genreId_fk, genreid, CondEq, std::move(queryGenres)); - queryBooks.LeftJoin(authorid_fk, authorid, CondEq, std::move(queryAuthors)); - checkQueryDslParse(queryBooks); -} - -TEST_F(JoinSelectsApi, EqualPositionDSLTest) { - Query query = Query(default_namespace); - query.Where("f1", CondEq, 1).Where("f2", 
CondEq, 2).Or().Where("f3", CondEq, 2); - query.AddEqualPosition({"f1", "f2"}); - query.AddEqualPosition({"f1", "f3"}); - query.OpenBracket().Where("f4", CondEq, 4).Where("f5", CondLt, 10); - query.AddEqualPosition({"f4", "f5"}); - query.CloseBracket(); - checkQueryDslParse(query); -} - -TEST_F(JoinSelectsApi, MergedQueriesDSLTest) { - Query mainBooksQuery{Query(books_namespace, 0, 10).Where(price, CondGe, 500)}; - Query firstMergedQuery{Query(books_namespace, 10, 100).Where(pages, CondLe, 250)}; - Query secondMergedQuery{Query(books_namespace, 100, 50).Where(bookid, CondGe, 100)}; - - mainBooksQuery.Merge(std::move(firstMergedQuery)); - mainBooksQuery.Merge(std::move(secondMergedQuery)); - checkQueryDslParse(mainBooksQuery); -} - -TEST_F(JoinSelectsApi, AggregateFunctonsDSLTest) { - Query query{Query(books_namespace, 10, 100).Where(pages, CondGe, 150)}; - query.aggregations_.push_back({AggAvg, {price}}); - query.aggregations_.push_back({AggSum, {pages}}); - query.aggregations_.push_back({AggFacet, {title, pages}, {{{title, true}}}, 100, 10}); - checkQueryDslParse(query); -} - -TEST_F(JoinSelectsApi, SelectFilterDSLTest) { - Query query{Query(books_namespace, 10, 100).Where(pages, CondGe, 150).Select({price, pages, title})}; - checkQueryDslParse(query); -} - -TEST_F(JoinSelectsApi, SelectFilterInJoinDSLTest) { - Query queryBooks = Query(books_namespace, 0, 10).Select({price, title}); - { - Query queryAuthors = Query(authors_namespace).Select({authorid, age}); - - queryBooks.LeftJoin(authorid_fk, authorid, CondEq, std::move(queryAuthors)); - } - checkQueryDslParse(queryBooks); -} - -TEST_F(JoinSelectsApi, ReqTotalDSLTest) { - Query query{Query(books_namespace, 10, 100, ModeNoTotal).Where(pages, CondGe, 150)}; - checkQueryDslParse(query); - - query.CachedTotal(); - checkQueryDslParse(query); - - query.ReqTotal(); - checkQueryDslParse(query); -} - -TEST_F(JoinSelectsApi, SelectFunctionsDSLTest) { - Query query{Query(books_namespace, 10, 100).Where(pages, CondGe, 150)}; - query.AddFunction("f1()"); - query.AddFunction("f2()"); - query.AddFunction("f3()"); - checkQueryDslParse(query); -} - -TEST_F(JoinSelectsApi, CompositeValuesDSLTest) { - std::string pagesBookidIndex = pages + std::string("+") + bookid; - Query query{Query(books_namespace).WhereComposite(pagesBookidIndex.c_str(), CondGe, {{Variant(500), Variant(10)}})}; - checkQueryDslParse(query); -} - -TEST_F(JoinSelectsApi, GeneralDSLTest) { - Query queryGenres(genres_namespace); - Query queryAuthors(authors_namespace); - Query queryBooks{Query(books_namespace, 0, 10).Where(price, CondGe, 500)}; - Query innerJoinQuery = queryBooks.InnerJoin(authorid_fk, authorid, CondEq, std::move(queryAuthors)); - - Query testDslQuery = innerJoinQuery.OrInnerJoin(genreId_fk, genreid, CondEq, std::move(queryGenres)); - testDslQuery.Merge(std::move(queryBooks)); - testDslQuery.Merge(std::move(innerJoinQuery)); - testDslQuery.Select({genreid, bookid, authorid_fk}); - testDslQuery.AddFunction("f1()"); - testDslQuery.AddFunction("f2()"); - testDslQuery.aggregations_.push_back({AggDistinct, {bookid}}); - - checkQueryDslParse(testDslQuery); -} diff --git a/cpp_src/gtests/tests/unit/equalposition_tests.cc b/cpp_src/gtests/tests/unit/equalposition_tests.cc deleted file mode 100644 index 0a2138349..000000000 --- a/cpp_src/gtests/tests/unit/equalposition_tests.cc +++ /dev/null @@ -1,324 +0,0 @@ -#include "equalpositionapi.h" - -using QueryResults = ReindexerApi::QueryResults; -using Item = ReindexerApi::Item; -using Reindexer = ReindexerApi::Reindexer; - -bool 
Compare(const Variant& key1, const Variant& key2, CondType condType) { - int res = key1.Compare(key2); - switch (condType) { - case CondEq: - return res == 0; - case CondGe: - return res >= 0; - case CondGt: - return res > 0; - case CondLe: - return res <= 0; - case CondLt: - return res < 0; - case CondAny: - case CondRange: - case CondSet: - case CondAllSet: - case CondEmpty: - case CondLike: - case CondDWithin: - throw std::runtime_error("Do not support this operation yet!"); - } - return false; -} - -void VerifyQueryResult(const QueryResults& qr, const std::vector& fields, const std::vector& keys, - const std::vector& condTypes) { - EXPECT_TRUE(fields.size() == keys.size()); - EXPECT_TRUE(keys.size() == condTypes.size()); - size_t totalFound = 0; - for (auto& iter : qr) { - size_t len = INT_MAX; - Item it = iter.GetItem(false); - - std::vector vals(keys.size()); - for (size_t j = 0; j < fields.size(); ++j) { - VariantArray v = it[fields[j]]; - vals[j] = v; - len = std::min(static_cast(vals[j].size()), len); - } - size_t j = 0; - auto eof = [&j, &len]() { return j >= len; }; - bool equal = true; - for (;;) { - size_t key = 0; - while ((j < len) && !Compare(vals[key][j], keys[key], condTypes[key])) ++j; - if (eof()) break; - equal = true; - while (++key < keys.size()) { - equal &= Compare(vals[key][j], keys[key], condTypes[key]); - if (!equal) { - break; - } - } - if (equal) { - ++totalFound; - break; - } - ++j; - } - if (!equal) TEST_COUT << it.GetJSON() << std::endl; - } - EXPECT_TRUE(totalFound == qr.Count()) << " totalFound=" << totalFound << ", qr.Count()=" << qr.Count(); -} - -TEST_F(EqualPositionApi, SelectGt) { - QueryResults qr; - const Variant key1(static_cast(1050)); - const Variant key2(static_cast(2100)); - Query q{Query(default_namespace).Debug(LogTrace).Where(kFieldA1, CondGt, key1).Where(kFieldA2, CondGt, key2)}; - q.AddEqualPosition({kFieldA1, kFieldA2}); - Error err = rt.reindexer->Select(q, qr); - EXPECT_TRUE(err.ok()) << err.what(); - VerifyQueryResult(qr, {kFieldA1, kFieldA2}, {key1, key2}, {CondGt, CondGt}); -} - -TEST_F(EqualPositionApi, SelectGt2) { - QueryResults qr; - const Variant key1(static_cast(1120)); - const Variant key2(static_cast(2240)); - Query q{Query(default_namespace).Debug(LogTrace).Where(kFieldA1, CondGt, key1).Where(kFieldA2, CondGt, key2)}; - q.AddEqualPosition({kFieldA1, kFieldA2}); - Error err = rt.reindexer->Select(q, qr); - EXPECT_TRUE(err.ok()) << err.what(); - VerifyQueryResult(qr, {kFieldA1, kFieldA2}, {key1, key2}, {CondGt, CondGt}); -} - -TEST_F(EqualPositionApi, SelectGe) { - QueryResults qr; - const Variant key1(static_cast(1120)); - const Variant key2(static_cast(2240)); - Query q{Query(default_namespace).Debug(LogTrace).Where(kFieldA1, CondGe, key1).Where(kFieldA2, CondGe, key2)}; - q.AddEqualPosition({kFieldA1, kFieldA2}); - Error err = rt.reindexer->Select(q, qr); - EXPECT_TRUE(err.ok()) << err.what(); - VerifyQueryResult(qr, {kFieldA1, kFieldA2}, {key1, key2}, {CondGe, CondGe}); -} - -TEST_F(EqualPositionApi, SelectGe2) { - QueryResults qr; - const Variant key1(static_cast(0)); - const Variant key2(static_cast(0)); - Query q{Query(default_namespace).Debug(LogTrace).Where(kFieldA1, CondGe, key1).Where(kFieldA2, CondGe, key2)}; - q.AddEqualPosition({kFieldA1, kFieldA2}); - Error err = rt.reindexer->Select(q, qr); - EXPECT_TRUE(err.ok()) << err.what(); - VerifyQueryResult(qr, {kFieldA1, kFieldA2}, {key1, key2}, {CondGe, CondGe}); -} - -TEST_F(EqualPositionApi, SelectLt) { - QueryResults qr; - const Variant key1(static_cast(400)); - 
const Variant key2(static_cast(800)); - Query q{Query(default_namespace).Debug(LogTrace).Where(kFieldA1, CondLt, key1).Where(kFieldA2, CondLt, key2)}; - q.AddEqualPosition({kFieldA1, kFieldA2}); - Error err = rt.reindexer->Select(q, qr); - EXPECT_TRUE(err.ok()) << err.what(); - VerifyQueryResult(qr, {kFieldA1, kFieldA2}, {key1, key2}, {CondLt, CondLt}); -} - -TEST_F(EqualPositionApi, SelectEq) { - QueryResults qr; - const Variant key1(static_cast(900)); - const Variant key2(static_cast(1800)); - const Variant key3(static_cast(2700)); - Query q{ - Query(default_namespace).Debug(LogTrace).Where(kFieldA1, CondEq, key1).Where(kFieldA2, CondEq, key2).Where(kFieldA3, CondEq, key3)}; - q.AddEqualPosition({kFieldA1, kFieldA2, kFieldA3}); - Error err = rt.reindexer->Select(q, qr); - EXPECT_TRUE(err.ok()) << err.what(); - VerifyQueryResult(qr, {kFieldA1, kFieldA2, kFieldA3}, {key1, key2, key3}, {CondEq, CondEq, CondEq}); -} - -TEST_F(EqualPositionApi, SelectNonIndexedArrays) { - const char* ns = "ns2"; - Error err = rt.reindexer->OpenNamespace(ns, StorageOpts().Enabled(false)); - EXPECT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->AddIndex(ns, {"id", "hash", "string", IndexOpts().PK()}); - EXPECT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->Commit(ns); - EXPECT_TRUE(err.ok()) << err.what(); - - const char jsonPattern[] = R"xxx({"id": "%s", "nested": {"a1": [%d, %d, %d], "a2": [%d, %d, %d], "a3": [%d, %d, %d]}})xxx"; - - for (int i = 0; i < 100; ++i) { - Item item = rt.reindexer->NewItem(ns); - EXPECT_TRUE(item.Status().ok()) << item.Status().what(); - - char json[1024]; - std::string pk("pk" + std::to_string(i)); - snprintf(json, sizeof(json) - 1, jsonPattern, pk.c_str(), rand() % 10, rand() % 10, rand() % 10, rand() % 10, rand() % 10, - rand() % 10, rand() % 10, rand() % 10, rand() % 10); - - err = item.FromJSON(json); - EXPECT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->Upsert(ns, item); - EXPECT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->Commit(ns); - EXPECT_TRUE(err.ok()) << err.what(); - } - - QueryResults qr; - const Variant key1(static_cast(3)); - const Variant key2(static_cast(4)); - Query q{Query(ns).Debug(LogTrace).Where("nested.a2", CondGe, key1).Where("nested.a3", CondGe, key2)}; - q.AddEqualPosition({"nested.a2", "nested.a3"}); - err = rt.reindexer->Select(q, qr); - EXPECT_TRUE(err.ok()) << err.what(); - VerifyQueryResult(qr, {"nested.a2", "nested.a3"}, {key1, key2}, {CondGe, CondGe}); -} - -TEST_F(EqualPositionApi, SelectMixedArrays) { - const char* ns = "ns2"; - Error err = rt.reindexer->OpenNamespace(ns, StorageOpts().Enabled(false)); - EXPECT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->AddIndex(ns, {"id", "hash", "string", IndexOpts().PK()}); - EXPECT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->AddIndex(ns, {"a1", "hash", "int64", IndexOpts().Array()}); - EXPECT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->Commit(ns); - EXPECT_TRUE(err.ok()) << err.what(); - - const char jsonPattern[] = R"xxx({"id": "%s", "a1": [%d, %d, %d], "nested": {"a2": [%d, %d, %d], "a3": [%d, %d, %d]}})xxx"; - - for (int i = 0; i < 100; ++i) { - Item item = rt.reindexer->NewItem(ns); - EXPECT_TRUE(item.Status().ok()) << item.Status().what(); - - char json[1024]; - std::string pk("pk" + std::to_string(i)); - snprintf(json, sizeof(json) - 1, jsonPattern, pk.c_str(), rand() % 10, rand() % 10, rand() % 10, rand() % 10, rand() % 10, - rand() % 10, rand() % 10, rand() % 10, rand() % 10); - - err = item.FromJSON(json); - EXPECT_TRUE(err.ok()) << 
err.what(); - - err = rt.reindexer->Upsert(ns, item); - EXPECT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->Commit(ns); - EXPECT_TRUE(err.ok()) << err.what(); - } - - QueryResults qr; - const Variant key1(static_cast(4)); - const Variant key2(static_cast(5)); - Query q{Query(ns).Debug(LogTrace).Where("a1", CondGe, key1).Where("nested.a2", CondGe, key2)}; - q.AddEqualPosition({"a1", "nested.a2"}); - err = rt.reindexer->Select(q, qr); - EXPECT_TRUE(err.ok()) << err.what(); - VerifyQueryResult(qr, {"a1", "nested.a2"}, {key1, key2}, {CondGe, CondGe}); -} - -TEST_F(EqualPositionApi, EmptyCompOpErr) { - const char* ns = "ns2"; - Error err = rt.reindexer->OpenNamespace(ns, StorageOpts().Enabled(false)); - EXPECT_TRUE(err.ok()) << err.what(); - err = rt.reindexer->AddIndex(ns, {"id", "hash", "int", IndexOpts().PK()}); - EXPECT_TRUE(err.ok()) << err.what(); - const char jsonPattern[] = R"xxx({"id": %d, "a1": [10, 20, 30], "a2": [20, 30, 40]}})xxx"; - for (int i = 0; i < 10; ++i) { - Item item = rt.reindexer->NewItem(ns); - EXPECT_TRUE(item.Status().ok()) << item.Status().what(); - - char json[1024]; - std::string pk("pk" + std::to_string(i)); - - snprintf(json, sizeof(json) - 1, jsonPattern, i); - - err = item.FromJSON(json); - EXPECT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->Upsert(ns, item); - EXPECT_TRUE(err.ok()) << err.what(); - } - { - QueryResults qr; - Query q = Query::FromSQL("SELECT * FROM ns2 WHERE a1=10 AND a2=20 equal_position(a1, a2)"); - err = rt.reindexer->Select(q, qr); - EXPECT_TRUE(err.ok()) << err.what(); - } - { - QueryResults qr; - Query q = Query::FromSQL("SELECT * FROM ns2 WHERE a1 IS NULL AND a2=20 equal_position(a1, a2)"); - err = rt.reindexer->Select(q, qr); - EXPECT_TRUE(err.what() == "Condition IN(with empty parameter list), IS NULL, IS EMPTY not allowed for equal position!") - << err.what(); - EXPECT_FALSE(err.ok()); - } - { - QueryResults qr; - Query q = Query::FromSQL("SELECT * FROM ns2 WHERE a1 =10 AND a2 IS EMPTY equal_position(a1, a2)"); - err = rt.reindexer->Select(q, qr); - EXPECT_TRUE(err.what() == "Condition IN(with empty parameter list), IS NULL, IS EMPTY not allowed for equal position!") - << err.what(); - EXPECT_FALSE(err.ok()); - } - { - QueryResults qr; - Query q = Query::FromSQL("SELECT * FROM ns2 WHERE a1 IN () AND a2 IS EMPTY equal_position(a1, a2)"); - err = rt.reindexer->Select(q, qr); - EXPECT_TRUE(err.what() == "Condition IN(with empty parameter list), IS NULL, IS EMPTY not allowed for equal position!") - << err.what(); - EXPECT_FALSE(err.ok()); - } -} - -// Make sure equal_position() works only with unique fields -TEST_F(EqualPositionApi, SamePosition) { - QueryResults qr; - const Variant key(static_cast(1050)); - // Build query that contains conditions for field 'a1' - Query q{Query(default_namespace).Debug(LogTrace).Where(kFieldA1, CondGt, key).Where(kFieldA1, CondGt, key)}; - // query contains equal_position() for field 'a1' twice - q.AddEqualPosition({kFieldA1, kFieldA1}); - // Make sure processing this query leads to error - const Error err = rt.reindexer->Select(q, qr); - EXPECT_FALSE(err.ok()); - EXPECT_EQ(err.what(), "equal positions fields should be unique: [a1, a1]"); -} - -// Make sure equal_position() works only with unique fields -// when it set by SQL query -TEST_F(EqualPositionApi, SamePositionFromSql) { - QueryResults qr; - // SQL query contains equal_position() for field 'a1' twice - const std::string_view sql = "select * from test_namespace where a1 > 0 and a1 < 10 equal_position(a1, a1)"; - Query q = 
Query::FromSQL(sql); - // Make sure processing this query leads to error - const Error err = rt.reindexer->Select(q, qr); - EXPECT_FALSE(err.ok()); - EXPECT_EQ(err.what(), "equal positions fields should be unique: [a1, a1]"); -} - -TEST_F(EqualPositionApi, SelectBrackets) { - QueryResults qr; - const Variant key1(static_cast(900)); - const Variant key2(static_cast(1800)); - const Variant key3(static_cast(2700)); - Query q = Query(default_namespace) - .Debug(LogTrace) - .OpenBracket() - .Where(kFieldA1, CondEq, key1) - .Where(kFieldA2, CondEq, key2) - .Where(kFieldA3, CondEq, key3) - .AddEqualPosition({kFieldA1, kFieldA2, kFieldA3}) - .CloseBracket(); - Error err = rt.reindexer->Select(q, qr); - EXPECT_TRUE(err.ok()) << err.what(); - VerifyQueryResult(qr, {kFieldA1, kFieldA2, kFieldA3}, {key1, key2, key3}, {CondEq, CondEq, CondEq}); -} diff --git a/cpp_src/gtests/tests/unit/ft/ft_dsl.cc b/cpp_src/gtests/tests/unit/ft/ft_dsl.cc deleted file mode 100644 index 97095b48a..000000000 --- a/cpp_src/gtests/tests/unit/ft/ft_dsl.cc +++ /dev/null @@ -1,168 +0,0 @@ -#include -#include "core/ft/ftdsl.h" -#include "ft_api.h" - -using namespace std::string_view_literals; - -class FTDSLParserApi : public FTApi { -protected: - std::string_view GetDefaultNamespace() noexcept override { return "ft_dsl_default_namespace"; } - - template - bool AreFloatingValuesEqual(T a, T b) { - return std::abs(a - b) < std::numeric_limits::epsilon(); - } -}; - -TEST_P(FTDSLParserApi, MatchSymbolTest) { - FTDSLQueryParams params; - reindexer::FtDSLQuery ftdsl(params.fields, params.stopWords, params.extraWordSymbols); - ftdsl.parse("*search*this*"); - EXPECT_TRUE(ftdsl.size() == 2); - EXPECT_TRUE(ftdsl[0].opts.suff); - EXPECT_TRUE(ftdsl[0].opts.pref); - EXPECT_TRUE(ftdsl[0].pattern == L"search"); - EXPECT_TRUE(!ftdsl[1].opts.suff); - EXPECT_TRUE(ftdsl[1].opts.pref); - EXPECT_TRUE(ftdsl[1].pattern == L"this"); -} - -TEST_P(FTDSLParserApi, MisspellingTest) { - FTDSLQueryParams params; - reindexer::FtDSLQuery ftdsl(params.fields, params.stopWords, params.extraWordSymbols); - ftdsl.parse("black~ -white"); - EXPECT_TRUE(ftdsl.size() == 2); - EXPECT_TRUE(ftdsl[0].opts.typos); - EXPECT_TRUE(ftdsl[0].pattern == L"black"); - EXPECT_TRUE(!ftdsl[1].opts.typos); - EXPECT_TRUE(ftdsl[1].opts.op == OpNot); - EXPECT_TRUE(ftdsl[1].pattern == L"white"); -} - -TEST_P(FTDSLParserApi, FieldsPartOfRequest) { - FTDSLQueryParams params; - params.fields = {{"name", 0}, {"title", 1}}; - reindexer::FtDSLQuery ftdsl(params.fields, params.stopWords, params.extraWordSymbols); - ftdsl.parse("@name^1.5,+title^0.5 rush"); - EXPECT_EQ(ftdsl.size(), 1); - EXPECT_EQ(ftdsl[0].pattern, L"rush"); - EXPECT_EQ(ftdsl[0].opts.fieldsOpts.size(), 2); - EXPECT_TRUE(AreFloatingValuesEqual(ftdsl[0].opts.fieldsOpts[0].boost, 1.5f)); - EXPECT_FALSE(ftdsl[0].opts.fieldsOpts[0].needSumRank); - EXPECT_TRUE(AreFloatingValuesEqual(ftdsl[0].opts.fieldsOpts[1].boost, 0.5f)); - EXPECT_TRUE(ftdsl[0].opts.fieldsOpts[1].needSumRank); -} - -TEST_P(FTDSLParserApi, TermRelevancyBoostTest) { - FTDSLQueryParams params; - reindexer::FtDSLQuery ftdsl(params.fields, params.stopWords, params.extraWordSymbols); - ftdsl.parse("+mongodb^0.5 +arangodb^0.25 +reindexer^2.5"); - EXPECT_TRUE(ftdsl.size() == 3); - EXPECT_TRUE(ftdsl[0].pattern == L"mongodb"); - EXPECT_TRUE(AreFloatingValuesEqual(ftdsl[0].opts.boost, 0.5f)); - EXPECT_TRUE(ftdsl[1].pattern == L"arangodb"); - EXPECT_TRUE(AreFloatingValuesEqual(ftdsl[1].opts.boost, 0.25f)); - EXPECT_TRUE(ftdsl[2].pattern == L"reindexer"); - 
EXPECT_TRUE(AreFloatingValuesEqual(ftdsl[2].opts.boost, 2.5f)); -} - -TEST_P(FTDSLParserApi, WrongRelevancyTest) { - FTDSLQueryParams params; - reindexer::FtDSLQuery ftdsl(params.fields, params.stopWords, params.extraWordSymbols); - EXPECT_THROW(ftdsl.parse("+wrong +boost^X"), reindexer::Error); -} - -TEST_P(FTDSLParserApi, DistanceTest) { - FTDSLQueryParams params; - - reindexer::FtDSLQuery ftdsl(params.fields, params.stopWords, params.extraWordSymbols); - ftdsl.parse("'long nose'~3"); - EXPECT_TRUE(ftdsl.size() == 2); - EXPECT_TRUE(ftdsl[0].pattern == L"long"); - EXPECT_TRUE(ftdsl[1].pattern == L"nose"); - EXPECT_TRUE(ftdsl[0].opts.distance == INT_MAX); - EXPECT_TRUE(ftdsl[1].opts.distance == 3); -} - -TEST_P(FTDSLParserApi, WrongDistanceTest) { - FTDSLQueryParams params; - { - reindexer::FtDSLQuery ftdsl(params.fields, params.stopWords, params.extraWordSymbols); - EXPECT_THROW(ftdsl.parse("'this is a wrong distance'~X"), reindexer::Error); - } - { - reindexer::FtDSLQuery ftdsl(params.fields, params.stopWords, params.extraWordSymbols); - EXPECT_THROW(ftdsl.parse("'long nose'~-1"), reindexer::Error); - } - { - reindexer::FtDSLQuery ftdsl(params.fields, params.stopWords, params.extraWordSymbols); - EXPECT_THROW(ftdsl.parse("'long nose'~0"), reindexer::Error); - } - { - reindexer::FtDSLQuery ftdsl(params.fields, params.stopWords, params.extraWordSymbols); - EXPECT_THROW(ftdsl.parse("'long nose'~2.89"), reindexer::Error); - } -} - -TEST_P(FTDSLParserApi, NoClosingQuoteTest) { - FTDSLQueryParams params; - reindexer::FtDSLQuery ftdsl(params.fields, params.stopWords, params.extraWordSymbols); - EXPECT_THROW(ftdsl.parse("\"forgot to close this quote"), reindexer::Error); -} - -TEST_P(FTDSLParserApi, WrongFieldNameTest) { - FTDSLQueryParams params; - params.fields = {{"id", 0}, {"fk_id", 1}, {"location", 2}}; - reindexer::FtDSLQuery ftdsl(params.fields, params.stopWords, params.extraWordSymbols); - EXPECT_THROW(ftdsl.parse("@name,text,desc Thrones"), reindexer::Error); -} - -TEST_P(FTDSLParserApi, BinaryOperatorsTest) { - FTDSLQueryParams params; - reindexer::FtDSLQuery ftdsl(params.fields, params.stopWords, params.extraWordSymbols); - ftdsl.parse("+Jack -John +Joe"); - EXPECT_TRUE(ftdsl.size() == 3); - EXPECT_TRUE(ftdsl[0].opts.op == OpAnd); - EXPECT_TRUE(ftdsl[0].pattern == L"jack"); - EXPECT_TRUE(ftdsl[1].opts.op == OpNot); - EXPECT_TRUE(ftdsl[1].pattern == L"john"); - EXPECT_TRUE(ftdsl[2].opts.op == OpAnd); - EXPECT_TRUE(ftdsl[2].pattern == L"joe"); -} - -TEST_P(FTDSLParserApi, EscapingCharacterTest) { - FTDSLQueryParams params; - params.extraWordSymbols = "+-\\"; - reindexer::FtDSLQuery ftdsl(params.fields, params.stopWords, params.extraWordSymbols); - ftdsl.parse("\\-hell \\+well \\+bell"); - EXPECT_TRUE(ftdsl.size() == 3) << ftdsl.size(); - EXPECT_TRUE(ftdsl[0].opts.op == OpOr); - EXPECT_TRUE(ftdsl[0].pattern == L"-hell"); - EXPECT_TRUE(ftdsl[1].opts.op == OpOr); - EXPECT_TRUE(ftdsl[1].pattern == L"+well"); - EXPECT_TRUE(ftdsl[2].opts.op == OpOr); - EXPECT_TRUE(ftdsl[2].pattern == L"+bell"); -} - -TEST_P(FTDSLParserApi, ExactMatchTest) { - FTDSLQueryParams params; - reindexer::FtDSLQuery ftdsl(params.fields, params.stopWords, params.extraWordSymbols); - ftdsl.parse("=moskva77"); - EXPECT_TRUE(ftdsl.size() == 1); - EXPECT_TRUE(ftdsl[0].opts.exact); - EXPECT_TRUE(ftdsl[0].pattern == L"moskva77"); -} - -INSTANTIATE_TEST_SUITE_P(, FTDSLParserApi, - ::testing::Values(reindexer::FtFastConfig::Optimization::Memory, reindexer::FtFastConfig::Optimization::CPU), - [](const auto& info) { - switch 
(info.param) { - case reindexer::FtFastConfig::Optimization::Memory: - return "OptimizationByMemory"; - case reindexer::FtFastConfig::Optimization::CPU: - return "OptimizationByCPU"; - default: - assert(false); - std::abort(); - } - }); diff --git a/cpp_src/gtests/tests/unit/ft/ft_generic.cc b/cpp_src/gtests/tests/unit/ft/ft_generic.cc deleted file mode 100644 index b10f5b871..000000000 --- a/cpp_src/gtests/tests/unit/ft/ft_generic.cc +++ /dev/null @@ -1,1580 +0,0 @@ -#include -#include -#include "core/cjson/jsonbuilder.h" -#include "ft_api.h" -#include "tools/logger.h" -#include "yaml-cpp/yaml.h" - -using namespace std::string_view_literals; -using reindexer::fast_hash_map; -using reindexer::Query; - -class FTGenericApi : public FTApi { -protected: - std::string_view GetDefaultNamespace() noexcept override { return "ft_generic_default_namespace"; } - - void CreateAndFillSimpleNs(const std::string& ns, int from, int to, fast_hash_map* outItems) { - assertrx(from <= to); - std::vector items; - items.reserve(to - from); - auto err = rt.reindexer->OpenNamespace(ns); - ASSERT_TRUE(err.ok()) << err.what(); - rt.DefineNamespaceDataset( - ns, {IndexDeclaration{"id", "hash", "int", IndexOpts().PK(), 0}, IndexDeclaration{"data", "hash", "string", IndexOpts(), 0}}); - reindexer::WrSerializer ser; - for (int i = from; i < to; ++i) { - ser.Reset(); - reindexer::JsonBuilder jb(ser); - jb.Put("id", i); - jb.Put("data", rt.RandString()); - jb.End(); - auto item = rt.NewItem(ns); - if (outItems) { - (*outItems)[i] = ser.Slice(); - } - err = item.FromJSON(ser.Slice()); - ASSERT_TRUE(err.ok()) << err.what(); - rt.Upsert(ns, item); - } - } -}; - -TEST_P(FTGenericApi, CompositeSelect) { - Init(GetDefaultConfig(), NS1 | NS2); - Add("An entity is something|"sv, "| that in exists entity as itself"sv, NS1 | NS2); - Add("In law, a legal entity is|"sv, "|an entity that is capable of something bearing legal rights"sv, NS1 | NS2); - Add("In politics, entity is used as|"sv, "| term for entity territorial divisions of some countries"sv, NS1 | NS2); - - for (const auto& query : CreateAllPermutatedQueries("", {"*entity", "somethin*"}, "")) { - auto res = SimpleCompositeSelect(query); - std::unordered_set data{"An entity is something|"sv, - "| that in exists entity as itself"sv, - "An entity is something|d"sv, - "| that in exists entity as itself"sv, - "In law, a legal entity is|"sv, - "|an entity that is capable of something bearing legal rights"sv, - "al entity id"sv, - "|an entity that is capable of something bearing legal rights"sv, - "In politics, entity is used as|"sv, - "| term for entity territorial divisions of some countries"sv, - "s, entity id"sv, - "| term for entity territorial divisions of some countries"sv}; - - rt.PrintQueryResults("nm1", res); - for (auto it : res) { - auto ritem(it.GetItem(false)); - for (auto idx = 1; idx < ritem.NumFields(); idx++) { - auto field = ritem[idx].Name(); - if (field == "id") continue; - auto it = data.find(ritem[field].As()); - ASSERT_TRUE(it != data.end()); - data.erase(it); - } - } - EXPECT_TRUE(data.empty()); - } -} - -TEST_P(FTGenericApi, CompositeSelectWithFields) { - Init(GetDefaultConfig(), NS1 | NS2); - AddInBothFields("An entity is something|"sv, "| that in exists entity as itself"sv, NS1 | NS2); - AddInBothFields("In law, a legal entity is|"sv, "|an entity that is capable of something bearing legal rights"sv, NS1 | NS2); - AddInBothFields("In politics, entity is used as|"sv, "| term for entity territorial divisions of some countries"sv, NS1 | NS2); - - for (const 
auto& query : CreateAllPermutatedQueries("", {"*entity", "somethin*"}, "")) { - for (const char* field : {"ft1", "ft2"}) { - auto res = CompositeSelectField(field, query); - std::unordered_set data{"An entity is something|"sv, - "An entity is something|d"sv, - "| that in exists entity as itself"sv, - "In law, a legal entity is|"sv, - "|an entity that is capable of something bearing legal rights"sv, - "an entity tdof something bd"sv, - "al entity id"sv, - "In politics, entity is used as|"sv, - "| term for entity territorial divisions of some countries"sv, - "ts entity ad"sv, - "s, entity id"sv, - "or entity td"sv}; - - rt.PrintQueryResults("nm1", res); - for (auto it : res) { - auto ritem(it.GetItem(false)); - for (auto idx = 1; idx < ritem.NumFields(); idx++) { - auto curField = ritem[idx].Name(); - if (curField != field) continue; - auto it = data.find(ritem[curField].As()); - ASSERT_TRUE(it != data.end()); - data.erase(it); - } - } - EXPECT_TRUE(data.empty()); - } - } -} - -TEST_P(FTGenericApi, MergeWithSameNSAndSelectFunctions) { - Init(GetDefaultConfig()); - AddInBothFields("An entity is something|"sv, "| that in exists entity as itself"sv); - AddInBothFields("In law, a legal entity is|"sv, "|an entity that is capable of something bearing legal rights"sv); - AddInBothFields("In politics, entity is used as|"sv, "| term for entity territorial divisions of some countries"sv); - - for (const auto& query : CreateAllPermutatedQueries("", {"*entity", "somethin*"}, "")) { - for (const auto& field : {std::string("ft1"), std::string("ft2")}) { - auto dsl = std::string("@").append(field).append(" ").append(query); - auto qr{reindexer::Query("nm1").Where("ft3", CondEq, dsl)}; - reindexer::QueryResults res; - auto mqr{reindexer::Query("nm1").Where("ft3", CondEq, std::move(dsl))}; - mqr.AddFunction(field + " = snippet(,\"\",3,2,,d)"); - - qr.Merge(std::move(mqr)); - qr.AddFunction(field + " = highlight(,)"); - auto err = rt.reindexer->Select(qr, res); - EXPECT_TRUE(err.ok()) << err.what(); - - std::unordered_set data{"An entity is something|"sv, - "An entity is something|d"sv, - "| that in exists entity as itself"sv, - "In law, a legal entity is|"sv, - "|an entity that is capable of something bearing legal rights"sv, - "an entity tdof something bd"sv, - "al entity id"sv, - "In politics, entity is used as|"sv, - "| term for entity territorial divisions of some countries"sv, - "ts entity ad"sv, - "s, entity id"sv, - "or entity td"sv}; - - rt.PrintQueryResults("nm1", res); - for (auto it : res) { - auto ritem(it.GetItem(false)); - for (auto idx = 1; idx < ritem.NumFields(); idx++) { - auto curField = ritem[idx].Name(); - if (curField != field) continue; - auto it = data.find(ritem[curField].As()); - ASSERT_TRUE(it != data.end()); - data.erase(it); - } - } - EXPECT_TRUE(data.empty()); - } - } -} - -TEST_P(FTGenericApi, SelectWithPlus) { - Init(GetDefaultConfig()); - - Add("added three words"sv); - Add("added something else"sv); - - CheckAllPermutations("", {"+added"}, "", {{"!added! something else", ""}, {"!added! 
three words", ""}}); -} - -TEST_P(FTGenericApi, SelectWithPlusWithSingleAlternative) { - auto cfg = GetDefaultConfig(); - cfg.enableKbLayout = false; - cfg.enableTranslit = false; - Init(cfg); - - Add("мониторы"sv); - - // FT search by single mandatory word with single alternative - CheckAllPermutations("", {"+монитор*"}, "", {{"!мониторы!", ""}}); -} - -TEST_P(FTGenericApi, SelectWithMinus) { - Init(GetDefaultConfig()); - - Add("including me, excluding you"sv); - Add("including all of them"sv); - - CheckAllPermutations("", {"+including", "-excluding"}, "", {{"!including! all of them", ""}}); - CheckAllPermutations("", {"including", "-excluding"}, "", {{"!including! all of them", ""}}); -} - -TEST_P(FTGenericApi, SelectWithFieldsList) { - Init(GetDefaultConfig()); - - Add("nm1"sv, "Never watch their games"sv, "Because nothing can be worse than Spartak Moscow"sv); - Add("nm1"sv, "Spartak Moscow is the worst team right now"sv, "Yes, for sure"sv); - - CheckAllPermutations("@ft1 ", {"Spartak", "Moscow"}, "", {{"!Spartak Moscow! is the worst team right now", "Yes, for sure"}}); -} - -TEST_P(FTGenericApi, SelectWithRelevanceBoost) { - Init(GetDefaultConfig()); - - Add("She was a very bad girl"sv); - Add("All the naughty kids go to hell, not to heaven"sv); - Add("I've never seen a man as cruel as him"sv); - - CheckAllPermutations("@ft1 ", {"girl^2", "kids", "cruel^3"}, "", - {{"I've never seen a man as !cruel! as him", ""}, - {"She was a very bad !girl!", ""}, - {"All the naughty !kids! go to hell, not to heaven", ""}}, - true); -} - -TEST_P(FTGenericApi, SelectWithDistance) { - Init(GetDefaultConfig()); - - Add("Her nose was very very long"sv); - Add("Her nose was exceptionally long"sv); - Add("Her nose was long"sv); - - CheckResults("'nose long'~3", {{"Her !nose was long!", ""}, {"Her !nose was exceptionally long!", ""}}, true); -} - -TEST_P(FTGenericApi, AreasOnSuffix) { - auto ftCfg = GetDefaultConfig(); - Init(ftCfg); - - Add("the nos1 the nos2 the nosmn the nose"sv); - Add("the ssmask the nnmask the mask the "sv); - Add("the sslevel1 the nnlevel2 the kklevel the level"sv); - Add("the nos1 the mmask stop nos2 table"sv); - Add("Маша ела кашу. Каша кушалась сама. Маша кашляла."sv); - - CheckResults("каш*", {{"Маша ела !кашу. Каша! кушалась сама. Маша !кашляла!.", ""}}, false); - CheckResults("nos*", {{"the !nos1! the !nos2! the !nosmn! the !nose!", ""}, {"the !nos1! the mmask stop !nos2! table", ""}}, false); - CheckResults("*mask", {{"the !ssmask! the !nnmask! the !mask! the ", ""}, {"the nos1 the !mmask! stop nos2 table", ""}}, false); - CheckResults("*level*", {{"the !sslevel1! the !nnlevel2! the !kklevel! the !level!", ""}}, false); - CheckResults("+nos* +*mask ", {{"the !nos1! the !mmask! stop !nos2! table", ""}}, false); -} - -TEST_P(FTGenericApi, AreasMaxRank) { - auto ftCfg = GetDefaultConfig(); - ftCfg.maxAreasInDoc = 3; - Init(ftCfg); - // the longer the word, the greater its rank - Add("empty bb empty ccc empty dddd empty eeeee empty ffffff empty gggggggg empty hhhhhhhhh empty iiiiiiiiii empty jjjjjjjjjjj empty kkkkkkkkkkkk empty lllllllllllll"sv); - Add("empty lllllllllllll empty ccc empty dddd empty eeeee empty ffffff empty gggggggg empty hhhhhhhhh empty iiiiiiiiii empty jjjjjjjjjjj empty kkkkkkkkkkkk empty bb"sv); - // clang-format off - CheckResults("bb ccc dddd eeeee ffffff gggggggg hhhhhhhhh iiiiiiiiii jjjjjjjjjjj kkkkkkkkkkkk lllllllllllll", - { - {"empty bb empty ccc empty dddd empty eeeee empty ffffff empty gggggggg empty hhhhhhhhh empty iiiiiiiiii empty !jjjjjjjjjjj! 
empty !kkkkkkkkkkkk! empty !lllllllllllll!", ""}, - {"empty !lllllllllllll! empty ccc empty dddd empty eeeee empty ffffff empty gggggggg empty hhhhhhhhh empty iiiiiiiiii empty !jjjjjjjjjjj! empty !kkkkkkkkkkkk! empty bb", ""} - }, - false); - CheckResults("lllllllllllll bb ccc dddd eeeee ffffff gggggggg hhhhhhhhh iiiiiiiiii jjjjjjjjjjj kkkkkkkkkkkk", - { - {"empty !bb! empty !ccc! empty dddd empty eeeee empty ffffff empty gggggggg empty hhhhhhhhh empty iiiiiiiiii empty jjjjjjjjjjj empty kkkkkkkkkkkk empty !lllllllllllll!", ""}, - {"empty !lllllllllllll! empty !ccc! empty dddd empty eeeee empty ffffff empty gggggggg empty hhhhhhhhh empty iiiiiiiiii empty jjjjjjjjjjj empty kkkkkkkkkkkk empty !bb!", ""} - }, - false); - CheckResults("bb ccc lllllllllllll dddd eeeee ffffff gggggggg hhhhhhhhh iiiiiiiiii jjjjjjjjjjj kkkkkkkkkkkk", - { - {"empty !bb! empty !ccc! empty dddd empty eeeee empty ffffff empty gggggggg empty hhhhhhhhh empty iiiiiiiiii empty jjjjjjjjjjj empty kkkkkkkkkkkk empty !lllllllllllll!", ""}, - {"empty !lllllllllllll! empty !ccc! empty dddd empty eeeee empty ffffff empty gggggggg empty hhhhhhhhh empty iiiiiiiiii empty jjjjjjjjjjj empty kkkkkkkkkkkk empty !bb!", ""} - }, - false); - CheckResults("lllllllllllll jjjjjjjjjjj kkkkkkkkkkkk bb ccc dddd eeeee ffffff gggggggg hhhhhhhhh iiiiiiiiii", - { - {"empty bb empty ccc empty dddd empty eeeee empty ffffff empty gggggggg empty hhhhhhhhh empty iiiiiiiiii empty !jjjjjjjjjjj! empty !kkkkkkkkkkkk! empty !lllllllllllll!", ""}, - {"empty !lllllllllllll! empty ccc empty dddd empty eeeee empty ffffff empty gggggggg empty hhhhhhhhh empty iiiiiiiiii empty !jjjjjjjjjjj! empty !kkkkkkkkkkkk! empty bb", ""} - }, - false); - - // clang-format on -} - -TEST_P(FTGenericApi, SelectWithDistance2) { - auto check = [&](bool withHighlight) { - { - std::vector> expectedResultsH = { - {"!one two!", ""}, {"!one ецщ!", ""}, {"empty !one two!", ""}, {"empty !one two! word", ""}}; - CheckResults(R"s("one two")s", withHighlight ? expectedResultsH : DelHighlightSign(expectedResultsH), false, withHighlight); - } - { - std::vector> expectedResultsH = { - {"!one two!", ""}, {"!one ецщ!", ""}, {"empty !one two!", ""}, {"empty !one two! word", ""}}; - CheckResults(R"s("one two"~1)s", withHighlight ? expectedResultsH : DelHighlightSign(expectedResultsH), false, withHighlight); - } - { - std::vector> expectedResultsH = {{"!one two!", ""}, - {"!one ецщ!", ""}, - {"empty !one two!", ""}, - {"!one empty two!", ""}, - {"empty !one two! word", ""}, - {"word !one empty two!", ""}, - {"word !one empty empty two! word", ""}}; - CheckResults(R"s(+"one two"~3)s", withHighlight ? expectedResultsH : DelHighlightSign(expectedResultsH), false, withHighlight); - } - { - std::vector> expectedResultsH = {{"!one two!", ""}, - {"!one ецщ!", ""}, - {"empty !one two!", ""}, - {"!one empty two!", ""}, - {"empty !one two! word", ""}, - {"word !one empty two!", ""}, - {"word !one empty empty two! word", ""}}; - CheckResults(R"s("one two"~3)s", withHighlight ? expectedResultsH : DelHighlightSign(expectedResultsH), false, withHighlight); - } - { - std::vector> expectedResultsH = { - {"!one two!", ""}, {"!one ецщ!", ""}, {"!empty one two!", ""}, {"!empty one two! word", ""}}; - CheckAllPermutations("", {"empty", R"s(+"one two")s"}, "", - withHighlight ? expectedResultsH : DelHighlightSign(expectedResultsH), false, " ", withHighlight); - } - { - std::vector> expectedResultsH = {{"!empty one two!", ""}, {"!empty one two! 
word", ""}}; - CheckAllPermutations("", {"+empty", R"s(+"one two")s"}, "", - withHighlight ? expectedResultsH : DelHighlightSign(expectedResultsH), false, " ", withHighlight); - } - { - std::vector> expectedResultsH = {{"!empty!", ""}, - {"!one two!", ""}, - {"!one ецщ!", ""}, - {"!empty one two!", ""}, - {"!empty one two! word", ""}, - {"one !empty! two", ""}, - {"word one !empty empty! two word", ""}, - {"word one !empty empty empty! two word", ""}, - {"word one !empty! two", ""}, - {"word one !empty empty empty! two two word", ""}, - {"word one one !empty empty empty! two word", ""}}; - CheckAllPermutations("", {"empty", R"s("one two")s"}, "", withHighlight ? expectedResultsH : DelHighlightSign(expectedResultsH), - false, " ", withHighlight); - } - { - std::vector> expectedResultsH = {{"!empty!", ""}, - {"one !empty! two", ""}, - {"word one !empty! two", ""}, - {"word one !empty empty! two word", ""}, - {"word one !empty empty empty! two word", ""}, - {"word one !empty empty empty! two two word", ""}, - {"word one one !empty empty empty! two word", ""}}; - CheckAllPermutations("", {"empty", R"s(-"one two")s"}, "", - withHighlight ? expectedResultsH : DelHighlightSign(expectedResultsH), false, " ", withHighlight); - } - { - std::vector> expectedResultsH = {{"!empty!", ""}, - {"one !empty! two", ""}, - {"word one !empty! two", ""}, - {"word one !empty empty! two word", ""}, - {"word one !empty empty empty! two word", ""}, - {"word one !empty empty empty! two two word", ""}, - {"word one one !empty empty empty! two word", ""}}; - CheckAllPermutations("", {R"s(-"one two")s", "+empty"}, "", - withHighlight ? expectedResultsH : DelHighlightSign(expectedResultsH), false, " ", withHighlight); - } - }; - - Init(GetDefaultConfig()); - - Add("one"sv); - Add("two"sv); - Add("empty"sv); - Add("one two"sv); - Add("empty one two"sv); - Add("empty one two word"sv); - Add("one empty two"sv); - Add("word one empty two"sv); - Add("word one empty empty two word"sv); - Add("word one empty empty empty two word"sv); - Add("one ецщ"sv); - Add("word one empty empty empty two two word"sv); - Add("word one one empty empty empty two word"sv); - - check(true); - check(false); -} - -TEST_P(FTGenericApi, SelectWithDistance3) { - Init(GetDefaultConfig()); - - Add("one"sv); - Add("two"sv); - Add("three"sv); - Add("empty"sv); - Add("one two three"sv); - Add("empty one two three"sv); - Add("empty one two three word"sv); - Add("one empty two three"sv); - Add("word one empty two three"sv); - Add("word one empty empty two word three"sv); - Add("word one empty empty empty two word three"sv); - Add("one ецщ three"sv); - Add("one two empty two three"sv); - Add("one two empty two empty empty three"sv); - auto check = [&](bool withHighlight) { - { - std::vector> expectedResultsH = { - {"!one two three!", ""}, {"!one ецщ three!", ""}, {"empty !one two three!", ""}, {"empty !one two three! word", ""}}; - CheckResults(R"s("one two three")s", withHighlight ? expectedResultsH : DelHighlightSign(expectedResultsH), false, - withHighlight); - } - { - std::vector> expectedResultsH = { - {"!one two three!", ""}, {"!one ецщ three!", ""}, {"empty !one two three!", ""}, {"empty !one two three! word", ""}}; - CheckResults(R"s("one two three"~1)s", withHighlight ? expectedResultsH : DelHighlightSign(expectedResultsH), false, - withHighlight); - } - { - std::vector> expectedResultsH = {{"!one two three!", ""}, - {"!one ецщ three!", ""}, - {"empty !one two three!", ""}, - {"!one empty two three!", ""}, - {"empty !one two three! 
word", ""}, - {"word !one empty two three!", ""}, - {"word !one empty empty two word three!", ""}, - {"!one two empty two three!", ""}, - {"!one two empty two empty empty three!", ""}}; - CheckResults(R"s("one two three"~3)s", withHighlight ? expectedResultsH : DelHighlightSign(expectedResultsH), false, - withHighlight); - } - { - std::vector> expectedResultsH = {{"!one two three!", ""}, - {"!one ецщ three!", ""}, - {"empty !one two three!", ""}, - {"!one empty two three!", ""}, - {"empty !one two three! word", ""}, - {"word !one empty two three!", ""}}; - CheckResults(R"s("one two three"~2)s", withHighlight ? expectedResultsH : DelHighlightSign(expectedResultsH), false, - withHighlight); - } - { - std::vector> expectedResultsH = { - {"!one two three!", ""}, {"!one ецщ three!", ""}, {"!empty one two three!", ""}, {"!empty one two three! word", ""}}; - CheckAllPermutations("", {"empty", R"s(+"one two three")s"}, "", - withHighlight ? expectedResultsH : DelHighlightSign(expectedResultsH), false, " ", withHighlight); - } - - { - std::vector> expectedResultsH = {{"!empty one two three!", ""}, - {"!empty one two three! word", ""}}; - CheckAllPermutations("", {"+empty", R"s(+"one two three")s"}, "", - withHighlight ? expectedResultsH : DelHighlightSign(expectedResultsH), false, " ", withHighlight); - } - - { - std::vector> expectedResultsH = {{"!empty!", ""}, - {"!one two three!", ""}, - {"!one ецщ three!", ""}, - {"!empty one two three!", ""}, - {"one two !empty! two three", ""}, - {"!empty one two three! word", ""}, - {"one !empty! two three", ""}, - {"word one !empty empty! two word three", ""}, - {"word one !empty empty empty! two word three", ""}, - {"word one !empty! two three", ""}, - {"one two !empty! two !empty empty! three", ""}}; - CheckAllPermutations("", {"empty", R"s("one two three")s"}, "", - withHighlight ? expectedResultsH : DelHighlightSign(expectedResultsH), false, " ", withHighlight); - } - { - std::vector> expectedResultsH = {{"!empty!", ""}, - {"one !empty! two three", ""}, - {"word one !empty! two three", ""}, - {"word one !empty empty! two word three", ""}, - {"word one !empty empty empty! two word three", ""}, - {"one two !empty! two three", ""}, - {"one two !empty! two !empty empty! three", ""}}; - CheckAllPermutations("", {"empty", R"s(-"one two three")s"}, "", - withHighlight ? expectedResultsH : DelHighlightSign(expectedResultsH), false, " ", withHighlight); - } - { - std::vector> expectedResultsH = {{"!empty!", ""}, - {"one !empty! two three", ""}, - {"word one !empty! two three", ""}, - {"word one !empty empty! two word three", ""}, - {"word one !empty empty empty! two word three", ""}, - {"one two !empty! two three", ""}, - {"one two !empty! two !empty empty! three", ""}}; - CheckAllPermutations("", {R"s(-"one two three")s", "+empty"}, "", - withHighlight ? expectedResultsH : DelHighlightSign(expectedResultsH), false, " ", withHighlight); - } - }; - check(true); - check(false); -} -TEST_P(FTGenericApi, SelectWithDistanceSubTerm) { - Init(GetDefaultConfig()); - Add("one two empty щту two empty one ецщ"sv); - CheckResults(R"s("one two")s", {{"!one two! empty !щту two! 
empty !one ецщ!", ""}}, false, true); - CheckResults(R"s("one two")s", {{"one two empty щту two empty one ецщ", ""}}, false, false); -} - -TEST_P(FTGenericApi, SelectWithDistance2Field) { - Init(GetDefaultConfig()); - Add("empty two empty two one"sv, "two"sv); - // 24 bits - the number of words in the field - CheckResults("'one two'~" + std::to_string((1 << 24) + 100), {}, false); -} - -TEST_P(FTGenericApi, SelectWithSeveralGroup) { - Init(GetDefaultConfig()); - - Add("one empty two word three four"sv); - Add("word one empty two word three four word"sv); - Add("one three two four"sv); - Add("word three one two four word"sv); - CheckAllPermutations("", {R"s(+"one two"~2)s", R"s(+"three four"~3)s"}, "", - {{"!one empty two! word !three four!", ""}, - {"word !one empty two! word !three four! word", ""}, - {"!one three two four!", ""}, - {"word !three one two four! word", ""}}, - false); -} - -TEST_P(FTGenericApi, NumberToWordsSelect) { - Init(GetDefaultConfig()); - Add("оценка 5 майкл джордан 23"sv, ""sv); - - CheckAllPermutations("", {"пять", "+двадцать", "+три"}, "", {{"оценка !5! майкл джордан !23!", ""}}); -} - -// Make sure FT seeks by a huge number set by string in DSL -TEST_P(FTGenericApi, HugeNumberToWordsSelect) { - // Initialize namespace - Init(GetDefaultConfig()); - // Add a record with a big number - Add("много 7343121521906522180408440 денег"sv, ""sv); - // Execute FT query, where search words are set as strings - auto qr = SimpleSelect( - "+семь +септиллионов +триста +сорок +три +секстиллиона +сто +двадцать +один +квинтиллион +пятьсот +двадцать +один +квадриллион " - "+девятьсот +шесть +триллионов +пятьсот +двадцать +два +миллиарда +сто +восемьдесят +миллионов +четыреста +восемь +тысяч " - "+четыреста +сорок"); - // Make sure it found this only string - ASSERT_TRUE(qr.Count() == 1); -} - -// Make sure way too huge numbers are ignored in FT -TEST_P(FTGenericApi, HugeNumberToWordsSelect2) { - // Initialize namespace - Init(GetDefaultConfig()); - // Add a record with a huge number - Add("1127343121521906522180408440"sv, ""sv); - // Execute FT query, where search words are set as strings - reindexer::QueryResults qr; - const std::string searchWord = - "+один +октиллион +сто +двадцать +семь +септиллионов +триста +сорок +три +секстиллиона +сто +двадцать +один +квинтиллион +пятьсот " - "+двадцать +один +квадриллион +девятьсот +шесть +триллионов +пятьсот +двадцать +два +миллиарда +сто +восемьдесят +миллионов " - "+четыреста +восемь +тысяч +четыреста +сорок"; - auto q{reindexer::Query("nm1").Where("ft3", CondEq, searchWord)}; - auto err = rt.reindexer->Select(q, qr); - EXPECT_TRUE(err.ok()) << err.what(); - // Make sure it has found absolutely nothing - ASSERT_EQ(qr.Count(), 0); -} - -TEST_P(FTGenericApi, DeleteTest) { - Init(GetDefaultConfig()); - - std::unordered_map data; - for (int i = 0; i < 10000; ++i) { - data.insert(Add(rt.RuRandString())); - } - auto res = SimpleSelect("entity"); - for (int i = 0; i < 10000; ++i) { - data.insert(Add(rt.RuRandString())); - } - res = SimpleSelect("entity"); - - data.insert(Add("An entity is something that exists as itself"sv)); - data.insert(Add("In law, a legal entity is an entity that is capable of bearing legal rights"sv)); - data.insert(Add("In politics, entity is used as term for territorial divisions of some countries"sv)); - data.insert(Add("Юридическое лицо — организация, которая имеет обособленное имущество"sv)); - data.insert(Add("Aftermath - the consequences or aftereffects of a significant unpleasant event"sv)); - 
data.insert(Add("Food prices soared in the aftermath of the drought"sv)); - data.insert(Add("In the aftermath of the war ..."sv)); - - // Delete(data[1].first); - // Delete(data[1].first); - - const auto err = Delete(data.find("In law, a legal entity is an entity that is capable of bearing legal rights")->second); - ASSERT_TRUE(err.ok()) << err.what(); - res = SimpleSelect("entity"); - - // for (auto it : res) { - // Item ritem(it.GetItem()); - // std::cout << ritem["ft1"].as() << std::endl; - // } - // TODO: add validation -} - -TEST_P(FTGenericApi, RebuildAfterDeletion) { - Init(GetDefaultConfig()); - - auto cfg = GetDefaultConfig(); - cfg.maxStepSize = 5; - auto err = SetFTConfig(cfg, "nm1", "ft1", {"ft1"}); - ASSERT_TRUE(err.ok()) << err.what(); - - auto selectF = [this](const std::string& word) { - const auto q{reindexer::Query("nm1").Where("ft1", CondEq, word)}; - reindexer::QueryResults res; - auto err = rt.reindexer->Select(q, res); - EXPECT_TRUE(err.ok()) << err.what(); - return res; - }; - - std::unordered_map data; - data.insert(Add("An entity is something that exists as itself"sv)); - data.insert(Add("In law, a legal entity is an entity that is capable of bearing legal rights"sv)); - data.insert(Add("In politics, entity is used as term for territorial divisions of some countries"sv)); - data.insert(Add("Юридическое лицо — организация, которая имеет обособленное имущество"sv)); - data.insert(Add("Aftermath - the consequences or aftereffects of a significant unpleasant event"sv)); - data.insert(Add("Food prices soared in the aftermath of the drought"sv)); - data.insert(Add("In the aftermath of the war ..."sv)); - - auto res = selectF("entity"); - ASSERT_EQ(res.Count(), 3); - - err = Delete(data.find("In law, a legal entity is an entity that is capable of bearing legal rights")->second); - ASSERT_TRUE(err.ok()) << err.what(); - res = selectF("entity"); - ASSERT_EQ(res.Count(), 2); -} - -TEST_P(FTGenericApi, Unique) { - Init(GetDefaultConfig()); - - std::vector data; - std::set check; - std::set checks; - reindexer::logInstallWriter([](int, char*) { /*std::cout << buf << std::endl;*/ }, reindexer::LoggerPolicy::WithLocks); - - for (int i = 0; i < 1000; ++i) { - bool inserted = false; - size_t n; - std::string s; - - while (!inserted) { - n = rand(); - auto res = check.insert(n); - inserted = res.second; - } - - inserted = false; - - while (!inserted) { - s = rt.RandString(); - auto res = checks.insert(s); - inserted = res.second; - } - - data.push_back(s + std::to_string(n)); - } - - for (size_t i = 0; i < data.size(); i++) { - Add(data[i], data[i]); - if (i % 5 == 0) { - for (size_t j = 0; j < i; j++) { - if (i == 40 && j == 26) { - int a = 3; // NOLINT(*unused-but-set-variable) This code is just to load CPU by non-rx stuff - a++; - (void)a; - } - auto res = StressSelect(data[j]); - if (res.Count() != 1) { - for (auto it : res) { - auto ritem(it.GetItem(false)); - } - abort(); - } - } - } - } -} - -TEST_P(FTGenericApi, SummationOfRanksInSeveralFields) { - auto ftCfg = GetDefaultConfig(3); - ftCfg.summationRanksByFieldsRatio = 0.0f; - Init(ftCfg, NS3); - - Add("nm3"sv, "word"sv, "word"sv, "word"sv); - Add("nm3"sv, "word"sv, "test"sv, "test"sv); - Add("nm3"sv, "test"sv, "word"sv, "test"sv); - Add("nm3"sv, "test"sv, "test"sv, "word"sv); - uint16_t rank = 0; - // Do not sum ranks by fields, as it is not asked in request and sum ratio in config is zero - const auto queries = CreateAllPermutatedQueries("@", {"ft1", "ft2", "ft3"}, " word", ","); - for (size_t i = 0; i < queries.size(); 
++i) { - const auto& q = queries[i]; - const auto qr = SimpleSelect3(q); - CheckResults(q, qr, - {{"!word!", "!word!", "!word!"}, {"!word!", "test", "test"}, {"test", "!word!", "test"}, {"test", "test", "!word!"}}, - false); - assert(qr.IsLocal()); - auto& lqr = qr.ToLocalQr(); - auto it = lqr.begin(); - if (i == 0) { - rank = it.GetItemRef().Proc(); - } - for (const auto end = lqr.end(); it != end; ++it) { - EXPECT_EQ(rank, it.GetItemRef().Proc()) << q; - } - } - - // Do not sum ranks by fields, inspite of it is asked in request, as sum ratio in config is zero - for (const auto& q : CreateAllPermutatedQueries("@", {"+ft1", "+ft2", "+ft3"}, " word", ",")) { - const auto qr = SimpleSelect3(q); - CheckResults(q, qr, - {{"!word!", "!word!", "!word!"}, {"!word!", "test", "test"}, {"test", "!word!", "test"}, {"test", "test", "!word!"}}, - false); - assert(qr.IsLocal()); - for (const auto& it : qr.ToLocalQr()) { - EXPECT_EQ(rank, it.GetItemRef().Proc()) << q; - } - } - - // Do not sum ranks by fields, inspite of it is asked in request, as sum ratio in config is zero - for (const auto& q : CreateAllPermutatedQueries("@", {"+*"}, " word", ",")) { - const auto qr = SimpleSelect3(q); - CheckResults(q, qr, - {{"!word!", "!word!", "!word!"}, {"!word!", "test", "test"}, {"test", "!word!", "test"}, {"test", "test", "!word!"}}, - false); - assert(qr.IsLocal()); - for (const auto& it : qr.ToLocalQr()) { - EXPECT_EQ(rank, it.GetItemRef().Proc()) << q; - } - } - - ftCfg.summationRanksByFieldsRatio = 1.0f; - auto err = SetFTConfig(ftCfg, "nm3", "ft", {"ft1", "ft2", "ft3"}); - ASSERT_TRUE(err.ok()) << err.what(); - Add("nm3"sv, "test"sv, "test"sv, "test"sv); - // Do not sum ranks by fields, inspite of sum ratio in config is not zero, as it is not asked in request - for (const auto& q : CreateAllPermutatedQueries("@", {"ft1", "ft2", "ft3"}, " word", ",")) { - const auto qr = SimpleSelect3(q); - CheckResults(q, qr, - {{"!word!", "!word!", "!word!"}, {"!word!", "test", "test"}, {"test", "!word!", "test"}, {"test", "test", "!word!"}}, - false); - assert(qr.IsLocal()); - for (const auto& it : qr.ToLocalQr()) { - EXPECT_EQ(rank, it.GetItemRef().Proc()) << q; - } - } - - // Do sum ranks by fields, as it is asked in request and sum ratio in config is not zero - for (const auto& q : CreateAllPermutatedQueries("@", {"+ft1", "+ft2", "+ft3"}, " word", ",")) { - const auto qr = SimpleSelect3(q); - CheckResults(q, qr, - {{"!word!", "!word!", "!word!"}, {"!word!", "test", "test"}, {"test", "!word!", "test"}, {"test", "test", "!word!"}}, - false); - assert(qr.IsLocal()); - auto it = qr.ToLocalQr().begin(); - rank = it.GetItemRef().Proc() / 3; - ++it; - for (const auto end = qr.ToLocalQr().end(); it != end; ++it) { - EXPECT_LE(it.GetItemRef().Proc(), rank + 1) << q; - EXPECT_GE(it.GetItemRef().Proc(), rank - 1) << q; - } - } - - // Do sum ranks by fields, as it is asked in request and sum ratio in config is not zero - for (const auto& q : CreateAllPermutatedQueries("@", {"+*"}, " word", ",")) { - const auto qr = SimpleSelect3(q); - CheckResults(q, qr, - {{"!word!", "!word!", "!word!"}, {"!word!", "test", "test"}, {"test", "!word!", "test"}, {"test", "test", "!word!"}}, - false); - assert(qr.IsLocal()); - auto it = qr.ToLocalQr().begin(); - rank = it.GetItemRef().Proc() / 3; - ++it; - for (const auto end = qr.ToLocalQr().end(); it != end; ++it) { - EXPECT_LE(it.GetItemRef().Proc(), rank + 1) << q; - EXPECT_GE(it.GetItemRef().Proc(), rank - 1) << q; - } - } - - // ft2 is skipped as is not marked with + - for (const auto& q : 
CreateAllPermutatedQueries("@", {"+ft1", "ft2", "+ft3"}, " word", ",")) { - const auto qr = SimpleSelect3(q); - CheckResults(q, qr, - {{"!word!", "!word!", "!word!"}, {"!word!", "test", "test"}, {"test", "!word!", "test"}, {"test", "test", "!word!"}}, - false); - assert(qr.IsLocal()); - auto it = qr.ToLocalQr().begin(); - rank = it.GetItemRef().Proc() / 2; - ++it; - for (const auto end = qr.ToLocalQr().end(); it != end; ++it) { - EXPECT_LE(it.GetItemRef().Proc(), rank + 1) << q; - EXPECT_GE(it.GetItemRef().Proc(), rank - 1) << q; - } - } - - // ft2 is not skipped as it has max rank - for (const auto& q : CreateAllPermutatedQueries("@", {"+ft1", "ft2^2", "+ft3"}, " word", ",")) { - const auto qr = SimpleSelect3(q); - CheckResults(q, qr, - {{"!word!", "!word!", "!word!"}, {"!word!", "test", "test"}, {"test", "!word!", "test"}, {"test", "test", "!word!"}}, - false); - assert(qr.IsLocal()); - auto it = qr.ToLocalQr().begin(); - rank = it.GetItemRef().Proc() / 4; - ++it; - EXPECT_LE(it.GetItemRef().Proc(), (rank + 1) * 2) << q; - EXPECT_GE(it.GetItemRef().Proc(), (rank - 1) * 2) << q; - ++it; - for (const auto end = qr.ToLocalQr().end(); it != end; ++it) { - EXPECT_LE(it.GetItemRef().Proc(), rank + 1) << q; - EXPECT_GE(it.GetItemRef().Proc(), rank - 1) << q; - } - } - - // Ranks summated with ratio 0.5 - ftCfg.summationRanksByFieldsRatio = 0.5f; - err = SetFTConfig(ftCfg, "nm3", "ft", {"ft1", "ft2", "ft3"}); - ASSERT_TRUE(err.ok()) << err.what(); - Add("nm3"sv, "test"sv, "test"sv, "test"sv); - for (const auto& q : CreateAllPermutatedQueries("@", {"+ft1", "+ft2", "+ft3"}, " word", ",")) { - const auto qr = SimpleSelect3(q); - CheckResults(q, qr, - {{"!word!", "!word!", "!word!"}, {"!word!", "test", "test"}, {"test", "!word!", "test"}, {"test", "test", "!word!"}}, - false); - assert(qr.IsLocal()); - auto it = qr.ToLocalQr().begin(); - rank = it.GetItemRef().Proc() / (1.0 + 0.5 + 0.5 * 0.5); - ++it; - for (const auto end = qr.ToLocalQr().end(); it != end; ++it) { - EXPECT_LE(it.GetItemRef().Proc(), rank + 1) << q; - EXPECT_GE(it.GetItemRef().Proc(), rank - 1) << q; - } - } - - // Ranks summated with ratio 0.5 - for (const auto& q : CreateAllPermutatedQueries("@", {"+ft1^1.5", "+ft2^1.3", "+ft3"}, " word", ",")) { - const auto qr = SimpleSelect3(q); - CheckResults(q, qr, - {{"!word!", "!word!", "!word!"}, {"!word!", "test", "test"}, {"test", "!word!", "test"}, {"test", "test", "!word!"}}, - true); - assert(qr.IsLocal()); - auto it = qr.ToLocalQr().begin(); - rank = it.GetItemRef().Proc() / (1.5 + 0.5 * 1.3 + 0.5 * 0.5); - ++it; - EXPECT_LE(it.GetItemRef().Proc(), (rank + 5) * 1.5) << q; - EXPECT_GE(it.GetItemRef().Proc(), (rank - 5) * 1.5) << q; - ++it; - EXPECT_LE(it.GetItemRef().Proc(), (rank + 5) * 1.3) << q; - EXPECT_GE(it.GetItemRef().Proc(), (rank - 5) * 1.3) << q; - ++it; - EXPECT_LE(it.GetItemRef().Proc(), rank + 5) << q; - EXPECT_GE(it.GetItemRef().Proc(), rank - 5) << q; - } -} - -TEST_P(FTGenericApi, SelectTranslitWithComma) { - auto ftCfg = GetDefaultConfig(); - ftCfg.logLevel = 5; - Init(ftCfg); - - Add("nm1"sv, "хлебопечка"sv, ""sv); - Add("nm1"sv, "электрон"sv, ""sv); - Add("nm1"sv, "матэ"sv, ""sv); - - auto qr = SimpleSelect("@ft1 [kt,jgtxrf"); - EXPECT_EQ(qr.Count(), 1); - auto item = qr.begin().GetItem(false); - EXPECT_EQ(item["ft1"].As(), "!хлебопечка!"); - - qr = SimpleSelect("@ft1 \\'ktrnhjy"); - EXPECT_EQ(qr.Count(), 1); - item = qr.begin().GetItem(false); - EXPECT_EQ(item["ft1"].As(), "!электрон!"); - - qr = SimpleSelect("@ft1 vfn\\'"); - EXPECT_EQ(qr.Count(), 1); - item = 
qr.begin().GetItem(false); - EXPECT_EQ(item["ft1"].As(), "!матэ!"); -} - -TEST_P(FTGenericApi, RankWithPosition) { - auto ftCfg = GetDefaultConfig(); - ftCfg.fieldsCfg[0].positionWeight = 1.0; - Init(ftCfg); - - Add("nm1"sv, "one two three word"sv, ""sv); - Add("nm1"sv, "one two three four five six word"sv, ""sv); - Add("nm1"sv, "one two three four word"sv, ""sv); - Add("nm1"sv, "one word"sv, ""sv); - Add("nm1"sv, "one two three four five word"sv, ""sv); - Add("nm1"sv, "word"sv, ""sv); - Add("nm1"sv, "one two word"sv, ""sv); - - CheckAllPermutations("", {"word"}, "", - {{"!word!", ""}, - {"one !word!", ""}, - {"one two !word!", ""}, - {"one two three !word!", ""}, - {"one two three four !word!", ""}, - {"one two three four five !word!", ""}, - {"one two three four five six !word!", ""}}, - true); -} - -TEST_P(FTGenericApi, DifferentFieldRankPosition) { - auto ftCfg = GetDefaultConfig(); - ftCfg.fieldsCfg[0].positionWeight = 1.0; - ftCfg.fieldsCfg[0].positionBoost = 10.0; - Init(ftCfg); - - Add("nm1"sv, "one two three word"sv, "one word"sv); - Add("nm1"sv, "one two three four five six word"sv, "one two word"sv); - Add("nm1"sv, "one two three four word"sv, "one two three four five word"sv); - Add("nm1"sv, "one word"sv, "one two three four five six word"sv); - Add("nm1"sv, "one two three four five word"sv, "one two three word"sv); - Add("nm1"sv, "word"sv, "one two three four word"sv); - Add("nm1"sv, "one two word"sv, "word"sv); - - CheckAllPermutations("", {"word"}, "", - {{"!word!", "one two three four !word!"}, - {"one !word!", "one two three four five six !word!"}, - {"one two !word!", "!word!"}, - {"one two three !word!", "one !word!"}, - {"one two three four !word!", "one two three four five !word!"}, - {"one two three four five !word!", "one two three !word!"}, - {"one two three four five six !word!", "one two !word!"}}, - true); - - ftCfg.fieldsCfg[0].positionWeight = 0.1; - ftCfg.fieldsCfg[0].positionBoost = 1.0; - ftCfg.fieldsCfg[1].positionWeight = 1.0; - ftCfg.fieldsCfg[1].positionBoost = 10.0; - SetFTConfig(ftCfg); - - CheckAllPermutations("", {"word"}, "", - {{"one two !word!", "!word!"}, - {"one two three !word!", "one !word!"}, - {"one two three four five six !word!", "one two !word!"}, - {"one two three four five !word!", "one two three !word!"}, - {"!word!", "one two three four !word!"}, - {"one two three four !word!", "one two three four five !word!"}, - {"one !word!", "one two three four five six !word!"}}, - true); -} - -TEST_P(FTGenericApi, PartialMatchRank) { - auto ftCfg = GetDefaultConfig(); - ftCfg.partialMatchDecrease = 0; - Init(ftCfg); - - Add("nm1"sv, "ТНТ4"sv, ""sv); - Add("nm1"sv, ""sv, "ТНТ"sv); - - CheckAllPermutations("@", {"ft1^1.1", "ft2^1"}, " ТНТ*", {{"!ТНТ4!", ""}, {"", "!ТНТ!"}}, true, ", "); - - ftCfg.partialMatchDecrease = 100; - SetFTConfig(ftCfg); - - CheckAllPermutations("@", {"ft1^1.1", "ft2^1"}, " ТНТ*", {{"", "!ТНТ!"}, {"!ТНТ4!", ""}}, true, ", "); -} - -TEST_P(FTGenericApi, SelectFullMatch) { - auto ftCfg = GetDefaultConfig(); - ftCfg.fullMatchBoost = 0.9; - Init(ftCfg); - - Add("nm1"sv, "test"sv, "love"sv); - Add("nm1"sv, "test"sv, "love second"sv); - - CheckAllPermutations("", {"love"}, "", {{"test", "!love! second"}, {"test", "!love!"}}, true); - - ftCfg.fullMatchBoost = 1.1; - SetFTConfig(ftCfg); - CheckAllPermutations("", {"love"}, "", {{"test", "!love!"}, {"test", "!love! 
second"}}, true); -} - -TEST_P(FTGenericApi, SetFtFieldsCfgErrors) { - auto cfg = GetDefaultConfig(2); - Init(cfg); - cfg.fieldsCfg[0].positionWeight = 0.1; - cfg.fieldsCfg[1].positionWeight = 0.2; - // Задаем уникальный конфиг для поля ft, которого нет в индексе ft3 - auto err = SetFTConfig(cfg, "nm1", "ft3", {"ft", "ft2"}); - // Получаем ошибку - EXPECT_FALSE(err.ok()); - EXPECT_EQ(err.what(), "Field 'ft' is not included to full text index"); - - err = rt.reindexer->OpenNamespace("nm3"); - ASSERT_TRUE(err.ok()) << err.what(); - rt.DefineNamespaceDataset( - "nm3", {IndexDeclaration{"id", "hash", "int", IndexOpts().PK(), 0}, IndexDeclaration{"ft", "text", "string", IndexOpts(), 0}}); - // Задаем уникальный конфиг для единственного поля ft в индексе ft - err = SetFTConfig(cfg, "nm3", "ft", {"ft"}); - // Получаем ошибку - EXPECT_FALSE(err.ok()); - EXPECT_EQ(err.what(), "Configuration for single field fulltext index cannot contain field specifications"); - - // maxTypos < 0 - cfg.maxTypos = -1; - err = SetFTConfig(cfg, "nm1", "ft3", {"ft1", "ft2"}); - // Error - EXPECT_FALSE(err.ok()); - EXPECT_EQ(err.what(), "FtFastConfig: Value of 'max_typos' - -1 is out of bounds: [0,4]"); - - // maxTypos > 4 - cfg.maxTypos = 5; - err = SetFTConfig(cfg, "nm1", "ft3", {"ft1", "ft2"}); - // Error - EXPECT_FALSE(err.ok()); - EXPECT_EQ(err.what(), "FtFastConfig: Value of 'max_typos' - 5 is out of bounds: [0,4]"); -} - -TEST_P(FTGenericApi, MergeLimitConstraints) { - auto cfg = GetDefaultConfig(); - Init(cfg); - cfg.mergeLimit = kMinMergeLimitValue - 1; - auto err = SetFTConfig(cfg, "nm1", "ft3", {"ft1", "ft2"}); - ASSERT_EQ(err.code(), errParseJson); - cfg.mergeLimit = kMaxMergeLimitValue + 1; - err = SetFTConfig(cfg, "nm1", "ft3", {"ft1", "ft2"}); - ASSERT_EQ(err.code(), errParseJson); - cfg.mergeLimit = kMinMergeLimitValue; - err = SetFTConfig(cfg, "nm1", "ft3", {"ft1", "ft2"}); - ASSERT_TRUE(err.ok()) << err.what(); - cfg.mergeLimit = kMaxMergeLimitValue; - err = SetFTConfig(cfg, "nm1", "ft3", {"ft1", "ft2"}); - ASSERT_TRUE(err.ok()) << err.what(); -} - -TEST_P(FTGenericApi, ConfigBm25Coefficients) { - reindexer::FtFastConfig cfgDef = GetDefaultConfig(); - cfgDef.maxAreasInDoc = 100; - reindexer::FtFastConfig cfg = cfgDef; - cfg.bm25Config.bm25b = 0.0; - cfg.bm25Config.bm25Type = reindexer::FtFastConfig::Bm25Config::Bm25Type::rx; - - Init(cfg); - Add("nm1"sv, "слово пусто слова пусто словами"sv, ""sv); - Add("nm1"sv, "слово пусто слово"sv, ""sv); - Add("nm1"sv, "otherword targetword"sv, ""sv); - Add("nm1"sv, "otherword targetword otherword targetword"sv, ""sv); - Add("nm1"sv, "otherword targetword otherword targetword targetword"sv, ""sv); - Add("nm1"sv, - "otherword targetword otherword otherword otherword targetword otherword targetword otherword targetword otherword otherword otherword otherword otherword otherword otherword otherword targetword"sv, - ""sv); - - CheckResults("targetword", - {{"otherword !targetword! otherword otherword otherword !targetword! otherword !targetword! otherword !targetword! " - "otherword otherword otherword otherword otherword otherword otherword otherword !targetword!", - ""}, - {"otherword !targetword! otherword !targetword targetword!", ""}, - {"otherword !targetword! otherword !targetword!", ""}, - {"otherword !targetword!", ""}}, - true); - - cfg = cfgDef; - cfg.bm25Config.bm25b = 0.75; - reindexer::Error err = SetFTConfig(cfg, "nm1", "ft3", {"ft1", "ft2"}); - ASSERT_TRUE(err.ok()) << err.what(); - - CheckResults("targetword", - {{"otherword !targetword! 
otherword !targetword targetword!", ""}, - {"otherword !targetword! otherword !targetword!", ""}, - {"otherword !targetword! otherword otherword otherword !targetword! otherword !targetword! otherword !targetword! " - "otherword otherword otherword otherword otherword otherword otherword otherword !targetword!", - ""}, - {"otherword !targetword!", ""}}, - true); - cfg = cfgDef; - cfg.bm25Config.bm25Type = reindexer::FtFastConfig::Bm25Config::Bm25Type::wordCount; - cfg.fieldsCfg[0].positionWeight = 0.0; - cfg.fullMatchBoost = 1.0; - - err = SetFTConfig(cfg, "nm1", "ft3", {"ft1", "ft2"}); - ASSERT_TRUE(err.ok()) << err.what(); - - CheckResults("targetword", - { - {"otherword !targetword! otherword otherword otherword !targetword! otherword !targetword! otherword !targetword! " - "otherword otherword otherword otherword otherword otherword otherword otherword !targetword!", - ""}, - {"otherword !targetword! otherword !targetword targetword!", ""}, - {"otherword !targetword! otherword !targetword!", ""}, - {"otherword !targetword!", ""}, - - }, - true); - - CheckResults("словах", {{"!слово! пусто !слово!", ""}, {"!слово! пусто !слова! пусто !словами!", ""}}, true); -} - -TEST_P(FTGenericApi, ConfigFtProc) { - reindexer::FtFastConfig cfgDef = GetDefaultConfig(); - cfgDef.synonyms = {{{"тестов"}, {"задача"}}}; - reindexer::FtFastConfig cfg = cfgDef; - - cfg.rankingConfig.fullMatch = 100; - cfg.rankingConfig.stemmerPenalty = 1; // for idf/tf boost - cfg.rankingConfig.translit = 50; - cfg.rankingConfig.kblayout = 40; - cfg.rankingConfig.synonyms = 30; - Init(cfg); - Add("nm1"sv, "маленький тест"sv, ""); - Add("nm1"sv, "один тестов очень очень тестов тестов тестов"sv, ""); - Add("nm1"sv, "два тестов очень очень тестов тестов тестов"sv, ""); - Add("nm1"sv, "testov"sv, ""); - Add("nm1"sv, "ntcnjd"sv, ""); - Add("nm1"sv, "задача"sv, ""); - Add("nm1"sv, "Местов"sv, ""); - Add("nm1"sv, "МестоД"sv, ""); - - reindexer::Error err; - CheckResults("тестов", - {{"маленький !тест!", ""}, - {"один !тестов! очень очень !тестов тестов тестов!", ""}, - {"два !тестов! очень очень !тестов тестов тестов!", ""}, - {"!testov!", ""}, - {"!ntcnjd!", ""}, - {"!задача!", ""}}, - true); - cfg = cfgDef; - err = SetFTConfig(cfg, "nm1", "ft3", {"ft1", "ft2"}); - ASSERT_TRUE(err.ok()) << err.what(); - CheckResults("тестов", - {{"!задача!", ""}, - {"!testov!", ""}, - {"!ntcnjd!", ""}, - {"один !тестов! очень очень !тестов тестов тестов!", ""}, - {"два !тестов! очень очень !тестов тестов тестов!", ""}, - {"маленький !тест!", ""}}, - true); - cfg = cfgDef; - cfg.rankingConfig.stemmerPenalty = 500; - err = SetFTConfig(cfg, "nm1", "ft3", {"ft1", "ft2"}); - ASSERT_TRUE(err.ok()) << err.what(); - CheckResults("тестов", - {{"!задача!", ""}, - {"!testov!", ""}, - {"!ntcnjd!", ""}, - {"один !тестов! очень очень !тестов тестов тестов!", ""}, - {"два !тестов! очень очень !тестов тестов тестов!", ""}, - {"маленький !тест!", ""}}, - true); - - cfg = cfgDef; - cfg.rankingConfig.stemmerPenalty = -1; - err = SetFTConfig(cfg, "nm1", "ft3", {"ft1", "ft2"}); - ASSERT_EQ(err.code(), errParseJson); - ASSERT_EQ(err.what(), "FtFastConfig: Value of 'stemmer_proc_penalty' - -1 is out of bounds: [0,500]"); - - cfg = cfgDef; - cfg.rankingConfig.synonyms = 500; - err = SetFTConfig(cfg, "nm1", "ft3", {"ft1", "ft2"}); - ASSERT_TRUE(err.ok()) << err.what(); - CheckResults("тестов", - {{"!задача!", ""}, - {"!testov!", ""}, - {"!ntcnjd!", ""}, - {"один !тестов! очень очень !тестов тестов тестов!", ""}, - {"два !тестов! 
очень очень !тестов тестов тестов!", ""}, - {"маленький !тест!", ""}}, - true); - cfg = cfgDef; - cfg.rankingConfig.synonyms = 501; - err = SetFTConfig(cfg, "nm1", "ft3", {"ft1", "ft2"}); - ASSERT_EQ(err.code(), errParseJson); - ASSERT_EQ(err.what(), "FtFastConfig: Value of 'synonyms_proc' - 501 is out of bounds: [0,500]"); - - cfg = cfgDef; - cfg.rankingConfig.translit = 200; - err = SetFTConfig(cfg, "nm1", "ft3", {"ft1", "ft2"}); - ASSERT_TRUE(err.ok()) << err.what(); - CheckResults("тестов", - {{"!testov!", ""}, - {"!задача!", ""}, - {"!ntcnjd!", ""}, - {"один !тестов! очень очень !тестов тестов тестов!", ""}, - {"два !тестов! очень очень !тестов тестов тестов!", ""}, - {"маленький !тест!", ""}}, - true); - - cfg = cfgDef; - cfg.rankingConfig.typo = 300; - cfg.rankingConfig.translit = 200; - cfg.maxTypos = 4; - err = SetFTConfig(cfg, "nm1", "ft3", {"ft1", "ft2"}); - ASSERT_TRUE(err.ok()) << err.what(); - CheckResults("тестов~", - {{"!Местов!", ""}, - {"!МестоД!", ""}, - {"!testov!", ""}, - {"!задача!", ""}, - {"!ntcnjd!", ""}, - {"один !тестов! очень очень !тестов тестов тестов!", ""}, - {"два !тестов! очень очень !тестов тестов тестов!", ""}, - {"маленький !тест!", ""}}, - true); - - cfg = cfgDef; - cfg.rankingConfig.typo = 300; - cfg.rankingConfig.typoPenalty = 150; - cfg.rankingConfig.translit = 200; - cfg.maxTypos = 4; - err = SetFTConfig(cfg, "nm1", "ft3", {"ft1", "ft2"}); - ASSERT_TRUE(err.ok()) << err.what(); - CheckResults("тестов~", - {{"!Местов!", ""}, - {"!testov!", ""}, - {"!МестоД!", ""}, - {"!задача!", ""}, - {"!ntcnjd!", ""}, - {"один !тестов! очень очень !тестов тестов тестов!", ""}, - {"два !тестов! очень очень !тестов тестов тестов!", ""}, - {"маленький !тест!", ""}}, - true); - - cfg = cfgDef; - cfg.rankingConfig.typo = 300; - cfg.rankingConfig.typoPenalty = 500; - cfg.rankingConfig.translit = 200; - cfg.maxTypos = 4; - err = SetFTConfig(cfg, "nm1", "ft3", {"ft1", "ft2"}); - ASSERT_TRUE(err.ok()) << err.what(); - CheckResults("тестов~", - {{"!testov!", ""}, - {"!Местов!", ""}, - {"!задача!", ""}, - {"!ntcnjd!", ""}, - {"один !тестов! очень очень !тестов тестов тестов!", ""}, - {"два !тестов! очень очень !тестов тестов тестов!", ""}, - {"маленький !тест!", ""}}, - true); -} - -TEST_P(FTGenericApi, InvalidDSLErrors) { - auto cfg = GetDefaultConfig(); - cfg.stopWords.clear(); - cfg.stopWords.emplace("teststopword"); - Init(cfg); - constexpr std::string_view kExpectedErrorMessage = "Fulltext query can not contain only 'NOT' terms (i.e. 
terms with minus)"; - - { - auto q = Query("nm1").Where("ft3", CondEq, "-word"); - reindexer::QueryResults qr; - auto err = rt.reindexer->Select(q, qr); - EXPECT_EQ(err.code(), errParams) << err.what(); - EXPECT_EQ(err.what(), kExpectedErrorMessage); - - qr.Clear(); - q = Query("nm1").Where("ft3", CondEq, "-word1 -word2 -word3"); - err = rt.reindexer->Select(q, qr); - EXPECT_EQ(err.code(), errParams) << err.what(); - EXPECT_EQ(err.what(), kExpectedErrorMessage); - - qr.Clear(); - q = Query("nm1").Where("ft3", CondEq, "-\"word1 word2\""); - err = rt.reindexer->Select(q, qr); - EXPECT_EQ(err.code(), errParams) << err.what(); - EXPECT_EQ(err.what(), kExpectedErrorMessage); - - qr.Clear(); - q = Query("nm1").Where("ft3", CondEq, "-'word1 word2'"); - err = rt.reindexer->Select(q, qr); - EXPECT_EQ(err.code(), errParams) << err.what(); - EXPECT_EQ(err.what(), kExpectedErrorMessage); - - qr.Clear(); - q = Query("nm1").Where("ft3", CondEq, "-word0 -'word1 word2' -word7"); - err = rt.reindexer->Select(q, qr); - EXPECT_EQ(err.code(), errParams) << err.what(); - EXPECT_EQ(err.what(), kExpectedErrorMessage); - - // Empty DSL is allowed - qr.Clear(); - q = Query("nm1").Where("ft3", CondEq, ""); - err = rt.reindexer->Select(q, qr); - EXPECT_TRUE(err.ok()) << err.what(); - EXPECT_EQ(qr.Count(), 0); - - // Stop-word + 'minus' have to return empty response, to avoid random errors for user - qr.Clear(); - q = Query("nm1").Where("ft3", CondEq, "-word1 teststopword -word2"); - err = rt.reindexer->Select(q, qr); - EXPECT_TRUE(err.ok()) << err.what(); - EXPECT_EQ(qr.Count(), 0); - } -} - -// Check ft preselect logic with joins. Joined results have to be return even after multiple queries (issue #1437) -TEST_P(FTGenericApi, JoinsWithFtPreselect) { - using reindexer::Query; - using reindexer::QueryResults; - - auto cfg = GetDefaultConfig(); - cfg.enablePreselectBeforeFt = true; - Init(cfg); - const int firstId = counter_; - Add("word1 word2 word3"sv); - Add("word3 word4"sv); - Add("word2 word5 word7"sv); - - fast_hash_map joinedNsItems; - const std::string kJoinedNs = "ns_for_joins"; - const std::string kMainNs = "nm1"; - constexpr unsigned kQueryRepetitions = 6; - CreateAndFillSimpleNs(kJoinedNs, 0, 10, &joinedNsItems); - - const Query q = - Query(kMainNs).Where("ft3", CondEq, "word2").InnerJoin("id", "id", CondEq, Query(kJoinedNs).Where("id", CondLt, firstId + 1)); - const auto expectedJoinedJSON = fmt::sprintf(R"json("joined_%s":[%s])json", kJoinedNs, joinedNsItems[firstId]); - for (unsigned i = 0; i < kQueryRepetitions; ++i) { - QueryResults qr; - auto err = rt.reindexer->Select(q, qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), 1); - auto item = qr.begin().GetItem(); - ASSERT_EQ(item["id"].As(), firstId); - reindexer::WrSerializer wser; - err = qr.begin().GetJSON(wser, false); - ASSERT_TRUE(err.ok()) << err.what(); - EXPECT_TRUE(wser.Slice().find(expectedJoinedJSON) != std::string_view::npos) - << "Expecting substring '" << expectedJoinedJSON << "', but json was: '" << wser.Slice() << "'. 
Iteration: " << i; - } -} - -// Check that explain with ft preselect contains all the expected entries (issue #1437) -TEST_P(FTGenericApi, ExplainWithFtPreselect) { - using reindexer::Query; - using reindexer::QueryResults; - - auto cfg = GetDefaultConfig(); - cfg.enablePreselectBeforeFt = true; - Init(cfg); - const int firstId = counter_; - Add("word1 word2 word3"sv); - Add("word3 word4"sv); - Add("word2 word5 word7"sv); - const int lastId = counter_; - - const std::string kJoinedNs = "ns_for_joins"; - const std::string kMainNs = "nm1"; - CreateAndFillSimpleNs(kJoinedNs, 0, 10, nullptr); - - { - const Query q = Query(kMainNs) - .Where("ft3", CondEq, "word2") - .OpenBracket() - .InnerJoin("id", "id", CondEq, Query(kJoinedNs).Where("id", CondLt, firstId + 1)) - .Or() - .Where("id", CondEq, lastId - 1) - .CloseBracket() - .Explain(); - QueryResults qr; - auto err = rt.reindexer->Select(q, qr); - ASSERT_TRUE(err.ok()) << err.what(); - EXPECT_EQ(qr.Count(), 2); - // Check explain's content - YAML::Node root = YAML::Load(qr.GetExplainResults()); - auto selectors = root["selectors"]; - ASSERT_TRUE(selectors.IsSequence()) << qr.GetExplainResults(); - ASSERT_EQ(selectors.size(), 2) << qr.GetExplainResults(); - ASSERT_EQ(selectors[0]["field"].as(), "(-scan and (id and inner_join ns_for_joins) or id)") << qr.GetExplainResults(); - ASSERT_EQ(selectors[1]["field"].as(), "ft3") << qr.GetExplainResults(); - } - { - // Check the same query with extra brackets over ft condition. Make sure, that ft-index was still move to the end of the query - const Query q = Query(kMainNs) - .OpenBracket() - .Where("ft3", CondEq, "word2") - .CloseBracket() - .OpenBracket() - .InnerJoin("id", "id", CondEq, Query(kJoinedNs).Where("id", CondLt, firstId + 1)) - .Or() - .Where("id", CondEq, lastId - 1) - .CloseBracket() - .Explain(); - QueryResults qr; - auto err = rt.reindexer->Select(q, qr); - ASSERT_TRUE(err.ok()) << err.what(); - EXPECT_EQ(qr.Count(), 2); - // Check explain's content - YAML::Node root = YAML::Load(qr.GetExplainResults()); - auto selectors = root["selectors"]; - ASSERT_TRUE(selectors.IsSequence()) << qr.GetExplainResults(); - ASSERT_EQ(selectors.size(), 2) << qr.GetExplainResults(); - ASSERT_EQ(selectors[0]["field"].as(), "(-scan and (id and inner_join ns_for_joins) or id)") << qr.GetExplainResults(); - ASSERT_EQ(selectors[1]["field"].as(), "ft3") << qr.GetExplainResults(); - } -} - -TEST_P(FTGenericApi, TotalCountWithFtPreselect) { - using reindexer::Query; - using reindexer::QueryResults; - using reindexer::Variant; - - auto cfg = GetDefaultConfig(); - auto preselectIsEnabled = true; - cfg.enablePreselectBeforeFt = preselectIsEnabled; - Init(cfg); - const int firstId = counter_; - Add("word5"sv); - Add("word1 word2 word3"sv); - Add("word3 word4"sv); - Add("word2 word5 word7"sv); - const int lastId = counter_; - - const std::string kJoinedNs = "ns_for_joins"; - const std::string kMainNs = "nm1"; - CreateAndFillSimpleNs(kJoinedNs, 0, 10, nullptr); - - for (auto preselect : {true, false}) { - if (preselectIsEnabled != preselect) { - auto cfg = GetDefaultConfig(); - preselectIsEnabled = preselect; - cfg.enablePreselectBeforeFt = preselectIsEnabled; - SetFTConfig(cfg); - } - std::string_view kPreselectStr = preselect ? 
" (with ft preselect) " : " (no ft preselect) "; - - struct Case { - Query query; - int limit; - int expectedTotalCount; - }; - std::vector cases = {{.query = Query(kMainNs).Where("ft3", CondEq, "word2 word4"), .limit = 2, .expectedTotalCount = 3}, - {.query = Query(kMainNs).Where("ft3", CondEq, "word2").Where("id", CondEq, {Variant{lastId - 3}}), - .limit = 0, - .expectedTotalCount = 1}, - {.query = Query(kMainNs) - .Where("ft3", CondEq, "word2") - .InnerJoin("id", "id", CondEq, Query(kJoinedNs).Where("id", CondLt, firstId + 2).Limit(0)), - .limit = 0, - .expectedTotalCount = 1}, - {.query = Query(kMainNs) - .Where("ft3", CondEq, "word2 word3") - .OpenBracket() - .InnerJoin("id", "id", CondEq, Query(kJoinedNs).Where("id", CondLt, firstId + 2).Limit(0)) - .Or() - .Where("id", CondSet, {Variant{lastId - 1}, Variant{lastId - 2}}) - .CloseBracket(), - .limit = 1, - .expectedTotalCount = 3}, - {.query = Query(kMainNs) - .Where("ft3", CondEq, "word2 word3") - .InnerJoin("id", "id", CondEq, Query(kJoinedNs).Where("id", CondLt, lastId).Limit(0)) - .Where("id", CondSet, {Variant{lastId - 1}, Variant{lastId - 2}}), - .limit = 1, - .expectedTotalCount = 2}, - {.query = Query(kMainNs) - .OpenBracket() - .Where("ft3", CondEq, "word2") - .CloseBracket() - .OpenBracket() - .InnerJoin("id", "id", CondEq, Query(kJoinedNs).Where("id", CondLt, firstId + 2)) - .Or() - .Where("id", CondEq, lastId - 1) - .CloseBracket(), - .limit = 0, - .expectedTotalCount = 2}}; - - for (auto& c : cases) { - c.query.ReqTotal(); - // Execute initial query - { - QueryResults qr; - auto err = rt.reindexer->Select(c.query, qr); - ASSERT_TRUE(err.ok()) << kPreselectStr << err.what() << "\n" << c.query.GetSQL(); - EXPECT_EQ(qr.Count(), c.expectedTotalCount) << kPreselectStr << c.query.GetSQL(); - EXPECT_EQ(qr.TotalCount(), c.expectedTotalCount) << kPreselectStr << c.query.GetSQL(); - } - - // Execute query with limit - const Query q = Query(c.query).Limit(c.limit); - { - QueryResults qr; - auto err = rt.reindexer->Select(q, qr); - ASSERT_TRUE(err.ok()) << kPreselectStr << err.what() << "\n" << c.query.GetSQL(); - EXPECT_EQ(qr.Count(), c.limit) << kPreselectStr << c.query.GetSQL(); - EXPECT_EQ(qr.TotalCount(), c.expectedTotalCount) << kPreselectStr << c.query.GetSQL(); - } - } - } -} - -TEST_P(FTGenericApi, StopWordsWithMorphemes) { - reindexer::FtFastConfig cfg = GetDefaultConfig(); - - Init(cfg); - Add("Шахматы из слоновой кости"sv); - Add("Мат в эфире "sv); - Add("Известняк"sv); - Add("Известия"sv); - Add("Изверг"sv); - - Add("Подобрал подосиновики, положил в лубочек"sv); - Add("Подопытный кролик"sv); - Add("Шла Саша по шоссе"sv); - - Add("Зайка серенький под елочкой скакал"sv); - Add("За Альянс! (с)"sv); - Add("Заноза в пальце"sv); - - Add("На западном фронте без перемен"sv); - Add("Наливные яблочки"sv); - Add("Нарком СССР"sv); - - CheckResults("*из*", {{"!Известняк!", ""}, {"!Известия!", ""}, {"!Изверг!", ""}}, false); - CheckResults("из", {}, false); - - CheckResults("*под*", {{"!Подобрал подосиновики!, положил в лубочек", ""}, {"!Подопытный! кролик", ""}}, false); - CheckResults("под", {}, false); - - CheckResults( - "*за*", {{"!Зайка! серенький под елочкой скакал", ""}, {"!Заноза! в пальце", ""}, {"На !западном! фронте без перемен", ""}}, false); - CheckResults("за", {}, false); - - CheckResults("*на*", - { - {"!Наливные! яблочки", ""}, - {"!Нарком! 
СССР", ""}, - }, - false); - CheckResults("на", {}, false); - - cfg.stopWords.clear(); - - cfg.stopWords.insert({"на"}); - cfg.stopWords.insert({"мат", reindexer::StopWord::Type::Morpheme}); - - SetFTConfig(cfg); - - CheckResults("*из*", {{"Шахматы !из! слоновой кости", ""}, {"!Известняк!", ""}, {"!Известия!", ""}, {"!Изверг!", ""}}, false); - CheckResults("из", {{"Шахматы !из! слоновой кости", ""}}, false); - - CheckResults( - "*под*", - {{"!Подобрал подосиновики!, положил в лубочек", ""}, {"!Подопытный! кролик", ""}, {"Зайка серенький !под! елочкой скакал", ""}}, - false); - CheckResults("под", {{"Зайка серенький !под! елочкой скакал", ""}}, false); - - CheckResults("*по*", - {{"Шла Саша !по! шоссе", ""}, - {"!Подобрал подосиновики, положил! в лубочек", ""}, - {"!Подопытный! кролик", ""}, - {"Зайка серенький !под! елочкой скакал", ""}}, - false); - CheckResults("по~", {{"Шла Саша !по! шоссе", ""}, {"Зайка серенький !под! елочкой скакал", ""}}, false); - CheckResults("по", {{"Шла Саша !по! шоссе", ""}}, false); - - CheckResults("*мат*", {{"!Шахматы! из слоновой кости", ""}}, false); - CheckResults("мат", {}, false); - - CheckResults("*за*", - {{"!Зайка! серенький под елочкой скакал", ""}, - {"!Заноза! в пальце", ""}, - {"!За! Альянс! (с)", ""}, - {"На !западном! фронте без перемен", ""}}, - false); - CheckResults("за", {{"!За! Альянс! (с)", ""}}, false); - - CheckResults("*на*", {}, false); - CheckResults("на~", {}, false); - CheckResults("на", {}, false); -} - -INSTANTIATE_TEST_SUITE_P(, FTGenericApi, - ::testing::Values(reindexer::FtFastConfig::Optimization::Memory, reindexer::FtFastConfig::Optimization::CPU), - [](const auto& info) { - switch (info.param) { - case reindexer::FtFastConfig::Optimization::Memory: - return "OptimizationByMemory"; - case reindexer::FtFastConfig::Optimization::CPU: - return "OptimizationByCPU"; - default: - assert(false); - std::abort(); - } - }); diff --git a/cpp_src/gtests/tests/unit/ft/ft_select_functions.cc b/cpp_src/gtests/tests/unit/ft/ft_select_functions.cc deleted file mode 100644 index b684010b8..000000000 --- a/cpp_src/gtests/tests/unit/ft/ft_select_functions.cc +++ /dev/null @@ -1,492 +0,0 @@ -#include -#include "ft_api.h" - -using namespace std::string_view_literals; - -class FTSelectFunctionsApi : public FTApi { -protected: - std::string_view GetDefaultNamespace() noexcept override { return "ft_seletc_fn_default_namespace"; } -}; - -TEST_P(FTSelectFunctionsApi, SnippetN) { - auto ftCfg = GetDefaultConfig(); - Init(ftCfg); - Add("one two three gg three empty empty empty empty three"sv); - - { - reindexer::Query q("nm1"); - q.Where("ft1", CondEq, "three").AddFunction("ft1=snippet_n('','',5,5,'{')"); - reindexer::QueryResults res; - reindexer::Error err = rt.reindexer->Select(q, res); - EXPECT_FALSE(err.ok()); - EXPECT_EQ(err.what(), "snippet_n: Incorrect count of position arguments. 
Found 5 required 4."); - } - { // check other case, error on not last argument - reindexer::Query q("nm1"); - q.Where("ft1", CondEq, "three").AddFunction("ft1=snippet_n('','',5,5,'{','{')"); - reindexer::QueryResults res; - reindexer::Error err = rt.reindexer->Select(q, res); - EXPECT_FALSE(err.ok()); - EXPECT_EQ(err.what(), "snippet_n: Unexpected token ','."); - } - { - reindexer::Query q("nm1"); - q.Where("ft1", CondEq, "three").AddFunction("ft1=snippet_n('','',5,5,pre_delim='{',pre_delim='}')"); - reindexer::QueryResults res; - reindexer::Error err = rt.reindexer->Select(q, res); - EXPECT_FALSE(err.ok()); - EXPECT_EQ(err.what(), "snippet_n: Argument already added 'pre_delim'."); - } - { - reindexer::Query q("nm1"); - q.Where("ft1", CondEq, "three").AddFunction("ft1=snippet_n('','',5,5,pre_delim='{',pre_delim='}',post_delim='!')"); - reindexer::QueryResults res; - reindexer::Error err = rt.reindexer->Select(q, res); - EXPECT_FALSE(err.ok()); - EXPECT_EQ(err.what(), "snippet_n: Argument already added 'pre_delim'."); - } - { - reindexer::Query q("nm1"); - q.Where("ft1", CondEq, "three").AddFunction("ft1=snippet_n('','',5,pre_delim='{')"); - reindexer::QueryResults res; - reindexer::Error err = rt.reindexer->Select(q, res); - EXPECT_FALSE(err.ok()); - EXPECT_EQ(err.what(), "snippet_n: Incorrect count of position arguments. Found 3 required 4."); - } - { // check other case, error on not last argument - reindexer::Query q("nm1"); - q.Where("ft1", CondEq, "three").AddFunction("ft1=snippet_n('' , '',5,pre_delim='{',post_delim='')"); - reindexer::QueryResults res; - reindexer::Error err = rt.reindexer->Select(q, res); - EXPECT_FALSE(err.ok()); - EXPECT_EQ(err.what(), "snippet_n: Unexpected token ',', expecting positional argument (1 more positional args required)"); - } - { - reindexer::Query q("nm1"); - q.Where("ft1", CondEq, "three").AddFunction("ft1=snippet_n('','',5,5,pre_delim='{',pre_delim='}') g"); - reindexer::QueryResults res; - reindexer::Error err = rt.reindexer->Select(q, res); - EXPECT_FALSE(err.ok()); - EXPECT_EQ(err.what(), "snippet_n: Unexpected character `g` after closing parenthesis."); - } - - { - reindexer::Query q("nm1"); - q.Where("ft1", CondEq, "three").AddFunction("ft1=snippet_n('','',,5,pre_delim='{')"); - reindexer::QueryResults res; - reindexer::Error err = rt.reindexer->Select(q, res); - EXPECT_FALSE(err.ok()); - EXPECT_EQ(err.what(), "snippet_n: Unexpected token ',', expecting positional argument (2 more positional args required)"); - } - { - reindexer::Query q("nm1"); - q.Where("ft1", CondEq, "three").AddFunction("ft1=snippet_n('','',5,5,,pre_delim='{')"); - reindexer::QueryResults res; - reindexer::Error err = rt.reindexer->Select(q, res); - EXPECT_FALSE(err.ok()); - EXPECT_EQ(err.what(), "snippet_n: Unexpected token ','."); - } - { - reindexer::Query q("nm1"); - q.Where("ft1", CondEq, "three").AddFunction("ft1=snippet_n('','',5,5,pre_delim='{',,post_delim='}')"); - reindexer::QueryResults res; - reindexer::Error err = rt.reindexer->Select(q, res); - EXPECT_FALSE(err.ok()); - EXPECT_EQ(err.what(), "snippet_n: Unexpected token ','."); - } - - { - reindexer::Query q("nm1"); - q.Where("ft1", CondEq, "three").AddFunction("ft1=snippet_n('''n','',5,5,pre_delim='{')"); - reindexer::QueryResults res; - reindexer::Error err = rt.reindexer->Select(q, res); - EXPECT_FALSE(err.ok()); - EXPECT_EQ(err.what(), "snippet_n: Unexpected token 'n'."); - } - { - reindexer::Query q("nm1"); - q.Where("ft1", CondEq, "three").AddFunction("ft1=snippet_n(''n,'',5,5,pre_delim='{')"); - 
reindexer::QueryResults res; - reindexer::Error err = rt.reindexer->Select(q, res); - EXPECT_FALSE(err.ok()); - EXPECT_EQ(err.what(), "snippet_n: Unexpected token 'n'."); - } - { - reindexer::Query q("nm1"); - q.Where("ft1", CondEq, "three").AddFunction("ft1=snippet_n('',''5,5,5,pre_delim='{')"); - reindexer::QueryResults res; - reindexer::Error err = rt.reindexer->Select(q, res); - EXPECT_FALSE(err.ok()); - EXPECT_EQ(err.what(), "snippet_n: Unexpected token '5'."); - } - { - reindexer::Query q("nm1"); - q.Where("ft1", CondEq, "three").AddFunction("ft1=snippet_n('','',5'v',5,pre_delim='{')"); - reindexer::QueryResults res; - reindexer::Error err = rt.reindexer->Select(q, res); - EXPECT_FALSE(err.ok()); - EXPECT_EQ(err.what(), "snippet_n: Unexpected token 'v'."); - } - { - reindexer::Query q("nm1"); - q.Where("ft1", CondEq, "three").AddFunction("ft1=snippet_n('','',5,5,\"pre_delim\"pre_delim='{')"); - reindexer::QueryResults res; - reindexer::Error err = rt.reindexer->Select(q, res); - EXPECT_FALSE(err.ok()); - EXPECT_EQ(err.what(), "snippet_n: Unexpected token 'pre_delim'."); - } - { - reindexer::Query q("nm1"); - q.Where("ft1", CondEq, "three").AddFunction("ft1=snippet_n('','',5,5,pre_delim= ='{')"); - reindexer::QueryResults res; - reindexer::Error err = rt.reindexer->Select(q, res); - EXPECT_FALSE(err.ok()); - EXPECT_EQ(err.what(), "snippet_n: Unexpected token '='."); - } - { - reindexer::Query q("nm1"); - q.Where("ft1", CondEq, "three").AddFunction("ft1=snippet_n('','',5,5,pre_delim='{'8)"); - reindexer::QueryResults res; - reindexer::Error err = rt.reindexer->Select(q, res); - EXPECT_FALSE(err.ok()); - EXPECT_EQ(err.what(), "snippet_n: Unexpected token '8'."); - } - { - reindexer::Query q("nm1"); - q.Where("ft1", CondEq, "three").AddFunction("ft1=snippet_n('','',5,5,pre_delim=)"); - reindexer::QueryResults res; - reindexer::Error err = rt.reindexer->Select(q, res); - EXPECT_FALSE(err.ok()); - EXPECT_EQ(err.what(), "snippet_n: Unexpected token 'pre_delim'."); - } - { - reindexer::Query q("nm1"); - q.Where("ft1", CondEq, "three").AddFunction("ft1=snippet_n('','',5,5,not_delim='{')"); - reindexer::QueryResults res; - reindexer::Error err = rt.reindexer->Select(q, res); - EXPECT_FALSE(err.ok()); - EXPECT_EQ(err.what(), "snippet_n: Unknown argument name 'not_delim'."); - } - { - reindexer::Query q("nm1"); - q.Where("ft1", CondEq, "three").AddFunction("ft1=snippet_n('','',5,5,not_delim='{',pre_delim='}')"); - reindexer::QueryResults res; - reindexer::Error err = rt.reindexer->Select(q, res); - EXPECT_FALSE(err.ok()); - EXPECT_EQ(err.what(), "snippet_n: Unknown argument name 'not_delim'."); - } - { - reindexer::Query q("nm1"); - q.Where("ft1", CondEq, "three").AddFunction("ft1=snippet_n('','',5,5"); - reindexer::QueryResults res; - reindexer::Error err = rt.reindexer->Select(q, res); - EXPECT_FALSE(err.ok()); - EXPECT_EQ(err.what(), "snippet_n: The closing parenthesis is required, but found `5`"); - } - { - reindexer::Query q("nm1"); - q.Where("ft1", CondEq, "three").AddFunction("ft1=snippet_n{('','',5,5}"); - reindexer::QueryResults res; - reindexer::Error err = rt.reindexer->Select(q, res); - EXPECT_FALSE(err.ok()); - EXPECT_EQ(err.what(), "snippet_n: An open parenthesis is required, but found `{`"); - } - { - reindexer::Query q("nm1"); - q.Where("ft1", CondEq, "three").AddFunction(R"#(ft1=snippet_n('','',5,5,"post_delim"="v"})#"); - reindexer::QueryResults res; - reindexer::Error err = rt.reindexer->Select(q, res); - EXPECT_FALSE(err.ok()); - EXPECT_EQ(err.what(), "snippet_n: Unexpected 
token 'v'."); - } - { - reindexer::Query q("nm1"); - q.Where("ft1", CondEq, "three").AddFunction(R"#(ft1=snippet_n(<>,'',5,5,"post_delim"='v'})#"); - reindexer::QueryResults res; - reindexer::Error err = rt.reindexer->Select(q, res); - EXPECT_FALSE(err.ok()); - EXPECT_EQ(err.what(), "snippet_n: Unexpected token '<>'"); - } - { - reindexer::Query q("nm1"); - q.Where("ft1", CondEq, "three").AddFunction(R"#(ft1=snippet_n('<>','',5,5,='v'})#"); - reindexer::QueryResults res; - reindexer::Error err = rt.reindexer->Select(q, res); - EXPECT_FALSE(err.ok()); - EXPECT_EQ(err.what(), "snippet_n: Argument name is empty."); - } - { - reindexer::Query q("nm1"); - q.Where("ft1", CondEq, "three").AddFunction(R"#(ft1=snippet_n('<>','','5a',5))#"); - reindexer::QueryResults res; - reindexer::Error err = rt.reindexer->Select(q, res); - EXPECT_FALSE(err.ok()); - EXPECT_EQ(err.what(), "Invalid snippet param before - 5a is not a number"); - } - { - reindexer::Query q("nm1"); - q.Where("ft1", CondEq, "three").AddFunction(R"#(ft1=snippet_n('<>','',5,'5b'))#"); - reindexer::QueryResults res; - reindexer::Error err = rt.reindexer->Select(q, res); - EXPECT_FALSE(err.ok()); - EXPECT_EQ(err.what(), "Invalid snippet param after - 5b is not a number"); - } - - { - reindexer::Query q("nm1"); - q.Select({"ft1"}).Where("ft1", CondEq, "three").AddFunction("ft1=snippet_n('','',5,5,pre_delim=',')"); - reindexer::QueryResults res; - reindexer::Error err = rt.reindexer->Select(q, res); - EXPECT_TRUE(err.ok()) << err.what(); - EXPECT_EQ(res.Count(), 1); - reindexer::WrSerializer wrSer; - err = res.begin().GetJSON(wrSer, false); - EXPECT_TRUE(err.ok()) << err.what(); - EXPECT_EQ(std::string(wrSer.Slice()), R"S({"ft1":", two three gg three empt ,mpty three "})S"); - } - - { - reindexer::Query q("nm1"); - q.Select({"ft1"}).Where("ft1", CondEq, "three").AddFunction(R"S(ft1=snippet_n('' , '' - ,5 ,5 , pre_delim=','))S"); - reindexer::QueryResults res; - reindexer::Error err = rt.reindexer->Select(q, res); - EXPECT_TRUE(err.ok()) << err.what(); - EXPECT_EQ(res.Count(), 1); - reindexer::WrSerializer wrSer; - err = res.begin().GetJSON(wrSer, false); - EXPECT_TRUE(err.ok()) << err.what(); - EXPECT_EQ(std::string(wrSer.Slice()), R"S({"ft1":", two three gg three empt ,mpty three "})S"); - } - - { - reindexer::Query q("nm1"); - q.Select({"ft1"}).Where("ft1", CondEq, "three").AddFunction(R"S(ft1=snippet_n('','',5,5,pre_delim=' g ', post_delim='h'))S"); - reindexer::QueryResults res; - reindexer::Error err = rt.reindexer->Select(q, res); - EXPECT_TRUE(err.ok()) << err.what(); - EXPECT_EQ(res.Count(), 1); - if (res.Count()) { - reindexer::WrSerializer wrSer; - err = res.begin().GetJSON(wrSer, false); - EXPECT_TRUE(err.ok()) << err.what(); - EXPECT_EQ(wrSer.Slice(), R"S({"ft1":" g two three gg three empth g mpty threeh"})S"); - } - } - { - reindexer::Query q("nm1"); - q.Select({"ft1"}) - .Where("ft1", CondEq, "three") - .AddFunction(R"S(ft1=snippet_n('','','5',5,post_delim='h',pre_delim=' g '))S"); - reindexer::QueryResults res; - reindexer::Error err = rt.reindexer->Select(q, res); - EXPECT_TRUE(err.ok()) << err.what(); - EXPECT_EQ(res.Count(), 1); - if (res.Count()) { - reindexer::WrSerializer wrSer; - err = res.begin().GetJSON(wrSer, false); - EXPECT_TRUE(err.ok()) << err.what(); - EXPECT_EQ(wrSer.Slice(), R"S({"ft1":" g two three gg three empth g mpty threeh"})S"); - } - } - { - reindexer::Query q("nm1"); - q.Select({"ft1"}).Where("ft1", CondEq, "three").AddFunction(R"S(ft1=snippet_n('','',5,5,post_delim='h'))S"); - reindexer::QueryResults 
res; - reindexer::Error err = rt.reindexer->Select(q, res); - EXPECT_TRUE(err.ok()) << err.what(); - EXPECT_EQ(res.Count(), 1); - if (res.Count()) { - reindexer::WrSerializer wrSer; - err = res.begin().GetJSON(wrSer, false); - EXPECT_TRUE(err.ok()) << err.what(); - EXPECT_EQ(wrSer.Slice(), R"S({"ft1":" two three gg three empthmpty threeh"})S"); - } - } - { - reindexer::Query q("nm1"); - q.Select({"ft1"}).Where("ft1", CondEq, "three").AddFunction(R"S(ft1=snippet_n('','',5,5,pre_delim='!'))S"); - reindexer::QueryResults res; - reindexer::Error err = rt.reindexer->Select(q, res); - EXPECT_TRUE(err.ok()) << err.what(); - EXPECT_EQ(res.Count(), 1); - if (res.Count()) { - reindexer::WrSerializer wrSer; - err = res.begin().GetJSON(wrSer, false); - EXPECT_TRUE(err.ok()) << err.what(); - EXPECT_EQ(wrSer.Slice(), R"S({"ft1":"! two three gg three empt !mpty three "})S"); - } - } -} - -TEST_P(FTSelectFunctionsApi, SnippetNOthers) { - auto ftCfg = GetDefaultConfig(); - Init(ftCfg); - - std::string_view s1 = "123456 one 789012"sv; - [[maybe_unused]] auto [ss1, id1] = Add(s1); - - std::string_view s2 = "123456 one 789 one 987654321"sv; - [[maybe_unused]] auto [ss2, id2] = Add(s2); - - std::string_view s3 = "123456 one two 789 one two 987654321"sv; - [[maybe_unused]] auto [ss3, id3] = Add(s3); - - std::string_view s4 = "123456 one один два two 789 one один два two 987654321"sv; - [[maybe_unused]] auto [ss4, id4] = Add(s4); - - auto check = [&](int index, const std::string& find, const std::string& fun, std::string_view answer) { - reindexer::Query q("nm1"); - q.Select({"ft1"}).Where("ft1", CondEq, find).Where("id", CondEq, index).AddFunction(fun); - reindexer::QueryResults res; - reindexer::Error err = rt.reindexer->Select(q, res); - EXPECT_TRUE(err.ok()) << err.what(); - EXPECT_EQ(res.Count(), 1); - if (res.Count()) { - reindexer::WrSerializer wrSer; - err = res.begin().GetJSON(wrSer, false); - EXPECT_TRUE(err.ok()) << err.what(); - EXPECT_EQ(wrSer.Slice(), answer); - } - }; - check(id1, "one", R"S(ft1=snippet_n('<','>',5,5,pre_delim='[',post_delim=']',with_area=0))S", R"S({"ft1":"[3456 7890]"})S"); - check(id1, "one", R"S(ft1=snippet_n('<','>',5,5,pre_delim='[',post_delim=']'))S", R"S({"ft1":"[3456 7890]"})S"); - check(id2, "one", R"S(ft1=snippet_n('<','>',5,5,pre_delim='[',post_delim=']'))S", R"S({"ft1":"[3456 789 9876]"})S"); - check(id3, R"S("one two")S", R"S(ft1=snippet_n('<','>',2,2,pre_delim='[',post_delim=']'))S", - R"S({"ft1":"[6 7][9 9]"})S"); - check(id3, R"S("one two")S", R"S(ft1=snippet_n('<','>',2,2,pre_delim='[',post_delim=']',with_area=1))S", - R"S({"ft1":"[[5,16]6 7][[17,28]9 9]"})S"); - check(id4, R"S("one один два two")S", R"S(ft1=snippet_n('<','>',2,2,pre_delim='[',post_delim=']'))S", - R"S({"ft1":"[6 7][9 9]"})S"); - check(id4, R"S("one один два two")S", R"S(ft1=snippet_n('<','>',2,2,with_area=1,pre_delim='[',post_delim=']'))S", - R"S({"ft1":"[[5,25]6 7][[26,46]9 9]"})S"); -} - -TEST_P(FTSelectFunctionsApi, SnippetNOffset) { - auto ftCfg = GetDefaultConfig(); - Init(ftCfg); - - std::string_view s1 = "one"sv; - [[maybe_unused]] auto [ss1, id1] = Add(s1); - - std::string_view s2 = "один"sv; - [[maybe_unused]] auto [ss2, id2] = Add(s2); - - std::string_view s3 = "asd one ghj"sv; - [[maybe_unused]] auto [ss3, id3] = Add(s3); - - std::string_view s4 = "лмн один опр"sv; - [[maybe_unused]] auto [ss4, id4] = Add(s4); - - std::string_view s5 = "лмн один опр один лмк"sv; - [[maybe_unused]] auto [ss5, id5] = Add(s5); - - std::string_view s6 = "лмн опр jkl один"sv; - [[maybe_unused]] auto [ss6, 
id6] = Add(s6); - - auto check = [&](int index, const std::string& find, const std::string& fun, std::string_view answer) { - reindexer::Query q("nm1"); - q.Select({"ft1"}).Where("ft1", CondEq, find).Where("id", CondEq, index).AddFunction(fun); - reindexer::QueryResults res; - reindexer::Error err = rt.reindexer->Select(q, res); - EXPECT_TRUE(err.ok()) << err.what(); - EXPECT_EQ(res.Count(), 1); - if (res.Count()) { - reindexer::WrSerializer wrSer; - err = res.begin().GetJSON(wrSer, false); - EXPECT_TRUE(err.ok()) << err.what(); - EXPECT_EQ(wrSer.Slice(), answer); - } - }; - check(id1, "one", R"S(ft1=snippet_n('','',0,0,with_area=1))S", R"S({"ft1":"[0,3]one "})S"); - check(id1, "one", R"S(ft1=snippet_n('','',5,5,with_area=1))S", R"S({"ft1":"[0,3]one "})S"); - check(id2, "один", R"S(ft1=snippet_n('','',0,0,with_area=1))S", R"S({"ft1":"[0,4]один "})S"); - check(id2, "один", R"S(ft1=snippet_n('','',5,5,with_area=1))S", R"S({"ft1":"[0,4]один "})S"); - - check(id3, "one", R"S(ft1=snippet_n('','',0,0,with_area=1))S", R"S({"ft1":"[4,7]one "})S"); - check(id3, "one", R"S(ft1=snippet_n('','',1,1,with_area=1))S", R"S({"ft1":"[3,8] one "})S"); - check(id3, "one", R"S(ft1=snippet_n('','',4,4,with_area=1))S", R"S({"ft1":"[0,11]asd one ghj "})S"); - check(id3, "one", R"S(ft1=snippet_n('','',5,5,with_area=1))S", R"S({"ft1":"[0,11]asd one ghj "})S"); - - check(id6, "один", R"S(ft1=snippet_n('','',2,0,with_area=1))S", R"S({"ft1":"[10,16]l один "})S"); - check(id6, "один", R"S(ft1=snippet_n('','',2,2,with_area=1))S", R"S({"ft1":"[10,16]l один "})S"); - - check(id4, "один", R"S(ft1=snippet_n('','',0,0,with_area=1))S", R"S({"ft1":"[4,8]один "})S"); - check(id4, "один", R"S(ft1=snippet_n('','',1,1,with_area=1))S", R"S({"ft1":"[3,9] один "})S"); - check(id4, "один", R"S(ft1=snippet_n('','',2,2,with_area=1))S", R"S({"ft1":"[2,10]н один о "})S"); - check(id4, "один", R"S(ft1=snippet_n('','',4,4,with_area=1))S", R"S({"ft1":"[0,12]лмн один опр "})S"); - check(id4, "один", R"S(ft1=snippet_n('','',5,5,with_area=1))S", R"S({"ft1":"[0,12]лмн один опр "})S"); - - check(id5, "один", R"S(ft1=snippet_n('','',0,0,with_area=1))S", R"S({"ft1":"[4,8]один [13,17]один "})S"); - check(id5, "один", R"S(ft1=snippet_n('','',2,2,with_area=1))S", R"S({"ft1":"[2,10]н один о [11,19]р один л "})S"); - - check(id5, "один", R"S(ft1=snippet_n('','',3,3,with_area=1))S", R"S({"ft1":"[1,20]мн один опр один лм "})S"); - check(id5, "один", R"S(ft1=snippet_n('{!','}',2,2,pre_delim='((',post_delim='))',with_area=1))S", - R"S({"ft1":"(([2,10]н {!один} о))(([11,19]р {!один} л))"})S"); - check(id5, "один", R"S(ft1=snippet_n('{!','}',3,3,pre_delim='(',post_delim=')',with_area=1))S", - R"S({"ft1":"([1,20]мн {!один} опр {!один} лм)"})S"); - check(id5, "один", R"S(ft1=snippet_n('{!','}',2,2,pre_delim='((',post_delim='))',with_area=0))S", - R"S({"ft1":"((н {!один} о))((р {!один} л))"})S"); - - check(id5, "один", R"S(ft1=snippet_n('{!','}',2,2,pre_delim='((',with_area=1))S", - R"S({"ft1":"(([2,10]н {!один} о (([11,19]р {!один} л "})S"); - check(id5, "один", R"S(ft1=snippet_n('{!','}',3,3,pre_delim='(',with_area=1))S", R"S({"ft1":"([1,20]мн {!один} опр {!один} лм "})S"); - check(id5, "один", R"S(ft1=snippet_n('{!','}',2,2,pre_delim='((',with_area=0))S", R"S({"ft1":"((н {!один} о ((р {!один} л "})S"); - - check(id5, "один", R"S(ft1=snippet_n('{!','}',2,2,post_delim='))',with_area=1))S", - R"S({"ft1":"[2,10]н {!один} о))[11,19]р {!один} л))"})S"); - check(id5, "один", R"S(ft1=snippet_n('{!','}',3,3,post_delim=')',with_area=1))S", R"S({"ft1":"[1,20]мн {!один} 
опр {!один} лм)"})S"); - check(id5, "один", R"S(ft1=snippet_n('{!','}',2,2,post_delim='))',with_area=0))S", R"S({"ft1":"н {!один} о))р {!один} л))"})S"); -} - -TEST_P(FTSelectFunctionsApi, SnippetNBounds) { - auto ftCfg = GetDefaultConfig(); - Init(ftCfg); - - std::string_view s1 = "one"sv; - [[maybe_unused]] auto [ss1, id1] = Add(s1); - - std::string_view s3 = "as|d one g!hj"sv; - [[maybe_unused]] auto [ss3, id3] = Add(s3); - - auto check = [&](int index, const std::string& find, const std::string& fun, std::string_view answer) { - reindexer::Query q("nm1"); - q.Select({"ft1"}).Where("ft1", CondEq, find).Where("id", CondEq, index).AddFunction(fun); - reindexer::QueryResults res; - reindexer::Error err = rt.reindexer->Select(q, res); - EXPECT_TRUE(err.ok()) << err.what(); - EXPECT_EQ(res.Count(), 1); - if (res.Count()) { - reindexer::WrSerializer wrSer; - err = res.begin().GetJSON(wrSer, false); - EXPECT_TRUE(err.ok()) << err.what(); - EXPECT_EQ(wrSer.Slice(), answer); - } - }; - check(id1, "one", R"S(ft1=snippet_n('','',0,0,with_area=1,left_bound='|',right_bound='|'))S", R"S({"ft1":"[0,3]one "})S"); - check(id3, "one", R"S(ft1=snippet_n('','',5,5,with_area=1,left_bound='|',right_bound='!'))S", R"S({"ft1":"[3,10]d one g "})S"); - check(id3, "one", R"S(ft1=snippet_n('','',1,1,with_area=1,left_bound='|',right_bound='!'))S", R"S({"ft1":"[4,9] one "})S"); - - check(id3, "one", R"S(ft1=snippet_n('','',5,5,with_area=1,right_bound='!'))S", R"S({"ft1":"[0,10]as|d one g "})S"); - check(id3, "one", R"S(ft1=snippet_n('','',6,5,with_area=1,right_bound='!'))S", R"S({"ft1":"[0,10]as|d one g "})S"); - check(id3, "one", R"S(ft1=snippet_n('','',4,5,with_area=1,right_bound='!'))S", R"S({"ft1":"[1,10]s|d one g "})S"); - - check(id3, "one", R"S(ft1=snippet_n('','',2,5,with_area=1,left_bound='|'))S", R"S({"ft1":"[3,13]d one g!hj "})S"); - check(id3, "one", R"S(ft1=snippet_n('','',2,6,with_area=1,left_bound='!'))S", R"S({"ft1":"[3,13]d one g!hj "})S"); - check(id3, "one", R"S(ft1=snippet_n('','',2,4,with_area=1,left_bound='!'))S", R"S({"ft1":"[3,12]d one g!h "})S"); - check(id3, "one", R"S(ft1=snippet_n('','',5,5,with_area=1,left_bound='!',right_bound='|'))S", R"S({"ft1":"[0,13]as|d one g!hj "})S"); -} - -INSTANTIATE_TEST_SUITE_P(, FTSelectFunctionsApi, - ::testing::Values(reindexer::FtFastConfig::Optimization::Memory, reindexer::FtFastConfig::Optimization::CPU), - [](const auto& info) { - switch (info.param) { - case reindexer::FtFastConfig::Optimization::Memory: - return "OptimizationByMemory"; - case reindexer::FtFastConfig::Optimization::CPU: - return "OptimizationByCPU"; - default: - assert(false); - std::abort(); - } - }); diff --git a/cpp_src/gtests/tests/unit/ft/ft_stress.cc b/cpp_src/gtests/tests/unit/ft/ft_stress.cc deleted file mode 100644 index e0a0d35e7..000000000 --- a/cpp_src/gtests/tests/unit/ft/ft_stress.cc +++ /dev/null @@ -1,174 +0,0 @@ -#include -#include "ft_api.h" -#include "tools/fsops.h" - -using namespace std::string_view_literals; - -class FTStressApi : public FTApi { -protected: - std::string_view GetDefaultNamespace() noexcept override { return "ft_stress_default_namespace"; } -}; - -TEST_P(FTStressApi, BasicStress) { - const std::string kStorage = reindexer::fs::JoinPath(reindexer::fs::GetTempDir(), "reindex_FTApi/BasicStress"); - reindexer::fs::RmDirAll(kStorage); - Init(GetDefaultConfig(), NS1, kStorage); - - std::vector data; - std::vector phrase; - - data.reserve(100000); - for (size_t i = 0; i < 100000; ++i) { - data.push_back(rt.RandString()); - } - - phrase.reserve(7000); - 
for (size_t i = 0; i < 7000; ++i) { - phrase.push_back(data[rand() % data.size()] + " " + data[rand() % data.size()] + " " + data[rand() % data.size()]); - } - - std::atomic terminate = false; - std::thread statsThread([&] { - while (!terminate) { - reindexer::QueryResults qr; - const auto err = rt.reindexer->Select(reindexer::Query("#memstats"), qr); - ASSERT_TRUE(err.ok()) << err.what(); - std::this_thread::sleep_for(std::chrono::milliseconds(10)); - } - }); - - for (size_t i = 0; i < phrase.size(); i++) { - Add(phrase[i], phrase[rand() % phrase.size()]); - if (i % 500 == 0) { - for (size_t j = 0; j < i; j++) { - auto res = StressSelect(phrase[j]); - bool found = false; - if (!res.Count()) { - abort(); - } - - for (auto it : res) { - auto ritem(it.GetItem(false)); - if (ritem["ft1"].As() == phrase[j]) { - found = true; - } - } - if (!found) { - abort(); - } - } - } - } - terminate = true; - statsThread.join(); -} - -TEST_P(FTStressApi, ConcurrencyCheck) { - const std::string kStorage = reindexer::fs::JoinPath(reindexer::fs::GetTempDir(), "reindex_FTApi/ConcurrencyCheck"); - reindexer::fs::RmDirAll(kStorage); - Init(GetDefaultConfig(), NS1, kStorage); - - Add("Her nose was very very long"sv); - Add("Her nose was exceptionally long"sv); - Add("Her nose was long"sv); - - rt.reindexer.reset(); - Init(GetDefaultConfig(), NS1, kStorage); // Restart rx to drop all the caches - - std::condition_variable cv; - std::mutex mtx; - bool ready = false; - std::vector threads; - std::atomic runningThreads = {0}; - constexpr unsigned kTotalThreads = 11; - std::thread statsThread; - std::atomic terminate = false; - for (unsigned i = 0; i < kTotalThreads; ++i) { - if (i == 0) { - statsThread = std::thread([&] { - std::unique_lock lck(mtx); - ++runningThreads; - cv.wait(lck, [&] { return ready; }); - lck.unlock(); - while (!terminate) { - reindexer::QueryResults qr; - const auto err = rt.reindexer->Select(reindexer::Query("#memstats"), qr); - ASSERT_TRUE(err.ok()) << err.what(); - } - }); - } else { - threads.emplace_back(std::thread([&] { - std::unique_lock lck(mtx); - ++runningThreads; - cv.wait(lck, [&] { return ready; }); - lck.unlock(); - CheckResults("'nose long'~3", {{"Her !nose was long!", ""}, {"Her !nose was exceptionally long!", ""}}, true); - })); - } - } - while (runningThreads.load() < kTotalThreads) { - std::this_thread::sleep_for(std::chrono::microseconds(100)); - } - { - std::lock_guard lck(mtx); - ready = true; - cv.notify_all(); - } - for (auto& th : threads) { - th.join(); - } - terminate = true; - statsThread.join(); -} - -TEST_P(FTStressApi, LargeMergeLimit) { - // Check if results are bounded by merge limit - auto ftCfg = GetDefaultConfig(); - ftCfg.mergeLimit = kMaxMergeLimitValue; - Init(ftCfg); - const std::string kBase1 = "aaaa"; - const std::string kBase2 = "bbbb"; - - reindexer::fast_hash_set strings1; - constexpr unsigned kPartLen = (kMaxMergeLimitValue + 15000) / 2; - for (unsigned i = 0; i < kPartLen; ++i) { - while (true) { - std::string val = kBase2 + rt.RandString(10, 10); - if (strings1.emplace(val).second) { - Add("nm1"sv, val); - break; - } - } - } - reindexer::fast_hash_set strings2; - auto fit = strings1.begin(); - for (unsigned i = 0; i < kPartLen; ++i, ++fit) { - while (true) { - std::string val = kBase2 + rt.RandString(10, 10); - if (strings2.emplace(val).second) { - if (fit == strings1.end()) { - fit = strings1.begin(); - } - Add("nm1"sv, val, fit.key()); - break; - } - } - } - - auto qr = SimpleSelect(fmt::sprintf("%s* %s*", kBase1, kBase2)); - 
ASSERT_EQ(qr.Count(), ftCfg.mergeLimit); -} - -INSTANTIATE_TEST_SUITE_P(, FTStressApi, - ::testing::Values(reindexer::FtFastConfig::Optimization::Memory, reindexer::FtFastConfig::Optimization::CPU), - [](const auto& info) { - switch (info.param) { - case reindexer::FtFastConfig::Optimization::Memory: - return "OptimizationByMemory"; - case reindexer::FtFastConfig::Optimization::CPU: - return "OptimizationByCPU"; - default: - assert(false); - std::abort(); - } - }); diff --git a/cpp_src/gtests/tests/unit/ft/ft_synonyms.cc b/cpp_src/gtests/tests/unit/ft/ft_synonyms.cc deleted file mode 100644 index f2eaba192..000000000 --- a/cpp_src/gtests/tests/unit/ft/ft_synonyms.cc +++ /dev/null @@ -1,348 +0,0 @@ -#include -#include "ft_api.h" - -using namespace std::string_view_literals; - -class FTSynonymsApi : public FTApi { -protected: - std::string_view GetDefaultNamespace() noexcept override { return "ft_synonyms_default_namespace"; } -}; - -TEST_P(FTSynonymsApi, CompositeRankWithSynonyms) { - auto cfg = GetDefaultConfig(); - cfg.synonyms = {{{"word"}, {"слово"}}}; - Init(cfg); - Add("word"sv, "слово"sv); - Add("world"sv, "world"sv); - - // rank of synonym is higher - CheckAllPermutations("@", {"ft1^0.5", "ft2^2"}, " word~", {{"!word!", "!слово!"}, {"!world!", "!world!"}}, true, ", "); -} - -TEST_P(FTSynonymsApi, SelectWithMultSynonymArea) { - reindexer::FtFastConfig config = GetDefaultConfig(); - // hyponyms as synonyms - config.synonyms = {{{"digit", "one", "two", "three", "big number"}, {"digit", "one", "two", "big number"}}, - {{"animal", "cat", "dog", "lion"}, {"animal", "cat", "dog", "lion"}}}; - config.maxAreasInDoc = 100; - Init(config); - - Add("digit cat empty one animal"sv); - - CheckResults(R"s("big number animal")s", {}, false, true); - CheckResults(R"s("digit animal")s", {{"!digit cat! empty !one animal!", ""}}, false, true); - CheckResults(R"s("two lion")s", {{"!digit cat! empty !one animal!", ""}}, false, true); -} - -TEST_P(FTSynonymsApi, SelectSynonyms) { - auto ftCfg = GetDefaultConfig(); - ftCfg.synonyms = {{{"лыв", "лав"}, {"love"}}, {{"лар", "hate"}, {"rex", "looove"}}}; - Init(ftCfg); - - Add("nm1"sv, "test"sv, "love rex"sv); - Add("nm1"sv, "test"sv, "no looove"sv); - Add("nm1"sv, "test"sv, "no match"sv); - - CheckAllPermutations("", {"лыв"}, "", {{"test", "!love! 
rex"}}); - CheckAllPermutations("", {"hate"}, "", {{"test", "love !rex!"}, {"test", "no !looove!"}}); -} - -TEST_P(FTSynonymsApi, SelectMultiwordSynonyms) { - auto ftCfg = GetDefaultConfig(); - ftCfg.synonyms = {{{"whole world", "UN", "United Nations"}, - {"UN", "ООН", "целый мир", "планета", "генеральная ассамблея организации объединенных наций"}}, - {{"word"}, {"одно слово"}}}; - Init(ftCfg); - - Add("nm1"sv, "whole world"sv, "test"sv); - Add("nm1"sv, "world whole"sv, "test"sv); - Add("nm1"sv, "whole"sv, "world"sv); - Add("nm1"sv, "world"sv, "test"sv); - Add("nm1"sv, "whole"sv, "test"sv); - Add("nm1"sv, "целый мир"sv, "test"sv); - Add("nm1"sv, "целый"sv, "мир"sv); - Add("nm1"sv, "целый"sv, "test"sv); - Add("nm1"sv, "мир"sv, "test"sv); - Add("nm1"sv, "планета"sv, "test"sv); - Add("nm1"sv, "генеральная ассамблея организации объединенных наций"sv, "test"sv); - Add("nm1"sv, "ассамблея генеральная наций объединенных организации"sv, "test"sv); - Add("nm1"sv, "генеральная прегенеральная ассамблея"sv, "организации объединенных свободных наций"sv); - Add("nm1"sv, "генеральная прегенеральная "sv, "организации объединенных свободных наций"sv); - Add("nm1"sv, "UN"sv, "UN"sv); - - Add("nm1"sv, "word"sv, "test"sv); - Add("nm1"sv, "test"sv, "word"sv); - Add("nm1"sv, "word"sv, "слово"sv); - Add("nm1"sv, "word"sv, "одно"sv); - Add("nm1"sv, "слово"sv, "test"sv); - Add("nm1"sv, "слово всего лишь одно"sv, "test"sv); - Add("nm1"sv, "одно"sv, "test"sv); - Add("nm1"sv, "слово"sv, "одно"sv); - Add("nm1"sv, "слово одно"sv, "test"sv); - Add("nm1"sv, "одно слово"sv, "word"sv); - - CheckAllPermutations("", {"world"}, "", - {{"whole !world!", "test"}, {"!world! whole", "test"}, {"whole", "!world!"}, {"!world!", "test"}}); - - CheckAllPermutations("", {"whole", "world"}, "", - {{"!whole world!", "test"}, - {"!world whole!", "test"}, - {"!whole!", "!world!"}, - {"!world!", "test"}, - {"!whole!", "test"}, - {"!целый мир!", "test"}, - {"!целый!", "!мир!"}, - {"!планета!", "test"}, - {"!генеральная ассамблея организации объединенных наций!", "test"}, - {"!ассамблея генеральная наций объединенных организации!", "test"}, - {"!генеральная! прегенеральная !ассамблея!", "!организации объединенных! свободных !наций!"}, - {"!UN!", "!UN!"}}); - - CheckAllPermutations("", {"UN"}, "", - {{"!целый мир!", "test"}, - {"!целый!", "!мир!"}, - {"!планета!", "test"}, - {"!генеральная ассамблея организации объединенных наций!", "test"}, - {"!ассамблея генеральная наций объединенных организации!", "test"}, - {"!генеральная! прегенеральная !ассамблея!", "!организации объединенных! свободных !наций!"}, - {"!UN!", "!UN!"}}); - - CheckAllPermutations("", {"United", "+Nations"}, "", - {{"!целый мир!", "test"}, - {"!целый!", "!мир!"}, - {"!планета!", "test"}, - {"!генеральная ассамблея организации объединенных наций!", "test"}, - {"!ассамблея генеральная наций объединенных организации!", "test"}, - {"!генеральная! прегенеральная !ассамблея!", "!организации объединенных! свободных !наций!"}, - {"!UN!", "!UN!"}}); - - CheckAllPermutations("", {"целый", "мир"}, "", {{"!целый мир!", "test"}, {"!целый!", "test"}, {"!мир!", "test"}, {"!целый!", "!мир!"}}); - - CheckAllPermutations("", {"ООН"}, "", {}); - - CheckAllPermutations("", {"word"}, "", - {{"!word!", "test"}, - {"test", "!word!"}, - {"!word!", "слово"}, - {"!word!", "одно"}, - {"!слово! 
всего лишь !одно!", "test"}, - {"!слово!", "!одно!"}, - {"!слово одно!", "test"}, - {"!одно слово!", "!word!"}}); -} - -// issue #715 -TEST_P(FTSynonymsApi, SelectMultiwordSynonyms2) { - auto ftCfg = GetDefaultConfig(); - ftCfg.synonyms = {{{"черный"}, {"серый космос"}}}; - Init(ftCfg); - - Add("nm1"sv, "Смартфон SAMSUNG Galaxy S20 черный"sv, "SAMSUNG"sv); - Add("nm1"sv, "Смартфон SAMSUNG Galaxy S20 серый"sv, "SAMSUNG"sv); - Add("nm1"sv, "Смартфон SAMSUNG Galaxy S20 красный"sv, "SAMSUNG"sv); - Add("nm1"sv, "Смартфон SAMSUNG Galaxy S20 серый космос"sv, "SAMSUNG"sv); - - // Check all combinations of "+samsung" and "+galaxy" in request - CheckAllPermutations("@ft1 ", {"+samsung", "+galaxy"}, "", - {{"Смартфон !SAMSUNG Galaxy! S20 черный", "!SAMSUNG!"}, - {"Смартфон !SAMSUNG Galaxy! S20 серый", "!SAMSUNG!"}, - {"Смартфон !SAMSUNG Galaxy! S20 красный", "!SAMSUNG!"}, - {"Смартфон !SAMSUNG Galaxy! S20 серый космос", "!SAMSUNG!"}}); - - // Check all combinations of "+samsung", "+galaxy" and "+серый" in request - CheckAllPermutations( - "@ft1 ", {"+samsung", "+galaxy", "+серый"}, "", - {{"Смартфон !SAMSUNG Galaxy! S20 !серый!", "!SAMSUNG!"}, {"Смартфон !SAMSUNG Galaxy! S20 !серый! космос", "!SAMSUNG!"}}); - - // Check all combinations of "+samsung", "+galaxy", "+серый" and "+космос" in request - CheckAllPermutations("@ft1 ", {"+samsung", "+galaxy", "+серый", "+космос"}, "", - {{"Смартфон !SAMSUNG Galaxy! S20 !серый космос!", "!SAMSUNG!"}}); - - // Check all combinations of "+samsung", "+galaxy" and "+черный" in request - CheckAllPermutations( - "@ft1 ", {"+samsung", "+galaxy", "+черный"}, "", - {{"Смартфон !SAMSUNG Galaxy! S20 !черный!", "!SAMSUNG!"}, {"Смартфон !SAMSUNG Galaxy! S20 !серый космос!", "!SAMSUNG!"}}); - - // Check all combinations of "+samsung", "+galaxy", "+серый" and "+серый" in request - CheckAllPermutations("@ft1 ", {"+samsung", "+galaxy", "+черный", "+серый"}, "", - {{"Смартфон !SAMSUNG Galaxy! S20 !серый космос!", "!SAMSUNG!"}}); - - // Check all combinations of "+samsung", "+galaxy", "+черный" and "+космос" in request - CheckAllPermutations("@ft1 ", {"+samsung", "+galaxy", "+черный", "+космос"}, "", - {{"Смартфон !SAMSUNG Galaxy! 
S20 !серый космос!", "!SAMSUNG!"}}); - - // Check all combinations of "+samsung", "+galaxy", "+черный" and "+something" in request - CheckAllPermutations("@ft1 ", {"+samsung", "+galaxy", "+черный", "+something"}, "", {}); - CheckAllPermutations("@ft1 ", {"+samsung", "+galaxy", "+черный", "+черный", "+черный", "+something"}, "", {}); -} - -TEST_P(FTSynonymsApi, SelectWithMinusWithSynonyms) { - auto ftCfg = GetDefaultConfig(); - ftCfg.synonyms = {{{"word", "several lexems"}, {"слово", "сколькото лексем"}}}; - Init(ftCfg); - - Add("nm1"sv, "word"sv, "test"sv); - Add("nm1"sv, "several lexems"sv, "test"sv); - Add("nm1"sv, "слово"sv, "test"sv); - Add("nm1"sv, "сколькото лексем"sv, "test"sv); - - CheckAllPermutations("", {"test", "word"}, "", - {{"!word!", "!test!"}, {"several lexems", "!test!"}, {"!слово!", "!test!"}, {"!сколькото лексем!", "!test!"}}); - // Don't use synonyms - CheckAllPermutations("", {"test", "-word"}, "", {{"several lexems", "!test!"}, {"слово", "!test!"}, {"сколькото лексем", "!test!"}}); - CheckAllPermutations("", {"test", "several", "lexems"}, "", - {{"word", "!test!"}, {"!several lexems!", "!test!"}, {"!слово!", "!test!"}, {"!сколькото лексем!", "!test!"}}); - // Don't use synonyms - CheckAllPermutations("", {"test", "several", "-lexems"}, "", {{"word", "!test!"}, {"слово", "!test!"}, {"сколькото лексем", "!test!"}}); - // Don't use synonyms - CheckAllPermutations("", {"test", "-several", "lexems"}, "", {{"word", "!test!"}, {"слово", "!test!"}, {"сколькото лексем", "!test!"}}); -} - -// issue #627 -TEST_P(FTSynonymsApi, SelectMultiwordSynonymsWithExtraWords) { - auto ftCfg = GetDefaultConfig(); - ftCfg.synonyms = { - {{"бронестекло", "защитное стекло", "бронированное стекло"}, {"бронестекло", "защитное стекло", "бронированное стекло"}}}; - Init(ftCfg); - - Add("nm1"sv, "защитное стекло для экрана samsung galaxy"sv, "test"sv); - Add("nm1"sv, "защитное стекло для экрана iphone"sv, "test"sv); - Add("nm1"sv, "бронированное стекло для samsung galaxy"sv, "test"sv); - Add("nm1"sv, "бронированное стекло для экрана iphone"sv, "test"sv); - Add("nm1"sv, "бронестекло для экрана samsung galaxy"sv, "test"sv); - Add("nm1"sv, "бронестекло для экрана iphone"sv, "test"sv); - - CheckAllPermutations("", {"бронестекло"}, "", - {{"!защитное стекло! для экрана samsung galaxy", "test"}, - {"!защитное стекло! для экрана iphone", "test"}, - {"!бронированное стекло! для samsung galaxy", "test"}, - {"!бронированное стекло! для экрана iphone", "test"}, - {"!бронестекло! для экрана samsung galaxy", "test"}, - {"!бронестекло! для экрана iphone", "test"}}); - - CheckAllPermutations("", {"+бронестекло", "+iphone"}, "", - {{"!защитное стекло! для экрана !iphone!", "test"}, - {"!бронированное стекло! для экрана !iphone!", "test"}, - {"!бронестекло! для экрана !iphone!", "test"}}); - - CheckAllPermutations("", {"+galaxy", "+бронестекло", "+samsung"}, "", - {{"!защитное стекло! для экрана !samsung galaxy!", "test"}, - {"!бронированное стекло! для !samsung galaxy!", "test"}, - {"!бронестекло! для экрана !samsung galaxy!", "test"}}); - - CheckAllPermutations("", {"+galaxy", "+бронестекло", "экрана", "+samsung"}, "", - {{"!защитное стекло! для !экрана samsung galaxy!", "test"}, - {"!бронированное стекло! для !samsung galaxy!", "test"}, - {"!бронестекло! для !экрана samsung galaxy!", "test"}}); - - CheckAllPermutations("", {"+galaxy", "+бронестекло", "какоетослово", "+samsung"}, "", - {{"!защитное стекло! для экрана !samsung galaxy!", "test"}, - {"!бронированное стекло! 
для !samsung galaxy!", "test"}, - {"!бронестекло! для экрана !samsung galaxy!", "test"}}); - - CheckAllPermutations("", {"+бронестекло", "+iphone", "+samsung"}, "", {}); -} - -TEST_P(FTSynonymsApi, ChangeSynonymsCfg) { - auto ftCfg = GetDefaultConfig(); - Init(ftCfg); - - Add("nm1"sv, "UN"sv, "test"sv); - Add("nm1"sv, "United Nations"sv, "test"sv); - Add("nm1"sv, "ООН"sv, "test"sv); - Add("nm1"sv, "организация объединенных наций"sv, "test"sv); - - Add("nm1"sv, "word"sv, "test"sv); - Add("nm1"sv, "several lexems"sv, "test"sv); - Add("nm1"sv, "слово"sv, "test"sv); - Add("nm1"sv, "сколькото лексем"sv, "test"sv); - - CheckAllPermutations("", {"UN"}, "", {{"!UN!", "test"}}); - CheckAllPermutations("", {"United", "Nations"}, "", {{"!United Nations!", "test"}}); - CheckAllPermutations("", {"word"}, "", {{"!word!", "test"}}); - CheckAllPermutations("", {"several", "lexems"}, "", {{"!several lexems!", "test"}}); - - // Add synonyms - ftCfg.synonyms = {{{"UN", "United Nations"}, {"ООН", "организация объединенных наций"}}}; - SetFTConfig(ftCfg); - - CheckAllPermutations("", {"UN"}, "", {{"!UN!", "test"}, {"!ООН!", "test"}, {"!организация объединенных наций!", "test"}}); - CheckAllPermutations("", {"United", "Nations"}, "", - {{"!United Nations!", "test"}, {"!ООН!", "test"}, {"!организация объединенных наций!", "test"}}); - CheckAllPermutations("", {"word"}, "", {{"!word!", "test"}}); - CheckAllPermutations("", {"several", "lexems"}, "", {{"!several lexems!", "test"}}); - - // Change synonyms - ftCfg.synonyms = {{{"word", "several lexems"}, {"слово", "сколькото лексем"}}}; - SetFTConfig(ftCfg); - - CheckAllPermutations("", {"UN"}, "", {{"!UN!", "test"}}); - CheckAllPermutations("", {"United", "Nations"}, "", {{"!United Nations!", "test"}}); - CheckAllPermutations("", {"word"}, "", {{"!word!", "test"}, {"!слово!", "test"}, {"!сколькото лексем!", "test"}}); - CheckAllPermutations("", {"several", "lexems"}, "", - {{"!several lexems!", "test"}, {"!слово!", "test"}, {"!сколькото лексем!", "test"}}); - - // Remove synonyms - ftCfg.synonyms.clear(); - SetFTConfig(ftCfg); - - CheckAllPermutations("", {"UN"}, "", {{"!UN!", "test"}}); - CheckAllPermutations("", {"United", "Nations"}, "", {{"!United Nations!", "test"}}); - CheckAllPermutations("", {"word"}, "", {{"!word!", "test"}}); - CheckAllPermutations("", {"several", "lexems"}, "", {{"!several lexems!", "test"}}); -} - -TEST_P(FTSynonymsApi, SelectWithRelevanceBoostWithSynonyms) { - auto ftCfg = GetDefaultConfig(); - ftCfg.synonyms = {{{"word"}, {"одно слово"}}, {{"United Nations"}, {"ООН"}}}; - Init(ftCfg); - - Add("nm1"sv, "одно слово"sv, ""sv); - Add("nm1"sv, ""sv, "ООН"sv); - - CheckAllPermutations("", {"word^2", "United^0.5", "Nations"}, "", {{"!одно слово!", ""}, {"", "!ООН!"}}, true); - CheckAllPermutations("", {"word^0.5", "United^2", "Nations^0.5"}, "", {{"", "!ООН!"}, {"!одно слово!", ""}}, true); -} - -TEST_P(FTSynonymsApi, SelectWithFieldsBoostWithSynonyms) { - auto ftCfg = GetDefaultConfig(); - ftCfg.synonyms = {{{"word"}, {"одно слово"}}}; - Init(ftCfg); - - Add("nm1"sv, "одно слово"sv, ""sv); - Add("nm1"sv, "одно"sv, "слово"sv); - Add("nm1"sv, ""sv, "одно слово"sv); - - CheckAllPermutations("@", {"ft1^2", "ft2^0.5"}, " word", {{"!одно слово!", ""}, {"!одно!", "!слово!"}, {"", "!одно слово!"}}, true, - ", "); - CheckAllPermutations("@", {"ft1^0.5", "ft2^2"}, " word", {{"", "!одно слово!"}, {"!одно!", "!слово!"}, {"!одно слово!", ""}}, true, - ", "); -} - -TEST_P(FTSynonymsApi, SelectWithFieldsListWithSynonyms) { - auto ftCfg = 
GetDefaultConfig(); - ftCfg.synonyms = {{{"word"}, {"одно слово"}}}; - Init(ftCfg); - - Add("nm1"sv, "одно слово"sv, ""sv); - Add("nm1"sv, "одно"sv, "слово"sv); - Add("nm1"sv, ""sv, "одно слово"sv); - - CheckAllPermutations("", {"word"}, "", {{"!одно слово!", ""}, {"!одно!", "!слово!"}, {"", "!одно слово!"}}); - CheckAllPermutations("@ft1 ", {"word"}, "", {{"!одно слово!", ""}}); - CheckAllPermutations("@ft2 ", {"word"}, "", {{"", "!одно слово!"}}); -} - -INSTANTIATE_TEST_SUITE_P(, FTSynonymsApi, - ::testing::Values(reindexer::FtFastConfig::Optimization::Memory, reindexer::FtFastConfig::Optimization::CPU), - [](const auto& info) { - switch (info.param) { - case reindexer::FtFastConfig::Optimization::Memory: - return "OptimizationByMemory"; - case reindexer::FtFastConfig::Optimization::CPU: - return "OptimizationByCPU"; - default: - assert(false); - std::abort(); - } - }); diff --git a/cpp_src/gtests/tests/unit/ft/ft_typos.cc b/cpp_src/gtests/tests/unit/ft/ft_typos.cc deleted file mode 100644 index dd95dc5f0..000000000 --- a/cpp_src/gtests/tests/unit/ft/ft_typos.cc +++ /dev/null @@ -1,755 +0,0 @@ -#include -#include "ft_api.h" - -class FTTyposApi : public FTApi { -protected: - std::string_view GetDefaultNamespace() noexcept override { return "ft_typos_default_namespace"; } - - template - std::string DumpStrings(const T& container) { - bool first = true; - std::string res; - res.append("["); - for (auto& v : container) { - if (first) { - res.append(v); - first = false; - } else { - res.append(",").append(v); - } - } - res.append("]"); - return res; - } - void CheckResultsByField(const reindexer::QueryResults& res, const std::set& expected, std::string_view fieldName, - std::string_view description) { - std::set resSet; - for (auto& r : res) { - auto word = r.GetItem(false)[fieldName].As(); - EXPECT_TRUE(expected.find(word) != expected.end()) << description << ": word '" << word << "' was not expected in results"; - resSet.emplace(std::move(word)); - } - for (auto& e : expected) { - EXPECT_TRUE(resSet.find(e) != resSet.end()) << description << ": word '" << e << "' was expected in results, but was not found"; - } - if (!::testing::Test::HasFailure()) { - EXPECT_EQ(res.Count(), expected.size()) - << description << "; expected(values): " << DumpStrings(expected) << "; got(IDs): " << res.ToLocalQr().Dump(); - } - } -}; - -using namespace std::string_view_literals; - -TEST_P(FTTyposApi, SelectWithTypos) { - auto cfg = GetDefaultConfig(); - cfg.stopWords.clear(); - cfg.stemmers.clear(); - cfg.enableKbLayout = false; - cfg.enableTranslit = false; - const auto kDefaultMaxTypoDist = cfg.maxTypoDistance; - - cfg.maxTypos = 0; - Init(cfg); - Add("A"); - Add("AB"); - Add("ABC"); - Add("ABCD"); - Add("ABCDE"); - Add("ABCDEF"); - Add("ABCDEFG"); - Add("ABCDEFGH"); - // Only full match - CheckAllPermutations("", {"A~"}, "", {{"!A!", ""}}); - CheckAllPermutations("", {"AB~"}, "", {{"!AB!", ""}}); - CheckAllPermutations("", {"ABC~"}, "", {{"!ABC!", ""}}); - CheckAllPermutations("", {"ABCDEFGHI~"}, "", {}); - CheckAllPermutations("", {"XBCD~"}, "", {}); - CheckAllPermutations("", {"ACBD~"}, "", {}); - - cfg.maxTypos = 1; - SetFTConfig(cfg); - // Full match - // Or one missing char in any word - CheckAllPermutations("", {"ABCD~"}, "", {{"!ABC!", ""}, {"!ABCD!", ""}, {"!ABCDE!", ""}}); - CheckAllPermutations("", {"ABCDEFGH~"}, "", {{"!ABCDEFG!", ""}, {"!ABCDEFGH!", ""}}); - CheckAllPermutations("", {"BCD~"}, "", {{"!ABCD!", ""}}); - CheckAllPermutations("", {"ABCDEFGHI~"}, "", {{"!ABCDEFGH!", ""}}); - 
CheckAllPermutations("", {"XABCD~"}, "", {{"!ABCD!", ""}}); - CheckAllPermutations("", {"ABXCD~"}, "", {{"!ABCD!", ""}}); - CheckAllPermutations("", {"ABCDX~"}, "", {{"!ABCD!", ""}}); - CheckAllPermutations("", {"XBCD~"}, "", {}); - CheckAllPermutations("", {"ACBD~"}, "", {}); - // Not less than 2 - CheckAllPermutations("", {"AB~"}, "", {{"!AB!", ""}, {"!ABC!", ""}}); - CheckAllPermutations("", {"AC~"}, "", {{"!ABC!", ""}}); - CheckAllPermutations("", {"B~"}, "", {}); - CheckAllPermutations("", {"AX~"}, "", {}); - - cfg.maxTypos = 2; - cfg.maxTypoDistance = -1; - SetFTConfig(cfg); - // Full match - // Or one missing char in any word - // Or one typo - CheckAllPermutations("", {"ABCD~"}, "", {{"!ABC!", ""}, {"!ABCD!", ""}, {"!ABCDE!", ""}}); - CheckAllPermutations("", {"ABCDEFGH~"}, "", {{"!ABCDEFG!", ""}, {"!ABCDEFGH!", ""}}); - CheckAllPermutations("", {"BCDEFX~"}, "", {{"!ABCDEF!", ""}}); - CheckAllPermutations("", {"BCD~"}, "", {{"!ABC!", ""}, {"!ABCD!", ""}}); - CheckAllPermutations("", {"XABCD~"}, "", {{"!ABCD!", ""}, {"!ABCDE!", ""}}); - CheckAllPermutations("", {"ABXCD~"}, "", {{"!ABCD!", ""}, {"!ABCDE!", ""}}); - CheckAllPermutations("", {"ABCDX~"}, "", {{"!ABCD!", ""}, {"!ABCDE!", ""}}); - CheckAllPermutations("", {"BXCD~"}, "", {{"!ABCD!", ""}}); - CheckAllPermutations("", {"BXCX~"}, "", {}); - CheckAllPermutations("", {"ACBD~"}, "", {{"!ABCD!", ""}}); - // Not less than 2 - CheckAllPermutations("", {"AB~"}, "", {{"!AB!", ""}, {"!ABC!", ""}}); - CheckAllPermutations("", {"AC~"}, "", {{"!ABC!", ""}}); - CheckAllPermutations("", {"B~"}, "", {}); - CheckAllPermutations("", {"AX~"}, "", {}); - - cfg.maxTypos = 2; - cfg.maxTypoDistance = kDefaultMaxTypoDist; - SetFTConfig(cfg); - // Full match - // Or one missing char in any word - // Or one letter switch - // Max typo distance is 0 (by default). Only the letter on the same position may be changed - // Max letter permutation is 1 (by default). 
The same letter may be moved by 1 - CheckAllPermutations("", {"ABCD~"}, "", {{"!ABC!", ""}, {"!ABCD!", ""}, {"!ABCDE!", ""}}); - CheckAllPermutations("", {"ABCDEFGH~"}, "", {{"!ABCDEFG!", ""}, {"!ABCDEFGH!", ""}}); - CheckAllPermutations("", {"BCDEFX~"}, "", {}); - CheckAllPermutations("", {"BCD~"}, "", {{"!ABCD!", ""}}); - CheckAllPermutations("", {"XABCD~"}, "", {{"!ABCD!", ""}}); - CheckAllPermutations("", {"ABXCD~"}, "", {{"!ABCD!", ""}}); - CheckAllPermutations("", {"ABCDX~"}, "", {{"!ABCD!", ""}, {"!ABCDE!", ""}}); - CheckAllPermutations("", {"BXCD~"}, "", {}); - CheckAllPermutations("", {"BXCX~"}, "", {}); - CheckAllPermutations("", {"ACBD~"}, "", {{"!ABCD!", ""}}); - // Not less than 2 - CheckAllPermutations("", {"AB~"}, "", {{"!AB!", ""}, {"!ABC!", ""}}); - CheckAllPermutations("", {"AC~"}, "", {{"!ABC!", ""}}); - CheckAllPermutations("", {"B~"}, "", {}); - CheckAllPermutations("", {"AX~"}, "", {}); - - cfg.maxTypos = 3; - cfg.maxTypoDistance = -1; - SetFTConfig(cfg); - // Full match - // Or one missing char in any word - // Or one missing char in one word and two missing chars in another one - // Or up to two typos - CheckAllPermutations("", {"ABCD~"}, "", {{"!AB!", ""}, {"!ABC!", ""}, {"!ABCD!", ""}, {"!ABCDE!", ""}, {"!ABCDEF!", ""}}); - CheckAllPermutations("", {"ABCDEFGH~"}, "", {{"!ABCDEF!", ""}, {"!ABCDEFG!", ""}, {"!ABCDEFGH!", ""}}); - CheckAllPermutations("", {"BCDEFX~"}, "", {{"!ABCDE!", ""}, {"!ABCDEF!", ""}, {"!ABCDEFG!", ""}}); - CheckAllPermutations("", {"BCDXEFX~"}, "", {{"!ABCDEF!", ""}}); - CheckAllPermutations("", {"BCD~"}, "", {{"!ABC!", ""}, {"!ABCD!", ""}, {"!ABCDE!", ""}}); - CheckAllPermutations("", {"XABCD~"}, "", {{"!ABC!", ""}, {"!ABCD!", ""}, {"!ABCDE!", ""}, {"!ABCDEF!", ""}}); - CheckAllPermutations("", {"ABXCD~"}, "", {{"!ABC!", ""}, {"!ABCD!", ""}, {"!ABCDE!", ""}, {"!ABCDEF!", ""}}); - CheckAllPermutations("", {"ABCDX~"}, "", {{"!ABC!", ""}, {"!ABCD!", ""}, {"!ABCDE!", ""}, {"!ABCDEF!", ""}}); - CheckAllPermutations("", {"XABCDX~"}, "", {{"!ABCD!", ""}, {"!ABCDE!", ""}}); - CheckAllPermutations("", {"ABXXCD~"}, "", {{"!ABCD!", ""}, {"!ABCDE!", ""}}); - CheckAllPermutations("", {"ABCDXX~"}, "", {{"!ABCD!", ""}, {"!ABCDE!", ""}}); - CheckAllPermutations("", {"BXCD~"}, "", {{"!ABC!", ""}, {"!ABCD!", ""}, {"!ABCDE!", ""}}); - CheckAllPermutations("", {"ACBD~"}, "", {{"!AB!", ""}, {"!ABC!", ""}, {"!ABCD!", ""}, {"!ABCDE!", ""}}); - CheckAllPermutations("", {"BADC~"}, "", {{"!ABC!", ""}}); - CheckAllPermutations("", {"BACDFE~"}, "", {{"!ABCDE!", ""}}); - CheckAllPermutations("", {"XBCD~"}, "", {{"!ABC!", ""}, {"!ABCD!", ""}, {"!ABCDE!", ""}}); - CheckAllPermutations("", {"ABXD~"}, "", {{"!AB!", ""}, {"!ABC!", ""}, {"!ABCD!", ""}, {"!ABCDE!", ""}}); - CheckAllPermutations("", {"ABCX~"}, "", {{"!AB!", ""}, {"!ABC!", ""}, {"!ABCD!", ""}, {"!ABCDE!", ""}}); - CheckAllPermutations("", {"ABXX~"}, "", {{"!AB!", ""}, {"!ABC!", ""}}); - CheckAllPermutations("", {"XBXD~"}, "", {}); - CheckAllPermutations("", {"AXXD~"}, "", {}); - CheckAllPermutations("", {"XBCX~"}, "", {{"!ABC!", ""}}); - CheckAllPermutations("", {"XXCD~"}, "", {}); - CheckAllPermutations("", {"XXABX~"}, "", {}); - // Not less than 2 - CheckAllPermutations("", {"AB~"}, "", {{"!AB!", ""}, {"!ABC!", ""}, {"!ABCD!", ""}}); - CheckAllPermutations("", {"AC~"}, "", {{"!ABC!", ""}, {"!ABCD!", ""}}); - CheckAllPermutations("", {"B~"}, "", {}); - CheckAllPermutations("", {"AX~"}, "", {}); - CheckAllPermutations("", {"AXX~"}, "", {}); - - cfg.maxTypos = 3; - cfg.maxTypoDistance = kDefaultMaxTypoDist; - 
SetFTConfig(cfg); - // Full match - // Or up to two missing chars in any word - // Or one letter switch and one missing char in any word - // Max typo distance is 0 (by default). Only the letter on the same position may be changed - // Max letter permutation is 1 (by default). The same letter may be moved by 1 - CheckAllPermutations("", {"ABCD~"}, "", {{"!AB!", ""}, {"!ABC!", ""}, {"!ABCD!", ""}, {"!ABCDE!", ""}, {"!ABCDEF!", ""}}); - CheckAllPermutations("", {"ABCDEFGH~"}, "", {{"!ABCDEF!", ""}, {"!ABCDEFG!", ""}, {"!ABCDEFGH!", ""}}); - CheckAllPermutations("", {"BCDEFX~"}, "", {{"!ABCDEFG!", ""}}); - CheckAllPermutations("", {"XBCDEF~"}, "", {{"!ABCDE!", ""}, {"!ABCDEF!", ""}, {"!ABCDEFG!", ""}}); - CheckAllPermutations("", {"BCDXEFX~"}, "", {}); - CheckAllPermutations("", {"BCD~"}, "", {{"!ABCD!", ""}, {"!ABCDE!", ""}}); - - CheckAllPermutations("", {"XABCD~"}, "", {{"!ABC!", ""}, {"!ABCD!", ""}}); - CheckAllPermutations("", {"ABXCD~"}, "", {{"!ABC!", ""}, {"!ABCD!", ""}}); - CheckAllPermutations("", {"ABCDX~"}, "", {{"!ABC!", ""}, {"!ABCD!", ""}, {"!ABCDE!", ""}, {"!ABCDEF!", ""}}); - CheckAllPermutations("", {"XABCDX~"}, "", {{"!ABCD!", ""}, {"!ABCDE!", ""}}); - CheckAllPermutations("", {"ABXXCD~"}, "", {{"!ABCD!", ""}}); - CheckAllPermutations("", {"ABCDXX~"}, "", {{"!ABCD!", ""}, {"!ABCDE!", ""}}); - CheckAllPermutations("", {"BXCD~"}, "", {}); - CheckAllPermutations("", {"ACBD~"}, "", {{"!AB!", ""}, {"!ABC!", ""}, {"!ABCD!", ""}, {"!ABCDE!", ""}}); - CheckAllPermutations("", {"BADC~"}, "", {{"!ABC!", ""}}); - CheckAllPermutations("", {"BACDFE~"}, "", {{"!ABCDE!", ""}}); - CheckAllPermutations("", {"XBCD~"}, "", {{"!ABC!", ""}, {"!ABCD!", ""}, {"!ABCDE!", ""}}); - CheckAllPermutations("", {"ABXD~"}, "", {{"!AB!", ""}, {"!ABC!", ""}, {"!ABCD!", ""}, {"!ABCDE!", ""}}); - CheckAllPermutations("", {"ABCX~"}, "", {{"!AB!", ""}, {"!ABC!", ""}, {"!ABCD!", ""}, {"!ABCDE!", ""}}); - CheckAllPermutations("", {"ABXX~"}, "", {{"!AB!", ""}, {"!ABC!", ""}}); - CheckAllPermutations("", {"XBXD~"}, "", {}); - CheckAllPermutations("", {"AXXD~"}, "", {}); - CheckAllPermutations("", {"XBCX~"}, "", {{"!ABC!", ""}}); - CheckAllPermutations("", {"XXCD~"}, "", {}); - CheckAllPermutations("", {"XXABX~"}, "", {}); - // Not less than 2 - CheckAllPermutations("", {"AB~"}, "", {{"!AB!", ""}, {"!ABC!", ""}, {"!ABCD!", ""}}); - CheckAllPermutations("", {"AC~"}, "", {{"!ABC!", ""}, {"!ABCD!", ""}}); - CheckAllPermutations("", {"B~"}, "", {}); - CheckAllPermutations("", {"AX~"}, "", {}); - CheckAllPermutations("", {"AXX~"}, "", {}); - - cfg.maxTypos = 4; - cfg.maxTypoDistance = -1; - SetFTConfig(cfg); - // Full match - // Or up to two missing chars in any of the both words - // Or up to two typos - CheckAllPermutations("", {"ABCD~"}, "", {{"!AB!", ""}, {"!ABC!", ""}, {"!ABCD!", ""}, {"!ABCDE!", ""}, {"!ABCDEF!", ""}}); - CheckAllPermutations("", {"ABCDEFGH~"}, "", {{"!ABCDEF!", ""}, {"!ABCDEFG!", ""}, {"!ABCDEFGH!", ""}}); - CheckAllPermutations("", {"BCDEFX~"}, "", {{"!ABCDE!", ""}, {"!ABCDEF!", ""}, {"!ABCDEFG!", ""}}); - CheckAllPermutations("", {"BCDXEFX~"}, "", {{"!ABCDEF!", ""}, {"!ABCDEFG!", ""}}); - CheckAllPermutations("", {"BCD~"}, "", {{"!ABC!", ""}, {"!ABCD!", ""}, {"!ABCDE!", ""}}); - CheckAllPermutations("", {"XABCD~"}, "", {{"!ABC!", ""}, {"!ABCD!", ""}, {"!ABCDE!", ""}, {"!ABCDEF!", ""}}); - CheckAllPermutations("", {"ABXCD~"}, "", {{"!ABC!", ""}, {"!ABCD!", ""}, {"!ABCDE!", ""}, {"!ABCDEF!", ""}}); - CheckAllPermutations("", {"ABCDX~"}, "", {{"!ABC!", ""}, {"!ABCD!", ""}, {"!ABCDE!", ""}, 
{"!ABCDEF!", ""}}); - CheckAllPermutations("", {"XABCDX~"}, "", {{"!ABCD!", ""}, {"!ABCDE!", ""}, {"!ABCDEF!", ""}}); - CheckAllPermutations("", {"ABXXCD~"}, "", {{"!ABCD!", ""}, {"!ABCDE!", ""}, {"!ABCDEF!", ""}}); - CheckAllPermutations("", {"ABCDXX~"}, "", {{"!ABCD!", ""}, {"!ABCDE!", ""}, {"!ABCDEF!", ""}}); - CheckAllPermutations("", {"BXCD~"}, "", {{"!ABC!", ""}, {"!ABCD!", ""}, {"!ABCDE!", ""}}); - CheckAllPermutations("", {"ACBD~"}, "", {{"!AB!", ""}, {"!ABC!", ""}, {"!ABCD!", ""}, {"!ABCDE!", ""}}); - CheckAllPermutations("", {"BADC~"}, "", {{"!ABC!", ""}, {"!ABCD!", ""}}); - CheckAllPermutations("", {"BACDFE~"}, "", {{"!ABCDE!", ""}, {"!ABCDEF!", ""}}); - CheckAllPermutations("", {"BADCFE~"}, "", {}); - CheckAllPermutations("", {"XBCD~"}, "", {{"!ABC!", ""}, {"!ABCD!", ""}, {"!ABCDE!", ""}}); - CheckAllPermutations("", {"ABXD~"}, "", {{"!AB!", ""}, {"!ABC!", ""}, {"!ABCD!", ""}, {"!ABCDE!", ""}}); - CheckAllPermutations("", {"ABCX~"}, "", {{"!AB!", ""}, {"!ABC!", ""}, {"!ABCD!", ""}, {"!ABCDE!", ""}}); - CheckAllPermutations("", {"ABXX~"}, "", {{"!AB!", ""}, {"!ABC!", ""}, {"!ABCD!", ""}}); - CheckAllPermutations("", {"XBXD~"}, "", {{"!ABCD!", ""}}); - CheckAllPermutations("", {"AXXD~"}, "", {{"!ABCD!", ""}}); - CheckAllPermutations("", {"XBCX~"}, "", {{"!ABC!", ""}, {"!ABCD!", ""}}); - CheckAllPermutations("", {"XXCD~"}, "", {{"!ABCD!", ""}}); - CheckAllPermutations("", {"XXABX~"}, "", {}); - // Not less than 2 - CheckAllPermutations("", {"AB~"}, "", {{"!AB!", ""}, {"!ABC!", ""}, {"!ABCD!", ""}}); - CheckAllPermutations("", {"AC~"}, "", {{"!ABC!", ""}, {"!ABCD!", ""}}); - CheckAllPermutations("", {"B~"}, "", {}); - CheckAllPermutations("", {"AX~"}, "", {}); - CheckAllPermutations("", {"AXX~"}, "", {}); - - cfg.maxTypos = 4; - cfg.maxTypoDistance = kDefaultMaxTypoDist; - SetFTConfig(cfg); - // Full match - // Or one missing char in any word - // Or one letter switch and one missing char in one of the words - // Or two letters switch - // Max typo distance is 0 (by default). Only the letter on the same position may be changed - // Max letter permutation is 1 (by default). 
The same letter may be moved by 1 - CheckAllPermutations("", {"ABCD~"}, "", {{"!AB!", ""}, {"!ABC!", ""}, {"!ABCD!", ""}, {"!ABCDE!", ""}, {"!ABCDEF!", ""}}); - CheckAllPermutations("", {"ABCDEFGH~"}, "", {{"!ABCDEF!", ""}, {"!ABCDEFG!", ""}, {"!ABCDEFGH!", ""}}); - CheckAllPermutations("", {"BCDEFX~"}, "", {{"!ABCDEFG!", ""}}); - CheckAllPermutations("", {"BCDXEFX~"}, "", {}); - CheckAllPermutations("", {"BCD~"}, "", {{"!ABCD!", ""}, {"!ABCDE!", ""}}); - CheckAllPermutations("", {"XABCD~"}, "", {{"!ABC!", ""}, {"!ABCD!", ""}}); - CheckAllPermutations("", {"ABXCD~"}, "", {{"!ABC!", ""}, {"!ABCD!", ""}}); - CheckAllPermutations("", {"ABCDX~"}, "", {{"!ABC!", ""}, {"!ABCD!", ""}, {"!ABCDE!", ""}, {"!ABCDEF!", ""}}); - CheckAllPermutations("", {"XABCDX~"}, "", {{"!ABCD!", ""}, {"!ABCDE!", ""}}); - CheckAllPermutations("", {"ABXXCD~"}, "", {{"!ABCD!", ""}}); - CheckAllPermutations("", {"ABCDXX~"}, "", {{"!ABCD!", ""}, {"!ABCDE!", ""}, {"!ABCDEF!", ""}}); - CheckAllPermutations("", {"BXCD~"}, "", {{"!ABCD!", ""}}); - CheckAllPermutations("", {"ACBD~"}, "", {{"!AB!", ""}, {"!ABC!", ""}, {"!ABCD!", ""}, {"!ABCDE!", ""}}); - CheckAllPermutations("", {"BADC~"}, "", {{"!ABC!", ""}, {"!ABCD!", ""}}); - CheckAllPermutations("", {"BACDFE~"}, "", {{"!ABCDE!", ""}, {"!ABCDEF!", ""}}); - CheckAllPermutations("", {"BADCFE~"}, "", {}); - CheckAllPermutations("", {"XBCD~"}, "", {{"!ABC!", ""}, {"!ABCD!", ""}, {"!ABCDE!", ""}}); - CheckAllPermutations("", {"ABXD~"}, "", {{"!AB!", ""}, {"!ABC!", ""}, {"!ABCD!", ""}, {"!ABCDE!", ""}}); - CheckAllPermutations("", {"ABCX~"}, "", {{"!AB!", ""}, {"!ABC!", ""}, {"!ABCD!", ""}, {"!ABCDE!", ""}}); - CheckAllPermutations("", {"ABXX~"}, "", {{"!AB!", ""}, {"!ABC!", ""}, {"!ABCD!", ""}}); - CheckAllPermutations("", {"XBXD~"}, "", {{"!ABCD!", ""}}); - CheckAllPermutations("", {"AXXD~"}, "", {{"!ABCD!", ""}}); - CheckAllPermutations("", {"XBCX~"}, "", {{"!ABC!", ""}, {"!ABCD!", ""}}); - CheckAllPermutations("", {"XXCD~"}, "", {{"!ABCD!", ""}}); - CheckAllPermutations("", {"XXABX~"}, "", {}); - // Not less than 2 - CheckAllPermutations("", {"AB~"}, "", {{"!AB!", ""}, {"!ABC!", ""}, {"!ABCD!", ""}}); - CheckAllPermutations("", {"AC~"}, "", {{"!ABC!", ""}, {"!ABCD!", ""}}); - CheckAllPermutations("", {"B~"}, "", {}); - CheckAllPermutations("", {"AX~"}, "", {}); - CheckAllPermutations("", {"AXX~"}, "", {}); -} - -TEST_P(FTTyposApi, TyposDistance) { - // Check different max_typos_distance values with default max_typos and default max_symbol_permutation_distance (=1) - Init(GetDefaultConfig()); - Add("облачный"sv); - Add("блачныйк"sv); - Add("табачный"sv); - Add("отличный"sv); - Add("солнечный"sv); - - struct Case { - std::string description; - int maxTypoDistance; - std::string word; - std::set expectedResults; - }; - const std::vector cases = { - {"wrong_letter_default_config", std::numeric_limits::max(), "=аблачный~", {"облачный"}}, - {"wrong_letter_in_the_middle_default_config", std::numeric_limits::max(), "=облочный~", {"облачный"}}, - {"extra_letter_default_config", std::numeric_limits::max(), "=облачкный~", {"облачный"}}, - {"missing_letter_default_config", std::numeric_limits::max(), "=обланый~", {"облачный"}}, - - {"wrong_letter_0_typo_distance", 0, "=аблачный~", {"облачный"}}, - {"wrong_letter_in_the_middle_0_typo_distance", 0, "=облочный~", {"облачный"}}, - {"extra_letter_0_typo_distance", 0, "=облачкный~", {"облачный"}}, - {"missing_letter_0_typo_distance", 0, "=обланый~", {"облачный"}}, - - {"wrong_letter_any_typo_distance", -1, "=аблачный~", {"облачный", 
"табачный", "блачныйк"}}, - {"wrong_letter_in_the_middle_any_typo_distance", -1, "=облочный~", {"облачный"}}, - {"extra_letter_any_typo_distance", -1, "=облачкный~", {"облачный"}}, - {"missing_letter_any_typo_distance", -1, "=обланый~", {"облачный"}}, - - {"wrong_letter_2_typo_distance", 2, "=аблачный~", {"облачный", "табачный"}}, - {"wrong_letter_in_the_middle_2_typo_distance", -1, "=облочный~", {"облачный"}}, - {"extra_letter_2_typo_distance", 2, "=облачкный~", {"облачный"}}, - {"missing_letter_2_typo_distance", 2, "=обланый~", {"облачный"}}, - }; - - for (auto& c : cases) { - auto cfg = GetDefaultConfig(); - if (c.maxTypoDistance != std::numeric_limits::max()) { - cfg.maxTypoDistance = c.maxTypoDistance; - } - auto err = SetFTConfig(cfg, "nm1", "ft1", {"ft1"}); - ASSERT_TRUE(err.ok()) << err.what(); - auto q = reindexer::Query("nm1").Where("ft1", CondEq, c.word); - reindexer::QueryResults res; - err = rt.reindexer->Select(q, res); - EXPECT_TRUE(err.ok()) << err.what(); - CheckResultsByField(res, c.expectedResults, "ft1", c.description); - } -} - -TEST_P(FTTyposApi, TyposDistanceWithMaxTypos) { - // Check basic max_typos_distance funtionality with different max_typos values. - // Letters permutations are not allowed (max_symbol_permutation_distance = 0) - Init(GetDefaultConfig()); - Add("облачный"sv); - Add("блачныйк"sv); - Add("табачный"sv); - Add("отличный"sv); - Add("солнечный"sv); - - struct Case { - std::string description; - int maxTypos; - std::string word; - std::set expectedResults; - }; - const std::vector cases = { - {"full_match_0_max_typo", 0, "=облачный~", {"облачный"}}, - {"wrong_letter_0_max_typo", 0, "=аблачный~", {}}, - {"wrong_letter_in_the_middle_0_max_typo", 0, "=облочный~", {}}, - {"2_wrong_letters_0_max_typo_1", 0, "=аблочный~", {}}, - {"2_wrong_letters_0_max_typo_2", 0, "=оплачнык~", {}}, - {"extra_letter_0_max_typo", 0, "=облачкный~", {}}, - {"missing_letter_0_max_typo", 0, "=обланый~", {}}, - {"2_extra_letters_0_max_typo", 0, "=поблачкный~", {}}, - {"2_missing_letters_0_max_typo", 0, "=обланы~", {}}, - - {"full_match_1_max_typo", 1, "=облачный~", {"облачный"}}, - {"wrong_letter_1_max_typo", 1, "=аблачный~", {}}, - {"wrong_letter_in_the_middle_1_max_typo", 1, "=облочный~", {}}, - {"2_wrong_letters_1_max_typo_1", 1, "=аблочный~", {}}, - {"2_wrong_letters_1_max_typo_2", 1, "=оплачнык~", {}}, - {"extra_letter_1_max_typo", 1, "=облачкный~", {"облачный"}}, - {"missing_letter_1_max_typo", 1, "=обланый~", {"облачный"}}, - {"2_extra_letters_1_max_typo", 1, "=поблачкный~", {}}, - {"2_missing_letters_1_max_typo", 1, "=обланы~", {}}, - - {"full_match_2_max_typo", 2, "=облачный~", {"облачный"}}, - {"wrong_letter_2_max_typo", 2, "=аблачный~", {"облачный"}}, - {"wrong_letter_in_the_middle_2_max_typo", 2, "=облочный~", {"облачный"}}, - {"2_wrong_letters_2_max_typo_1", 2, "=аблочный~", {}}, - {"2_wrong_letters_2_max_typo_2", 2, "=оплачнык~", {}}, - {"extra_letter_2_max_typo", 2, "=облачкный~", {"облачный"}}, - {"missing_letter_2_max_typo", 2, "=обланый~", {"облачный"}}, - {"2_extra_letters_2_max_typo", 2, "=поблачкный~", {}}, - {"2_missing_letters_2_max_typo", 2, "=обланы~", {}}, - - {"full_match_3_max_typo", 3, "=облачный~", {"облачный"}}, - {"wrong_letter_3_max_typo", 3, "=аблачный~", {"облачный"}}, - {"wrong_letter_in_the_middle_3_max_typo", 3, "=облочный~", {"облачный"}}, - {"2_wrong_letters_3_max_typo_1", 3, "=аблочный~", {}}, - {"2_wrong_letters_3_max_typo_2", 3, "=оплачнык~", {}}, - {"extra_letter_3_max_typo", 3, "=облачкный~", {"облачный"}}, - 
{"missing_letter_3_max_typo", 3, "=обланый~", {"облачный"}}, - {"2_extra_letters_3_max_typo", 3, "=поблачкный~", {"облачный"}}, - {"2_missing_letters_3_max_typo", 3, "=обланы~", {"облачный"}}, - {"3_extra_letters_3_max_typo", 3, "=поблачкныйк~", {}}, - {"1_wrong_1_extra_letter_3_max_typo", 3, "=облочкный~", {"облачный"}}, - {"1_wrong_1_missing_letter_3_max_typo", 3, "=облоный~", {"облачный"}}, - {"1_letter_permutation_3_max_typo", 3, "=болачный~", {}}, - {"2_letters_permutation_3_max_typo", 3, "=болачынй~", {}}, - - {"full_match_4_max_typo", 4, "=облачный~", {"облачный", "отличный"}}, - {"wrong_letter_4_max_typo", 4, "=аблачный~", {"облачный"}}, - {"wrong_letter_in_the_middle_4_max_typo", 4, "=облочный~", {"облачный", "отличный"}}, - {"2_wrong_letters_4_max_typo_1", 4, "=аблочный~", {"облачный"}}, - {"2_wrong_letters_4_max_typo_2", 4, "=оплачнык~", {"облачный"}}, - {"extra_letter_4_max_typo", 4, "=облачкный~", {"облачный"}}, - {"missing_letter_4_max_typo", 4, "=обланый~", {"облачный"}}, - {"2_extra_letters_4_max_typo", 4, "=поблачкный~", {"облачный"}}, - {"2_missing_letters_4_max_typo", 4, "=обланы~", {"облачный"}}, - {"3_extra_letters_4_max_typo", 4, "=поблачкныйк~", {}}, - {"3_missing_letters_4_max_typo", 4, "=обаны~", {}}, - {"1_wrong_1_extra_letter_4_max_typo", 4, "=облочкный~", {"облачный"}}, - {"1_wrong_1_missing_letter_4_max_typo", 4, "=облоный~", {"облачный"}}, - {"1_letter_permutation_4_max_typo", 4, "=болачный~", {"облачный"}}, - {"2_letters_permutation_4_max_typo", 4, "=болачынй~", {}}, - }; - - for (auto& c : cases) { - auto cfg = GetDefaultConfig(); - EXPECT_EQ(cfg.maxTypoDistance, 0) << "This test expects default max_typo_distance == 0"; - cfg.maxSymbolPermutationDistance = 0; - cfg.maxTypos = c.maxTypos; - auto err = SetFTConfig(cfg, "nm1", "ft1", {"ft1"}); - ASSERT_TRUE(err.ok()) << err.what(); - auto q = reindexer::Query("nm1").Where("ft1", CondEq, c.word); - reindexer::QueryResults res; - err = rt.reindexer->Select(q, res); - EXPECT_TRUE(err.ok()) << err.what(); - CheckResultsByField(res, c.expectedResults, "ft1", c.description); - } -} - -TEST_P(FTTyposApi, LettersPermutationDistance) { - // Check different max_symbol_permutation_distance values with default max_typos and default max_typos_distance (=0) - Init(GetDefaultConfig()); - Add("облачный"sv); - Add("табачный"sv); - Add("отличный"sv); - Add("солнечный"sv); - - struct Case { - std::string description; - int maxLettPermDist; - std::string word; - std::set expectedResults; - }; - const std::vector cases = { - {"first_letter_1_move_default_config", std::numeric_limits::max(), "=болачный~", {"облачный"}}, - {"first_letter_1_move_and_switch_default_config", std::numeric_limits::max(), "=волачный~", {}}, - {"first_letter_2_move_default_config", std::numeric_limits::max(), "=блоачный~", {}}, - {"first_letter_3_move_default_config", std::numeric_limits::max(), "=блаочный~", {}}, - {"mid_letter_1_move_default_config", std::numeric_limits::max(), "=обалчный~", {"облачный"}}, - {"mid_letter_1_move_and_switch_default_config", std::numeric_limits::max(), "=обакчный~", {}}, - {"mid_letter_2_move_default_config", std::numeric_limits::max(), "=обачлный~", {}}, - {"mid_letter_3_move_default_config", std::numeric_limits::max(), "=обачнлый~", {}}, - - {"first_letter_1_move_0_lett_perm", 0, "=болачный~", {}}, - {"first_letter_2_move_0_lett_perm", 0, "=блоачный~", {}}, - {"first_letter_3_move_0_lett_perm", 0, "=блаочный~", {}}, - {"mid_letter_1_move_0_lett_perm", 0, "=обалчный~", {}}, - {"mid_letter_2_move_0_lett_perm", 0, 
"=обачлный~", {}}, - {"mid_letter_3_move_0_lett_perm", 0, "=обачнлый~", {}}, - - {"first_letter_1_move_1_lett_perm", 1, "=болачный~", {"облачный"}}, - {"first_letter_2_move_1_lett_perm", 1, "=блоачный~", {}}, - {"first_letter_3_move_1_lett_perm", 1, "=блаочный~", {}}, - {"mid_letter_1_move_1_lett_perm", 1, "=обалчный~", {"облачный"}}, - {"mid_letter_2_move_1_lett_perm", 1, "=обачлный~", {}}, - {"mid_letter_3_move_1_lett_perm", 1, "=обачнлый~", {}}, - - {"first_letter_1_move_2_lett_perm", 2, "=болачный~", {"облачный"}}, - {"first_letter_1_move_and_switch_2_lett_perm", 2, "=бклачный~", {}}, - {"first_letter_2_move_2_lett_perm", 2, "=блоачный~", {"облачный"}}, - {"first_letter_3_move_2_lett_perm", 2, "=блаочный~", {}}, - {"mid_letter_1_move_2_lett_perm", 2, "=обалчный~", {"облачный"}}, - {"mid_letter_1_move_and_switch_2_lett_perm", 2, "=обапчный~", {}}, - {"mid_letter_2_move_2_lett_perm", 2, "=обачлный~", {"облачный"}}, - {"mid_letter_3_move_2_lett_perm", 2, "=обачнлый~", {}}, - - {"first_letter_1_move_3_lett_perm", 3, "=болачный~", {"облачный"}}, - {"first_letter_2_move_3_lett_perm", 3, "=блоачный~", {"облачный"}}, - {"first_letter_3_move_3_lett_perm", 3, "=блаочный~", {"облачный"}}, - {"mid_letter_switch_3_lett_perm", 3, "=обалчный~", {"облачный"}}, - {"mid_letter_2_move_3_lett_perm", 3, "=обачлный~", {"облачный"}}, - {"mid_letter_3_move_3_lett_perm", 3, "=обачнлый~", {"облачный"}}, - - {"first_letter_1_move_any_lett_perm", -1, "=болачный~", {"облачный"}}, - {"first_letter_1_move_and_switch_any_lett_perm", -1, "=бклачный~", {}}, - {"first_letter_2_move_any_lett_perm", -1, "=блоачный~", {"облачный"}}, - {"first_letter_3_move_any_lett_perm", -1, "=блаочный~", {"облачный"}}, - {"mid_letter_1_move_any_lett_perm", -1, "=обалчный~", {"облачный"}}, - {"mid_letter_1_move_and_switch_any_lett_perm", -1, "=обапчный~", {}}, - {"mid_letter_2_move_any_lett_perm", -1, "=обачлный~", {"облачный"}}, - {"mid_letter_3_move_any_lett_perm", -1, "=обачнлый~", {"облачный"}}, - }; - - for (auto& c : cases) { - auto cfg = GetDefaultConfig(); - if (c.maxLettPermDist != std::numeric_limits::max()) { - cfg.maxSymbolPermutationDistance = c.maxLettPermDist; - } - auto err = SetFTConfig(cfg, "nm1", "ft1", {"ft1"}); - ASSERT_TRUE(err.ok()) << err.what(); - auto q = reindexer::Query("nm1").Where("ft1", CondEq, c.word); - reindexer::QueryResults res; - err = rt.reindexer->Select(q, res); - EXPECT_TRUE(err.ok()) << err.what(); - CheckResultsByField(res, c.expectedResults, "ft1", c.description); - } -} - -TEST_P(FTTyposApi, LettersPermutationDistanceWithMaxTypos) { - // Check basic max_symbol_permutation_distance funtionality with different max_typos values. 
- // max_typo_distance is 0 - Init(GetDefaultConfig()); - Add("облачный"sv); - Add("блачныйк"sv); - Add("табачный"sv); - Add("отличный"sv); - Add("солнечный"sv); - - struct Case { - std::string description; - int maxTypos; - std::string word; - std::set expectedResults; - }; - const std::vector cases = { - {"full_match_0_max_typo", 0, "=облачный~", {"облачный"}}, - {"wrong_letter_0_max_typo", 0, "=аблачный~", {}}, - {"extra_letter_0_max_typo", 0, "=облачкный~", {}}, - {"missing_letter_0_max_typo", 0, "=обланый~", {}}, - {"2_extra_letters_0_max_typo", 0, "=поблачкный~", {}}, - {"2_missing_letters_0_max_typo", 0, "=обланы~", {}}, - {"1_letter_permutation_0_max_typo", 0, "=болачный~", {}}, - {"1_letter_permutation_and_switch_0_max_typo", 0, "=долачный~", {}}, - {"1_far_letter_permutation_0_max_typo", 0, "=блоачный~", {}}, - {"1_permutation_and_1_missing_letter_0_max_typo", 0, "=балчный~", {}}, - {"1_permutation_and_2_missing_letters_0_max_typo", 0, "=балчны~", {}}, - {"1_permutation_and_1_extra_letter_0_max_typo", 0, "=болачныйк~", {}}, - {"1_permutation_and_2_extra_letters_0_max_typo", 0, "=болачныйкк~", {}}, - {"2_letters_permutation_0_max_typo", 0, "=болачынй~", {}}, - {"2_permutations_and_1_extra_letter_0_max_typo", 0, "=болачынйк~", {}}, - {"1_permutation_and_1_wrong_letter_0_max_typo_1", 0, "=болочный~", {}}, - {"1_permutation_and_1_wrong_letter_0_max_typo_2", 0, "=облончый~", {}}, - {"1_far_permutation_and_1_wrong_letter_0_max_typo", 0, "=блоочный~", {}}, - - {"full_match_1_max_typo", 1, "=облачный~", {"облачный"}}, - {"wrong_letter_1_max_typo", 1, "=аблачный~", {}}, - {"extra_letter_1_max_typo", 1, "=облачкный~", {"облачный"}}, - {"missing_letter_1_max_typo", 1, "=обланый~", {"облачный"}}, - {"2_extra_letters_1_max_typo", 1, "=поблачкный~", {}}, - {"2_missing_letters_1_max_typo", 1, "=обланы~", {}}, - {"1_letter_permutation_1_max_typo", 1, "=болачный~", {}}, - {"1_letter_permutation_and_switch_1_max_typo", 1, "=долачный~", {}}, - {"1_far_letter_permutation_1_max_typo", 1, "=блоачный~", {}}, - {"1_permutation_and_1_missing_letter_1_max_typo", 1, "=балчный~", {}}, - {"1_permutation_and_2_missing_letters_1_max_typo", 1, "=балчны~", {}}, - {"1_permutation_and_1_extra_letter_1_max_typo", 1, "=болачныйк~", {"блачныйк"}}, - {"1_permutation_and_2_extra_letters_1_max_typo", 1, "=болачныйкк~", {}}, - {"2_letters_permutation_1_max_typo", 1, "=болачынй~", {}}, - {"2_permutations_and_1_extra_letter_1_max_typo", 1, "=болачынйт~", {}}, - {"1_permutation_and_1_wrong_letter_1_max_typo_1", 1, "=болочный~", {}}, - {"1_permutation_and_1_wrong_letter_1_max_typo_2", 1, "=облончый~", {}}, - {"1_far_permutation_and_1_wrong_letter_1_max_typo", 1, "=блоочный~", {}}, - - {"full_match_2_max_typo", 2, "=облачный~", {"облачный"}}, - {"wrong_letter_2_max_typo", 2, "=аблачный~", {"облачный"}}, - {"extra_letter_2_max_typo", 2, "=облачкный~", {"облачный"}}, - {"missing_letter_2_max_typo", 2, "=обланый~", {"облачный"}}, - {"2_extra_letters_2_max_typo", 2, "=поблачкный~", {}}, - {"2_missing_letters_2_max_typo", 2, "=обланы~", {}}, - {"1_letter_permutation_2_max_typo", 2, "=болачный~", {"облачный"}}, - {"1_letter_permutation_and_switch_2_max_typo", 2, "=долачный~", {}}, - {"1_far_letter_permutation_2_max_typo", 2, "=блоачный~", {}}, - {"1_permutation_and_1_missing_letter_2_max_typo", 2, "=балчный~", {}}, - {"1_permutation_and_2_missing_letters_2_max_typo", 2, "=балчны~", {}}, - {"1_permutation_and_1_extra_letter_2_max_typo", 2, "=болачныйк~", {"блачныйк"}}, - {"1_permutation_and_2_extra_letters_2_max_typo", 2, 
"=болачныйкк~", {}}, - {"2_letters_permutation_2_max_typo", 2, "=болачынй~", {}}, - {"2_permutations_and_1_extra_letter_2_max_typo", 2, "=болачынйт~", {}}, - {"1_permutation_and_1_wrong_letter_2_max_typo_1", 2, "=болочный~", {}}, - {"1_permutation_and_1_wrong_letter_2_max_typo_2", 2, "=облончый~", {}}, - {"1_far_permutation_and_1_wrong_letter_2_max_typo", 2, "=блоочный~", {}}, - - {"full_match_3_max_typo", 3, "=облачный~", {"облачный"}}, - {"wrong_letter_3_max_typo", 3, "=аблачный~", {"облачный"}}, - {"extra_letter_3_max_typo", 3, "=облачкный~", {"облачный"}}, - {"missing_letter_3_max_typo", 3, "=обланый~", {"облачный"}}, - {"2_extra_letters_3_max_typo", 3, "=поблачкный~", {"облачный"}}, - {"2_missing_letters_3_max_typo", 3, "=обланы~", {"облачный"}}, - {"1_letter_permutation_3_max_typo", 3, "=болачный~", {"облачный"}}, - {"1_letter_permutation_and_switch_3_max_typo", 3, "=долачный~", {}}, - {"1_far_letter_permutation_3_max_typo", 3, "=блоачный~", {}}, - {"1_permutation_and_1_missing_letter_3_max_typo", 3, "=балчный~", {"облачный", "блачныйк"}}, - {"1_permutation_and_2_missing_letters_3_max_typo", 3, "=балчны~", {}}, - {"1_permutation_and_1_extra_letter_3_max_typo", 3, "=болачныйт~", {"облачный", "блачныйк"}}, - {"1_permutation_and_2_extra_letters_3_max_typo", 3, "=болачныйтт~", {}}, - {"2_letters_permutation_3_max_typo", 3, "=болачынй~", {}}, - {"2_permutations_and_1_extra_letter_3_max_typo", 3, "=болачынйт~", {}}, - {"1_permutation_and_1_wrong_letter_3_max_typo_1", 3, "=болочный~", {}}, - {"1_permutation_and_1_wrong_letter_3_max_typo_2", 3, "=облончый~", {}}, - {"1_far_permutation_and_1_wrong_letter_3_max_typo", 3, "=блоочный~", {}}, - - {"full_match_4_max_typo", 4, "=облачный~", {"облачный", "отличный"}}, - {"wrong_letter_4_max_typo", 4, "=аблачный~", {"облачный"}}, - {"extra_letter_4_max_typo", 4, "=облачкный~", {"облачный"}}, - {"missing_letter_4_max_typo", 4, "=обланый~", {"облачный"}}, - {"2_extra_letters_4_max_typo", 4, "=поблачкный~", {"облачный"}}, - {"2_missing_letters_4_max_typo", 4, "=обланы~", {"облачный"}}, - {"1_letter_permutation_4_max_typo", 4, "=болачный~", {"облачный"}}, - {"1_letter_permutation_and_switch_4_max_typo", 4, "=долачный~", {"облачный"}}, - {"1_far_letter_permutation_4_max_typo_1", 4, "=блоачный~", {"облачный"}}, // Will be handled as double permutation - {"1_far_letter_permutation_4_max_typo_2", 4, "=блаочный~", {}}, - {"1_permutation_and_1_missing_letter_4_max_typo", 4, "=балчный~", {"облачный", "блачныйк"}}, - {"1_permutation_and_2_missing_letters_4_max_typo", 4, "=балчны~", {}}, - {"1_permutation_and_1_extra_letter_4_max_typo", 4, "=болачныйт~", {"облачный", "блачныйк"}}, - {"1_permutation_and_2_extra_letters_4_max_typo", 4, "=болачныйтт~", {}}, - {"2_letters_permutation_4_max_typo", 4, "=болачынй~", {"облачный"}}, - {"2_permutations_and_1_extra_letter_4_max_typo", 4, "=болачынйт~", {}}, - {"1_permutation_and_1_wrong_letter_4_max_typo_1", 4, "=болочный~", {"облачный"}}, - {"1_permutation_and_1_wrong_letter_4_max_typo_2", 4, "=облончый~", {"облачный"}}, - {"1_far_permutation_and_1_wrong_letter_4_max_typo", 4, "=блоочный~", {}}, - }; - - for (auto& c : cases) { - auto cfg = GetDefaultConfig(); - EXPECT_EQ(cfg.maxSymbolPermutationDistance, 1) << "This test expects default max_symbol_permutation_distance == 1"; - cfg.maxTypoDistance = 0; - cfg.maxTypos = c.maxTypos; - auto err = SetFTConfig(cfg, "nm1", "ft1", {"ft1"}); - ASSERT_TRUE(err.ok()) << err.what(); - auto q = reindexer::Query("nm1").Where("ft1", CondEq, c.word); - reindexer::QueryResults res; - err 
= rt.reindexer->Select(q, res); - EXPECT_TRUE(err.ok()) << err.what(); - CheckResultsByField(res, c.expectedResults, "ft1", c.description); - } -} - -TEST_P(FTTyposApi, TyposMissingAndExtraLetters) { - // Check different max_typos, max_extra_letters and max_missing_letters combinations. - // max_typos must always override max_extra_letters and max_missing_letters. - // max_missing_letters and max_extra_letters must restrict corresponding letters' counts - Init(GetDefaultConfig()); - Add("облачный"sv); - Add("табачный"sv); - Add("отличный"sv); - Add("солнечный"sv); - - struct Case { - std::string description; - int maxTypos; - int maxExtraLetters; - int maxMissingLetter; - std::string word; - std::set expectedResults; - }; - const std::vector cases = { - {"full_match_0_max_typos_0_max_extras_0_max_missing", 0, 0, 0, "=облачный~", {"облачный"}}, - {"1_missing_0_max_typos_1_max_extras_1_max_missing", 0, 1, 1, "=облчный~", {}}, - {"1_extra_0_max_typos_1_max_extras_1_max_missing", 0, 1, 1, "=облкачный~", {}}, - - {"full_match_1_max_typos_0_max_extras_0_max_missing", 0, 0, 0, "=облачный~", {"облачный"}}, - {"1_missing_1_max_typos_0_max_extras_1_max_missing", 1, 0, 1, "=облчный~", {"облачный"}}, - {"1_missing_1_max_typos_1_max_extras_0_max_missing", 1, 1, 0, "=облчный~", {}}, - {"1_missing_1_max_typos_1_max_extras_1_max_missing", 1, 1, 1, "=облчный~", {"облачный"}}, - {"2_missing_1_max_typos_0_max_extras_2_max_missing", 1, 0, 2, "=облчны~", {}}, - {"1_extra_1_max_typos_0_max_extras_1_max_missing", 1, 0, 1, "=облкачный~", {}}, - {"1_extra_1_max_typos_1_max_extras_0_max_missing", 1, 1, 0, "=облкачный~", {"облачный"}}, - {"1_extra_1_max_typos_1_max_extras_1_max_missing", 1, 1, 1, "=облкачный~", {"облачный"}}, - {"2_extra_1_max_typos_2_max_extras_0_max_missing", 1, 2, 0, "=облкачныйп~", {}}, - - {"full_match_2_max_typos_1_max_extras_1_max_missing", 2, 1, 1, "=облачный~", {"облачный"}}, - {"1_missing_2_max_typos_0_max_extras_1_max_missing", 2, 0, 1, "=облчный~", {"облачный"}}, - {"1_missing_2_max_typos_1_max_extras_0_max_missing", 2, 1, 0, "=облчный~", {}}, - {"1_missing_2_max_typos_1_max_extras_1_max_missing", 2, 1, 1, "=облчный~", {"облачный"}}, - {"2_missing_2_max_typos_0_max_extras_2_max_missing", 2, 0, 2, "=облчны~", {}}, - {"1_missing_2_max_typos_0_max_extras_any_max_missing", 2, 0, -1, "=облчный~", {"облачный"}}, - {"2_missing_2_max_typos_0_max_extras_any_max_missing", 2, 0, -1, "=облчны~", {}}, - {"1_extra_2_max_typos_0_max_extras_1_max_missing", 2, 0, 1, "=облкачный~", {}}, - {"1_extra_2_max_typos_1_max_extras_0_max_missing", 2, 1, 0, "=облкачный~", {"облачный"}}, - {"1_extra_2_max_typos_1_max_extras_1_max_missing", 2, 1, 1, "=облкачный~", {"облачный"}}, - {"2_extra_2_max_typos_2_max_extras_0_max_missing", 2, 2, 0, "=облкачныйп~", {}}, - {"1_extra_2_max_typos_any_max_extras_0_max_missing", 2, -1, 0, "=облкачный~", {"облачный"}}, - {"2_extra_2_max_typos_any_max_extras_0_max_missing", 2, -1, 0, "=облкачныйп~", {}}, - - {"full_match_3_max_typos_2_max_extras_2_max_missing", 3, 2, 2, "=облачный~", {"облачный"}}, - {"1_missing_3_max_typos_0_max_extras_1_max_missing", 3, 0, 1, "=облчный~", {"облачный", "отличный"}}, - {"1_missing_3_max_typos_1_max_extras_0_max_missing", 3, 1, 0, "=облчный~", {}}, - {"1_missing_3_max_typos_1_max_extras_1_max_missing", 3, 1, 1, "=облчный~", {"облачный", "отличный"}}, - {"2_missing_3_max_typos_0_max_extras_1_max_missing", 3, 0, 1, "=облчны~", {}}, - {"2_missing_3_max_typos_0_max_extras_2_max_missing", 3, 0, 2, "=облчны~", {"облачный"}}, - 
{"2_missing_3_max_typos_0_max_extras_any_max_missing", 3, 0, -1, "=облчны~", {"облачный"}}, - {"3_missing_3_max_typos_0_max_extras_any_max_missing", 3, 0, -1, "=облчн~", {}}, - {"1_extra_3_max_typos_0_max_extras_1_max_missing", 3, 0, 1, "=облкачный~", {}}, - {"1_extra_3_max_typos_1_max_extras_0_max_missing", 3, 1, 0, "=облкачный~", {"облачный"}}, - {"1_extra_3_max_typos_1_max_extras_1_max_missing", 3, 1, 1, "=облкачный~", {"облачный"}}, - {"2_extra_3_max_typos_1_max_extras_0_max_missing", 3, 1, 0, "=облкачныйп~", {}}, - {"2_extra_3_max_typos_2_max_extras_0_max_missing", 3, 2, 0, "=облкачныйп~", {"облачный"}}, - {"2_extra_3_max_typos_any_max_extras_0_max_missing", 3, -1, 0, "=облкачныйп~", {"облачный"}}, - {"3_extra_3_max_typos_any_max_extras_0_max_missing", 3, -1, 0, "=оболкачныйп~", {}}, - - {"full_match_4_max_typos_2_max_extras_2_max_missing", 4, 2, 2, "=облачный~", {"облачный", "отличный"}}, - {"1_missing_4_max_typos_0_max_extras_1_max_missing", 4, 0, 1, "=облчный~", {"облачный", "отличный"}}, - {"1_missing_4_max_typos_1_max_extras_0_max_missing", 4, 1, 0, "=облчный~", {}}, - {"1_missing_4_max_typos_1_max_extras_1_max_missing", 4, 1, 1, "=облчный~", {"облачный", "отличный"}}, - {"2_missing_4_max_typos_0_max_extras_1_max_missing", 4, 0, 1, "=облчны~", {}}, - {"2_missing_4_max_typos_0_max_extras_2_max_missing", 4, 0, 2, "=облчны~", {"облачный"}}, - {"2_missing_4_max_typos_0_max_extras_any_max_missing", 4, 0, -1, "=облчны~", {"облачный"}}, - {"3_missing_4_max_typos_0_max_extras_any_max_missing", 4, 0, -1, "=облчн~", {}}, - {"1_extra_4_max_typos_0_max_extras_1_max_missing", 4, 0, 1, "=облкачный~", {}}, - {"1_extra_4_max_typos_1_max_extras_0_max_missing", 4, 1, 0, "=облкачный~", {"облачный"}}, - {"1_extra_4_max_typos_1_max_extras_1_max_missing", 4, 1, 0, "=облкачный~", {"облачный"}}, - {"2_extra_4_max_typos_1_max_extras_0_max_missing", 4, 1, 0, "=облкачныйп~", {}}, - {"2_extra_4_max_typos_2_max_extras_0_max_missing", 4, 2, 0, "=облкачныйп~", {"облачный"}}, - {"2_extra_4_max_typos_any_max_extras_0_max_missing", 4, -1, 0, "=облкачныйп~", {"облачный"}}, - {"3_extra_4_max_typos_any_max_extras_0_max_missing", 4, -1, 0, "=оболкачныйп~", {}}, - }; - - for (auto& c : cases) { - auto cfg = GetDefaultConfig(); - cfg.maxTypos = c.maxTypos; - cfg.maxExtraLetters = c.maxExtraLetters; - cfg.maxMissingLetters = c.maxMissingLetter; - auto err = SetFTConfig(cfg, "nm1", "ft1", {"ft1"}); - ASSERT_TRUE(err.ok()) << err.what(); - auto q = reindexer::Query("nm1").Where("ft1", CondEq, c.word); - reindexer::QueryResults res; - err = rt.reindexer->Select(q, res); - EXPECT_TRUE(err.ok()) << err.what(); - CheckResultsByField(res, c.expectedResults, "ft1", c.description); - } -} - -INSTANTIATE_TEST_SUITE_P(, FTTyposApi, - ::testing::Values(reindexer::FtFastConfig::Optimization::Memory, reindexer::FtFastConfig::Optimization::CPU), - [](const auto& info) { - switch (info.param) { - case reindexer::FtFastConfig::Optimization::Memory: - return "OptimizationByMemory"; - case reindexer::FtFastConfig::Optimization::CPU: - return "OptimizationByCPU"; - default: - assert(false); - std::abort(); - } - }); diff --git a/cpp_src/gtests/tests/unit/grpcclient_test.cc b/cpp_src/gtests/tests/unit/grpcclient_test.cc deleted file mode 100644 index 7c0e5edec..000000000 --- a/cpp_src/gtests/tests/unit/grpcclient_test.cc +++ /dev/null @@ -1,133 +0,0 @@ -#if defined(WITH_GRPC) - -#include "core/itemimpl.h" -#include "grpcclient_api.h" - -TEST_F(GrpcClientApi, SelectCJSON) { - reindexer::Query q(default_namespace); - q.InnerJoin(kIdField, 
kIdField, CondEq, reindexer::Query(default_namespace + "2")); - - reindexer::grpc::SelectSqlRequest request; - request.set_dbname(kDbName); - request.set_sql(q.GetSQL()); - - reindexer::grpc::OutputFlags* flags = request.flags().New(); - flags->set_encodingtype(reindexer::grpc::EncodingType::CJSON); - flags->set_withnsid(true); - flags->set_withrank(true); - flags->set_withitemid(true); - flags->set_withjoineditems(true); - request.set_allocated_flags(flags); - - grpc::ClientContext context; - std::unique_ptr> reader = rx_->SelectSql(&context, request); - - reindexer::grpc::QueryResultsResponse response; - while (reader->Read(&response)) { - reindexer::Serializer rser(response.data()); - checkCJSONItems(rser, flags); - } -} - -// Perform Select with GRPC-service with -// JSON as output format -TEST_F(GrpcClientApi, SelectJSON) { - // Build query with join, distinct and simple Where condition - reindexer::Query q(default_namespace); - q.Select({kIdField.c_str(), kAgeField.c_str()}); - q.Distinct(kAgeField); - q.InnerJoin(kIdField, kIdField, CondEq, reindexer::Query(default_namespace + "2")); - - // Set input data for GRPC query - reindexer::grpc::SelectSqlRequest request; - request.set_dbname(kDbName); - request.set_sql(q.GetSQL()); - - reindexer::grpc::OutputFlags* flags = request.flags().New(); - flags->set_encodingtype(reindexer::grpc::EncodingType::JSON); - flags->set_withnsid(true); - flags->set_withitemid(true); - flags->set_withjoineditems(true); - request.set_allocated_flags(flags); - - // Execute GRPC query - grpc::ClientContext context; - std::unique_ptr> reader = rx_->SelectSql(&context, request); - - // Read answer and make sure output JSON has a correct format - reindexer::grpc::QueryResultsResponse response; - while (reader->Read(&response)) { - std::string_view json(response.data().c_str(), response.data().length()); - gason::JsonNode root; - gason::JsonParser parser; - size_t len = 0; - ASSERT_NO_THROW(root = parser.Parse(json, &len)); - ASSERT_TRUE(len > 0); - - for (const auto& elem : root) { - const auto& v(elem.value); - std::string_view name(elem.key); - if (name == "items") { - ASSERT_TRUE(v.getTag() == gason::JSON_ARRAY); - for (const auto& element : v) { - auto& object = element.value; - ASSERT_TRUE(object.getTag() == gason::JSON_OBJECT); - for (auto field : object) { - name = std::string_view(field.key); - const auto& fieldValue(field.value); - if (name == "id") { - ASSERT_TRUE(fieldValue.getTag() == gason::JSON_NUMBER); - } else if (name == "joined_test_namespace2") { - ASSERT_TRUE(fieldValue.getTag() == gason::JSON_ARRAY); - for (const auto& item : fieldValue) { - ASSERT_TRUE(item.value.getTag() == gason::JSON_OBJECT); - for (const auto& joinedField : item.value) { - name = std::string_view(joinedField.key); - const auto& joinedFieldValue(joinedField.value); - if (name == "id") { - ASSERT_TRUE(joinedFieldValue.getTag() == gason::JSON_NUMBER); - } else if (name == "price") { - ASSERT_TRUE(joinedFieldValue.getTag() == gason::JSON_NUMBER); - } else { - ASSERT_TRUE(false) << "Wrong JSON field: " << name; - } - } - } - } else { - ASSERT_TRUE(false) << "Wrong JSON field: " << name; - } - } - } - } else if (name == "aggregations") { - ASSERT_TRUE(v.getTag() == gason::JSON_ARRAY); - for (const auto& element : v) { - auto& object = element.value; - ASSERT_TRUE(object.getTag() == gason::JSON_OBJECT); - for (const auto& field : object) { - name = std::string_view(field.key); - const auto& fieldValue(field.value); - if (name == "type") { - ASSERT_TRUE(fieldValue.getTag() == 
gason::JSON_STRING); - ASSERT_TRUE(fieldValue.toString() == "distinct"); - } else if (name == "distincts") { - ASSERT_TRUE(fieldValue.getTag() == gason::JSON_ARRAY); - for (const auto& items : fieldValue) { - ASSERT_TRUE(items.value.getTag() == gason::JSON_STRING); - } - } else if (name == "fields") { - ASSERT_TRUE(fieldValue.getTag() == gason::JSON_ARRAY); - for (const auto& items : fieldValue) { - ASSERT_TRUE(items.value.getTag() == gason::JSON_STRING); - ASSERT_TRUE(items.value.toString() == "age"); - } - } - } - } - } else { - ASSERT_TRUE(false) << "Wrong JSON field: " << name; - } - } - } -} - -#endif diff --git a/cpp_src/gtests/tests/unit/item_test.cc b/cpp_src/gtests/tests/unit/item_test.cc deleted file mode 100644 index b6b358ee9..000000000 --- a/cpp_src/gtests/tests/unit/item_test.cc +++ /dev/null @@ -1,7 +0,0 @@ -#include "item_move_semantics_api.h" - -TEST_F(ItemMoveSemanticsApi, MoveSemanticsOperator) { - prepareItems(); - verifyAndUpsertItems(); - verifyJsonsOfUpsertedItems(); -} diff --git a/cpp_src/gtests/tests/unit/join_test.cc b/cpp_src/gtests/tests/unit/join_test.cc deleted file mode 100644 index 4a9dbe86c..000000000 --- a/cpp_src/gtests/tests/unit/join_test.cc +++ /dev/null @@ -1,896 +0,0 @@ -#include -#include -#include -#include -#include "core/itemimpl.h" -#include "core/nsselecter/joinedselector.h" -#include "core/type_consts_helpers.h" -#include "join_on_conditions_api.h" -#include "join_selects_api.h" -#include "test_helpers.h" - -TEST_F(JoinSelectsApi, JoinsAsWhereConditionsTest) { - Query queryGenres{Query(genres_namespace).Not().Where(genreid, CondEq, 1)}; - Query queryAuthors{Query(authors_namespace).Where(authorid, CondGe, 10).Where(authorid, CondLe, 25)}; - Query queryAuthors2{Query(authors_namespace).Where(authorid, CondGe, 300).Where(authorid, CondLe, 400)}; - // clang-format off - Query queryBooks{Query(books_namespace, 0, 50) - .OpenBracket() - .Where(price, CondGe, 9540) - .Where(price, CondLe, 9550) - .CloseBracket() - .Or().OpenBracket() - .Where(price, CondGe, 1000) - .Where(price, CondLe, 2000) - .InnerJoin(authorid_fk, authorid, CondEq, std::move(queryAuthors)) - .OrInnerJoin(genreId_fk, genreid, CondEq, std::move(queryGenres)) - .CloseBracket() - .Or().OpenBracket() - .Where(pages, CondEq, 0) - .CloseBracket() - .Or().InnerJoin(authorid_fk, authorid, CondEq, std::move(queryAuthors2))}; - // clang-format on - - QueryWatcher watcher{queryBooks}; - reindexer::QueryResults qr; - Error err = rt.reindexer->Select(queryBooks, qr); - ASSERT_TRUE(err.ok()) << err.what(); - EXPECT_LE(qr.Count(), 50); - CheckJoinsInComplexWhereCondition(qr); -} - -TEST_F(JoinSelectsApi, JoinsLockWithCache_364) { - Query queryGenres{Query(genres_namespace).Where(genreid, CondEq, 1)}; - Query queryBooks{Query(books_namespace, 0, 50).InnerJoin(genreId_fk, genreid, CondEq, std::move(queryGenres))}; - QueryWatcher watcher{queryBooks}; - TurnOnJoinCache(genres_namespace); - - for (int i = 0; i < 10; ++i) { - reindexer::QueryResults qr; - Error err = rt.reindexer->Select(queryBooks, qr); - ASSERT_TRUE(err.ok()) << err.what(); - } -} - -TEST_F(JoinSelectsApi, JoinsAsWhereConditionsTest2) { - std::string sql = - "SELECT * FROM books_namespace WHERE " - "(price >= 9540 AND price <= 9550) " - "OR (price >= 1000 AND price <= 2000 INNER JOIN (SELECT * FROM authors_namespace WHERE authorid >= 10 AND authorid <= 25)ON " - "authors_namespace.authorid = books_namespace.authorid_fk OR INNER JOIN (SELECT * FROM genres_namespace WHERE NOT genreid = 1) ON " - "genres_namespace.genreid = 
books_namespace.genreid_fk) " - "OR (pages = 0) " - "OR INNER JOIN (SELECT *FROM authors_namespace WHERE authorid >= 300 AND authorid <= 400) ON authors_namespace.authorid = " - "books_namespace.authorid_fk LIMIT 50"; - - Query query = Query::FromSQL(sql); - QueryWatcher watcher{query}; - reindexer::QueryResults qr; - Error err = rt.reindexer->Select(query, qr); - ASSERT_TRUE(err.ok()) << err.what(); - EXPECT_LE(qr.Count(), 50); - CheckJoinsInComplexWhereCondition(qr); -} - -TEST_F(JoinSelectsApi, SqlParsingTest) { - std::string sql = - "select * from books_namespace where (pages > 0 and inner join (select * from authors_namespace limit 10) on " - "authors_namespace.authorid = " - "books_namespace.authorid_fk and price > 1000 or inner join (select * from genres_namespace limit 10) on " - "genres_namespace.genreid = books_namespace.genreid_fk and pages < 10000 and inner join (select * from authors_namespace WHERE " - "(authorid >= 10 AND authorid <= 20) limit 100) on " - "authors_namespace.authorid = books_namespace.authorid_fk) or pages == 3 limit 20"; - - Query srcQuery = Query::FromSQL(sql); - QueryWatcher watcher{srcQuery}; - - reindexer::WrSerializer wrser; - srcQuery.GetSQL(wrser); - - Query dstQuery = Query::FromSQL(wrser.Slice()); - - ASSERT_EQ(srcQuery, dstQuery); - - wrser.Reset(); - srcQuery.Serialize(wrser); - reindexer::Serializer ser(wrser.Buf(), wrser.Len()); - Query deserializedQuery = Query::Deserialize(ser); - ASSERT_EQ(srcQuery, deserializedQuery) << "Original query:\n" - << srcQuery.GetSQL() << "\nDeserialized query:\n" - << deserializedQuery.GetSQL(); -} - -TEST_F(JoinSelectsApi, InnerJoinTest) { - Query queryAuthors(authors_namespace); - Query queryBooks{Query(books_namespace, 0, 10).Where(price, CondGe, 600)}; - Query joinQuery = queryBooks.InnerJoin(authorid_fk, authorid, CondEq, std::move(queryAuthors)); - QueryWatcher watcher{joinQuery}; - - reindexer::QueryResults joinQueryRes; - Error err = rt.reindexer->Select(joinQuery, joinQueryRes); - ASSERT_TRUE(err.ok()) << err.what(); - - err = VerifyResJSON(joinQueryRes); - ASSERT_TRUE(err.ok()) << err.what(); - - reindexer::QueryResults pureSelectRes; - err = rt.reindexer->Select(queryBooks, pureSelectRes); - ASSERT_TRUE(err.ok()) << err.what(); - - QueryResultRows joinSelectRows; - QueryResultRows pureSelectRows; - - if (err.ok()) { - for (auto it : pureSelectRes) { - Item booksItem(it.GetItem(false)); - Variant authorIdKeyRef = booksItem[authorid_fk]; - - reindexer::QueryResults authorsSelectRes; - Query authorsQuery{Query(authors_namespace).Where(authorid, CondEq, authorIdKeyRef)}; - err = rt.reindexer->Select(authorsQuery, authorsSelectRes); - ASSERT_TRUE(err.ok()) << err.what(); - - if (err.ok()) { - int bookId = booksItem[bookid].Get(); - QueryResultRow& pureSelectRow = pureSelectRows[bookId]; - - FillQueryResultFromItem(booksItem, pureSelectRow); - for (auto jit : authorsSelectRes) { - Item authorsItem(jit.GetItem(false)); - FillQueryResultFromItem(authorsItem, pureSelectRow); - } - } - } - - FillQueryResultRows(joinQueryRes, joinSelectRows); - EXPECT_EQ(CompareQueriesResults(pureSelectRows, joinSelectRows), true); - } -} - -TEST_F(JoinSelectsApi, LeftJoinTest) { - Query booksQuery{Query(books_namespace).Where(price, CondGe, 500)}; - reindexer::QueryResults booksQueryRes; - Error err = rt.reindexer->Select(booksQuery, booksQueryRes); - ASSERT_TRUE(err.ok()) << err.what(); - - QueryResultRows pureSelectRows; - if (err.ok()) { - for (auto it : booksQueryRes) { - Item item(it.GetItem(false)); - BookId bookId = 
item[bookid].Get(); - QueryResultRow& resultRow = pureSelectRows[bookId]; - FillQueryResultFromItem(item, resultRow); - } - } - - Query joinQuery{Query(authors_namespace).LeftJoin(authorid, authorid_fk, CondEq, std::move(booksQuery))}; - - QueryWatcher watcher{joinQuery}; - reindexer::QueryResults joinQueryRes; - err = rt.reindexer->Select(joinQuery, joinQueryRes); - ASSERT_TRUE(err.ok()) << err.what(); - - err = VerifyResJSON(joinQueryRes); - ASSERT_TRUE(err.ok()) << err.what(); - - if (err.ok()) { - std::unordered_set presentedAuthorIds; - std::unordered_map rowidsIndexes; - int i = 0; - for (auto rowIt : joinQueryRes.ToLocalQr()) { - Item item(rowIt.GetItem(false)); - Variant authorIdKeyRef1 = item[authorid]; - const reindexer::ItemRef& rowid = rowIt.GetItemRef(); - - auto itemIt = rowIt.GetJoined(); - if (itemIt.getJoinedItemsCount() == 0) continue; - for (auto joinedFieldIt = itemIt.begin(); joinedFieldIt != itemIt.end(); ++joinedFieldIt) { - reindexer::ItemImpl item2(joinedFieldIt.GetItem(0, joinQueryRes.GetPayloadType(1), joinQueryRes.GetTagsMatcher(1))); - Variant authorIdKeyRef2 = item2.GetField(joinQueryRes.GetPayloadType(1).FieldByName(authorid_fk)); - EXPECT_EQ(authorIdKeyRef1, authorIdKeyRef2); - } - - presentedAuthorIds.insert(static_cast(authorIdKeyRef1)); - rowidsIndexes.insert({rowid.Id(), i}); - i++; - } - - for (auto rowIt : joinQueryRes.ToLocalQr()) { - IdType rowid = rowIt.GetItemRef().Id(); - auto itemIt = rowIt.GetJoined(); - if (itemIt.getJoinedItemsCount() == 0) continue; - auto joinedFieldIt = itemIt.begin(); - for (int i = 0; i < joinedFieldIt.ItemsCount(); ++i) { - reindexer::ItemImpl item(joinedFieldIt.GetItem(i, joinQueryRes.GetPayloadType(1), joinQueryRes.GetTagsMatcher(1))); - - Variant authorIdKeyRef1 = item.GetField(joinQueryRes.GetPayloadType(1).FieldByName(authorid_fk)); - int authorId = static_cast(authorIdKeyRef1); - - auto itAutorid(presentedAuthorIds.find(authorId)); - EXPECT_NE(itAutorid, presentedAuthorIds.end()); - - auto itRowidIndex(rowidsIndexes.find(rowid)); - EXPECT_NE(itRowidIndex, rowidsIndexes.end()); - - if (itRowidIndex != rowidsIndexes.end()) { - Item item2((joinQueryRes.begin() + rowid).GetItem(false)); - Variant authorIdKeyRef2 = item2[authorid]; - EXPECT_EQ(authorIdKeyRef1, authorIdKeyRef2); - } - } - } - } -} - -TEST_F(JoinSelectsApi, OrInnerJoinTest) { - Query queryGenres(genres_namespace); - Query queryAuthors(authors_namespace); - Query queryBooks{Query(books_namespace, 0, 10).Where(price, CondGe, 500)}; - Query innerJoinQuery = std::move(queryBooks.InnerJoin(authorid_fk, authorid, CondEq, std::move(queryAuthors))); - Query orInnerJoinQuery = std::move(innerJoinQuery.OrInnerJoin(genreId_fk, genreid, CondEq, std::move(queryGenres))); - QueryWatcher watcher{orInnerJoinQuery}; - - const int authorsNsJoinIndex = 0; - const int genresNsJoinIndex = 1; - - reindexer::QueryResults queryRes; - Error err = rt.reindexer->Select(orInnerJoinQuery, queryRes); - ASSERT_TRUE(err.ok()) << err.what(); - - err = VerifyResJSON(queryRes); - ASSERT_TRUE(err.ok()) << err.what(); - - if (err.ok()) { - for (auto rowIt : queryRes) { - Item item(rowIt.GetItem(false)); - auto itemIt = rowIt.GetJoined(); - - reindexer::joins::JoinedFieldIterator authorIdIt = itemIt.at(authorsNsJoinIndex); - Variant authorIdKeyRef1 = item[authorid_fk]; - for (int i = 0; i < authorIdIt.ItemsCount(); ++i) { - reindexer::ItemImpl authorsItem(authorIdIt.GetItem(i, queryRes.GetPayloadType(1), queryRes.GetTagsMatcher(1))); - Variant authorIdKeyRef2 = 
authorsItem.GetField(queryRes.GetPayloadType(1).FieldByName(authorid)); - EXPECT_EQ(authorIdKeyRef1, authorIdKeyRef2); - } - - reindexer::joins::JoinedFieldIterator genreIdIt = itemIt.at(genresNsJoinIndex); - Variant genresIdKeyRef1 = item[genreId_fk]; - for (int i = 0; i < genreIdIt.ItemsCount(); ++i) { - reindexer::ItemImpl genresItem = genreIdIt.GetItem(i, queryRes.GetPayloadType(2), queryRes.GetTagsMatcher(2)); - Variant genresIdKeyRef2 = genresItem.GetField(queryRes.GetPayloadType(2).FieldByName(genreid)); - EXPECT_EQ(genresIdKeyRef1, genresIdKeyRef2); - } - } - } -} - -TEST_F(JoinSelectsApi, JoinTestSorting) { - for (size_t i = 0; i < 10; ++i) { - int booksTimeout = 1000, authorsTimeout = 0; - if (i % 2 == 0) { - std::swap(booksTimeout, authorsTimeout); - } else if (i % 3) { - authorsTimeout = booksTimeout; - } - ChangeNsOptimizationTimeout(books_namespace, booksTimeout); - ChangeNsOptimizationTimeout(authors_namespace, authorsTimeout); - std::this_thread::sleep_for(std::chrono::milliseconds(150)); - Query booksQuery{Query(books_namespace, 11, 1111).Where(pages, CondGe, 100).Where(price, CondGe, 200).Sort(price, true)}; - Query joinQuery{Query(authors_namespace) - .Where(authorid, CondLe, 100) - .LeftJoin(authorid, authorid_fk, CondEq, std::move(booksQuery)) - .Sort(age, false) - .Limit(10)}; - - QueryWatcher watcher{joinQuery}; - reindexer::QueryResults joinQueryRes; - Error err = rt.reindexer->Select(joinQuery, joinQueryRes); - ASSERT_TRUE(err.ok()) << err.what(); - - Variant prevField; - for (auto rowIt : joinQueryRes) { - Item item = rowIt.GetItem(false); - if (!prevField.Type().Is()) { - ASSERT_LE(prevField.Compare(item[age]), 0); - } - - Variant key = item[authorid]; - auto itemIt = rowIt.GetJoined(); - if (itemIt.getJoinedItemsCount() == 0) continue; - auto joinedFieldIt = itemIt.begin(); - - Variant prevJoinedValue; - for (int i = 0; i < joinedFieldIt.ItemsCount(); ++i) { - reindexer::ItemImpl joinItem(joinedFieldIt.GetItem(i, joinQueryRes.GetPayloadType(1), joinQueryRes.GetTagsMatcher(1))); - Variant fkey = joinItem.GetField(joinQueryRes.GetPayloadType(1).FieldByName(authorid_fk)); - ASSERT_EQ(key.Compare(fkey), 0) << key.As() << " " << fkey.As(); - Variant recentJoinedValue = joinItem.GetField(joinQueryRes.GetPayloadType(1).FieldByName(price)); - ASSERT_GE(recentJoinedValue.As(), 200); - if (!prevJoinedValue.Type().Is()) { - ASSERT_GE(prevJoinedValue.Compare(recentJoinedValue), 0); - } - Variant pagesValue = joinItem.GetField(joinQueryRes.GetPayloadType(1).FieldByName(pages)); - ASSERT_GE(pagesValue.As(), 100); - prevJoinedValue = recentJoinedValue; - } - prevField = item[age]; - } - } -} - -TEST_F(JoinSelectsApi, TestSortingByJoinedNs) { - Query joinedQuery1 = Query(books_namespace); - Query query1{Query(authors_namespace) - .LeftJoin(authorid, authorid_fk, CondEq, std::move(joinedQuery1)) - .Sort(books_namespace + '.' + price, false)}; - - reindexer::QueryResults joinQueryRes1; - Error err = rt.reindexer->Select(query1, joinQueryRes1); - // several book to one author, cannot sort - ASSERT_FALSE(err.ok()); - EXPECT_EQ(err.what(), "Not found value joined from ns books_namespace"); - - Query joinedQuery2 = Query(authors_namespace); - Query query2{Query(books_namespace) - .InnerJoin(authorid_fk, authorid, CondEq, std::move(joinedQuery2)) - .Sort(authors_namespace + '.' 
+ age, false)}; - - QueryWatcher watcher{query2}; - reindexer::QueryResults joinQueryRes2; - err = rt.reindexer->Select(query2, joinQueryRes2); - ASSERT_TRUE(err.ok()) << err.what(); - - Variant prevValue; - for (auto rowIt : joinQueryRes2) { - const auto itemIt = rowIt.GetJoined(); - ASSERT_EQ(itemIt.getJoinedItemsCount(), 1); - const auto joinedFieldIt = itemIt.begin(); - reindexer::ItemImpl joinItem(joinedFieldIt.GetItem(0, joinQueryRes2.GetPayloadType(1), joinQueryRes2.GetTagsMatcher(1))); - const Variant recentValue = joinItem.GetField(joinQueryRes2.GetPayloadType(1).FieldByName(age)); - if (!prevValue.Type().Is()) { - reindexer::WrSerializer ser; - ASSERT_LE(prevValue.Compare(recentValue), 0) << (prevValue.Dump(ser), ser << ' ', recentValue.Dump(ser), ser.Slice()); - } - prevValue = recentValue; - } -} - -TEST_F(JoinSelectsApi, JoinTestSelectNonIndexedField) { - reindexer::QueryResults qr; - Query authorsQuery = Query(authors_namespace); - Error err = rt.reindexer->Select(Query(books_namespace) - .Where(rating, CondEq, Variant(static_cast(100))) - .InnerJoin(authorid_fk, authorid, CondEq, std::move(authorsQuery)), - qr); - - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), 1); - - Item theOnlyItem = qr.begin().GetItem(false); - VariantArray krefs = theOnlyItem[title]; - ASSERT_EQ(krefs.size(), 1); - ASSERT_EQ(krefs[0].As(), "Crime and Punishment"); -} - -TEST_F(JoinSelectsApi, JoinByNonIndexedField) { - Error err = rt.reindexer->OpenNamespace(default_namespace); - ASSERT_TRUE(err.ok()) << err.what(); - DefineNamespaceDataset(default_namespace, {IndexDeclaration{"id", "hash", "int", IndexOpts().PK(), 0}}); - - std::stringstream json; - json << "{" << addQuotes(id) << ":" << 1 << "," << addQuotes(authorid_fk) << ":" << DostoevskyAuthorId << "}"; - Item lonelyItem = NewItem(default_namespace); - ASSERT_TRUE(lonelyItem.Status().ok()) << lonelyItem.Status().what(); - - err = lonelyItem.FromJSON(json.str()); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->Upsert(default_namespace, lonelyItem); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->Commit(books_namespace); - ASSERT_TRUE(err.ok()) << err.what(); - - reindexer::QueryResults qr; - Query authorsQuery = Query(authors_namespace); - err = rt.reindexer->Select(Query(default_namespace) - .Where(authorid_fk, CondEq, Variant(DostoevskyAuthorId)) - .InnerJoin(authorid_fk, authorid, CondEq, std::move(authorsQuery)), - qr); - - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), 1); - - // And backwards even! 
- reindexer::QueryResults qr2; - Query testNsQuery = Query(default_namespace); - err = rt.reindexer->Select(Query(authors_namespace) - .Where(authorid, CondEq, Variant(DostoevskyAuthorId)) - .InnerJoin(authorid, authorid_fk, CondEq, std::move(testNsQuery)), - qr2); - - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr2.Count(), 1); -} - -TEST_F(JoinSelectsApi, JoinsEasyStressTest) { - auto selectTh = [this]() { - Query queryGenres(genres_namespace); - Query queryAuthors(authors_namespace); - Query queryBooks{Query(books_namespace, 0, 10).Where(price, CondGe, 600).Sort(bookid, false)}; - Query joinQuery1 = std::move(queryBooks.InnerJoin(authorid_fk, authorid, CondEq, queryAuthors).Sort(pages, false)); - Query joinQuery2 = std::move(joinQuery1.LeftJoin(authorid_fk, authorid, CondEq, std::move(queryAuthors))); - Query orInnerJoinQuery = - std::move(joinQuery2.OrInnerJoin(genreId_fk, genreid, CondEq, std::move(queryGenres)).Sort(price, true).Limit(20)); - for (size_t i = 0; i < 10; ++i) { - reindexer::QueryResults queryRes; - Error err = rt.reindexer->Select(orInnerJoinQuery, queryRes); - ASSERT_TRUE(err.ok()) << err.what(); - EXPECT_GT(queryRes.Count(), 0); - } - }; - - auto removeTh = [this]() { - QueryResults qres; - Error err = rt.reindexer->Delete(Query(books_namespace, 0, 10).Where(price, CondGe, 5000), qres); - ASSERT_TRUE(err.ok()) << err.what(); - }; - - int32_t since = 0, count = 1000; - std::vector threads; - for (size_t i = 0; i < 20; ++i) { - threads.push_back(std::thread(selectTh)); - if (i % 2 == 0) threads.push_back(std::thread(removeTh)); - if (i % 4 == 0) threads.push_back(std::thread([this, since, count]() { FillBooksNamespace(since, count); })); - since += 1000; - } - for (size_t i = 0; i < threads.size(); ++i) threads[i].join(); -} - -TEST_F(JoinSelectsApi, JoinPreResultStoreValuesOptimizationStressTest) { - using reindexer::JoinedSelector; - static const std::string rightNs = "rightNs"; - static constexpr char const* data = "data"; - static constexpr int maxDataValue = 10; - static constexpr int maxRightNsRowCount = maxDataValue * JoinedSelector::MaxIterationsForPreResultStoreValuesOptimization(); - static constexpr int maxLeftNsRowCount = 10000; - static constexpr size_t leftNsCount = 50; - static std::vector leftNs; - if (leftNs.empty()) { - leftNs.reserve(leftNsCount); - for (size_t i = 0; i < leftNsCount; ++i) leftNs.push_back("leftNs" + std::to_string(i)); - } - - const auto createNs = [this](const std::string& ns) { - Error err = rt.reindexer->OpenNamespace(ns); - ASSERT_TRUE(err.ok()) << err.what(); - DefineNamespaceDataset( - ns, {IndexDeclaration{id, "hash", "int", IndexOpts().PK(), 0}, IndexDeclaration{data, "hash", "int", IndexOpts(), 0}}); - }; - const auto fill = [this](const std::string& ns, int startId, int endId) { - for (int i = startId; i < endId; ++i) { - Item item = NewItem(ns); - item[id] = i; - item[data] = rand() % maxDataValue; - Upsert(ns, item); - } - const auto err = Commit(ns); - ASSERT_TRUE(err.ok()) << err.what(); - }; - - createNs(rightNs); - fill(rightNs, 0, maxRightNsRowCount); - std::atomic start{false}; - std::vector threads; - threads.reserve(leftNs.size()); - for (size_t i = 0; i < leftNs.size(); ++i) { - createNs(leftNs[i]); - fill(leftNs[i], 0, maxLeftNsRowCount); - threads.emplace_back([this, i, &start]() { - // about 50% of queries will use the optimization - Query q{Query(leftNs[i]).InnerJoin(data, data, CondEq, Query(rightNs).Where(data, CondEq, rand() % maxDataValue))}; - QueryResults qres; - while (!start) 
std::this_thread::sleep_for(std::chrono::milliseconds(1)); - Error err = rt.reindexer->Select(q, qres); - ASSERT_TRUE(err.ok()) << err.what(); - }); - } - start = true; - for (auto& th : threads) th.join(); -} - -static void checkForAllowedJsonTags(const std::vector& tags, gason::JsonValue jsonValue) { - size_t count = 0; - for (const auto& elem : jsonValue) { - ASSERT_NE(std::find(tags.begin(), tags.end(), std::string_view(elem.key)), tags.end()); - ++count; - } - ASSERT_EQ(count, tags.size()); -} - -TEST_F(JoinSelectsApi, JoinWithSelectFilter) { - Query queryAuthors = Query(authors_namespace).Select({name, age}); - - Query queryBooks{Query(books_namespace) - .Where(pages, CondGe, 100) - .InnerJoin(authorid_fk, authorid, CondEq, std::move(queryAuthors)) - .Select({title, price})}; - - QueryResults qr; - Error err = rt.reindexer->Select(queryBooks, qr); - ASSERT_TRUE(err.ok()) << err.what(); - - for (auto it : qr) { - ASSERT_TRUE(it.Status().ok()) << it.Status().what(); - reindexer::WrSerializer wrser; - err = it.GetJSON(wrser, false); - ASSERT_TRUE(err.ok()) << err.what(); - - reindexer::joins::ItemIterator joinIt = it.GetJoined(); - gason::JsonParser jsonParser; - gason::JsonNode root = jsonParser.Parse(reindexer::giftStr(wrser.Slice())); - checkForAllowedJsonTags({title, price, "joined_authors_namespace"}, root.value); - - for (auto fieldIt = joinIt.begin(); fieldIt != joinIt.end(); ++fieldIt) { - LocalQueryResults jqr = fieldIt.ToQueryResults(); - jqr.addNSContext(qr, 1, reindexer::lsn_t()); - for (auto jit : jqr) { - ASSERT_TRUE(jit.Status().ok()) << jit.Status().what(); - wrser.Reset(); - err = jit.GetJSON(wrser, false); - ASSERT_TRUE(err.ok()) << err.what(); - root = jsonParser.Parse(reindexer::giftStr(wrser.Slice())); - checkForAllowedJsonTags({name, age}, root.value); - } - } - } -} - -// Execute a query that is merged with another one: -// both queries should contain join queries, -// joined NS for the 1st query should be the same -// as the main NS of the merged query. -TEST_F(JoinSelectsApi, TestMergeWithJoins) { - // Build the 1st query with 'authors_namespace' as join. - Query queryBooks = Query(books_namespace); - queryBooks.InnerJoin(authorid_fk, authorid, CondEq, Query(authors_namespace)); - - // Build the 2nd query (with join) with 'authors_namespace' as the main NS. - Query queryAuthors = Query(authors_namespace); - queryAuthors.LeftJoin(locationid_fk, locationid, CondEq, Query(location_namespace)); - queryBooks.Merge(std::move(queryAuthors)); - - // Execute it - QueryResults qr; - Error err = rt.reindexer->Select(queryBooks, qr); - ASSERT_TRUE(err.ok()) << err.what(); - VerifyResJSON(qr); - - // Make sure results are correct: - // values of main and joined namespaces match - // in both parts of the query. - size_t rowId = 0; - for (auto it : qr) { - Item item = it.GetItem(false); - auto joined = it.GetJoined(); - ASSERT_EQ(joined.getJoinedFieldsCount(), 1); - - bool booksItem = (rowId <= 10000); - LocalQueryResults jqr = joined.begin().ToQueryResults(); - int joinedNs = booksItem ? 
2 : 3; - jqr.addNSContext(qr, joinedNs, reindexer::lsn_t()); - - if (booksItem) { - Variant fkValue = item[authorid_fk]; - for (auto jit : jqr) { - Item jItem = jit.GetItem(false); - Variant value = jItem[authorid]; - ASSERT_EQ(value, fkValue); - } - } else { - Variant fkValue = item[locationid_fk]; - for (auto jit : jqr) { - Item jItem = jit.GetItem(false); - Variant value = jItem[locationid]; - ASSERT_EQ(value, fkValue); - } - } - - ++rowId; - } -} - -// Check JOINs nested into the other JOINs (expecting errors) -TEST_F(JoinSelectsApi, TestNestedJoinsError) { - constexpr char sqlPattern[] = - R"(select * from books_namespace %s (select * from authors_namespace %s (select * from books_namespace) on authors_namespace.authorid = books_namespace.authorid_fk) on authors_namespace.authorid = books_namespace.authorid_fk)"; - auto joinTypes = {"inner join", "join", "left join"}; - for (auto& firstJoin : joinTypes) { - for (auto& secondJoin : joinTypes) { - auto sql = fmt::sprintf(sqlPattern, firstJoin, secondJoin); - ValidateQueryThrow(sql, errParseSQL, "Expected ')', but found .*, line: 1 column: .*"); - } - } -} - -// Check MERGEs nested into the JOINs (expecting errors) -TEST_F(JoinSelectsApi, TestNestedMergesInJoinsError) { - constexpr char sqlPattern[] = - R"(select * from books_namespace %s (select * from authors_namespace merge (select * from books_namespace)) on authors_namespace.authorid = books_namespace.authorid_fk)"; - auto joinTypes = {"inner join", "join", "left join"}; - for (auto& join : joinTypes) { - auto sql = fmt::sprintf(sqlPattern, join); - ValidateQueryThrow(sql, errParseSQL, "Expected ')', but found 'merge', line: 1 column: .*"); - } -} - -// Check MERGEs nested into the MERGEs (expecting errors) -TEST_F(JoinSelectsApi, TestNestedMergesInMergesError) { - constexpr char sql[] = - R"(select * from books_namespace merge (select * from authors_namespace merge (select * from books_namespace)))"; - ValidateQueryError(sql, errParams, "MERGEs nested into the MERGEs are not supported"); -} - -TEST_F(JoinSelectsApi, CountCachedWithDifferentJoinConditions) { - // Test checks if cached total values is changing after inner join's condition change - - const std::vector kBaseQueries = { - Query(books_namespace).InnerJoin(authorid_fk, authorid, CondEq, Query(authors_namespace)).Limit(10), - Query(books_namespace).InnerJoin(authorid_fk, authorid, CondEq, Query(authors_namespace).Where(authorid, CondLe, 100)).Limit(10), - Query(books_namespace).InnerJoin(authorid_fk, authorid, CondEq, Query(authors_namespace).Where(authorid, CondGe, 200)).Limit(10), - Query(books_namespace).InnerJoin(authorid_fk, authorid, CondEq, Query(authors_namespace).Where(authorid, CondLe, 400)).Limit(10), - Query(books_namespace).InnerJoin(authorid_fk, authorid, CondEq, Query(authors_namespace).Where(authorid, CondGe, 400)).Limit(10)}; - - SetQueriesCacheHitsCount(1); - for (auto& bq : kBaseQueries) { - const Query cachedTotalNoCondQ = Query(bq).CachedTotal(); - const Query totalCountNoCondQ = Query(bq).ReqTotal(); - QueryResults qrRegular; - auto err = rt.reindexer->Select(totalCountNoCondQ, qrRegular); - ASSERT_TRUE(err.ok()) << err.what() << "; " << totalCountNoCondQ.GetSQL(); - // Run all the queries with CountCached twice to check main and cached values - for (int i = 0; i < 2; ++i) { - QueryResults qrCached; - err = rt.reindexer->Select(cachedTotalNoCondQ, qrCached); - ASSERT_TRUE(err.ok()) << err.what() << "; i = " << i << "; " << cachedTotalNoCondQ.GetSQL(); - EXPECT_EQ(qrCached.TotalCount(), 
qrRegular.TotalCount()) << " i = " << i << "; " << bq.GetSQL(); - } - } -} - -TEST_F(JoinSelectsApi, CountCachedWithJoinNsUpdates) { - const Genre kLastGenre = *genres.rbegin(); - const std::vector kBaseQueries = { - Query(books_namespace) - .InnerJoin(authorid_fk, authorid, CondEq, Query(authors_namespace).Where(authorid, CondGe, 100)) - .OrInnerJoin(genreId_fk, genreid, CondEq, - Query(genres_namespace) - .Where(genrename, CondSet, - {Variant{"non fiction"}, Variant{"poetry"}, Variant{"documentary"}, Variant{kLastGenre.name}})) - .Limit(10), - Query(books_namespace) - .InnerJoin(authorid_fk, authorid, CondEq, Query(authors_namespace).Where(authorid, CondGe, 100)) - .InnerJoin(genreId_fk, genreid, CondEq, - Query(genres_namespace) - .Where(genrename, CondSet, - {Variant{"non fiction"}, Variant{"poetry"}, Variant{"documentary"}, Variant{kLastGenre.name}})) - .Limit(10), - Query(books_namespace) - .InnerJoin(authorid_fk, authorid, CondEq, Query(authors_namespace).Where(authorid, CondGe, 100)) - .OpenBracket() - .InnerJoin(genreId_fk, genreid, CondEq, - Query(genres_namespace).Where(genrename, CondSet, {Variant{"non fiction"}, Variant{kLastGenre.name}})) - .OrInnerJoin( - genreId_fk, genreid, CondEq, - Query(genres_namespace).Where(genrename, CondSet, {Variant{"poetry"}, Variant{"documentary"}, Variant{kLastGenre.name}})) - .CloseBracket() - .Limit(10), - Query(books_namespace) - .InnerJoin(authorid_fk, authorid, CondEq, Query(authors_namespace).Where(authorid, CondGe, 100)) - .OpenBracket() - .InnerJoin(genreId_fk, genreid, CondEq, - Query(genres_namespace).Where(genrename, CondSet, {Variant{"non fiction"}, Variant{kLastGenre.name}})) - .InnerJoin(genreId_fk, genreid, CondEq, Query(genres_namespace)) - .CloseBracket() - .Limit(10), - Query(books_namespace) - .InnerJoin(authorid_fk, authorid, CondEq, Query(authors_namespace).Where(authorid, CondGe, 100)) - .OpenBracket() - .InnerJoin(genreId_fk, genreid, CondEq, - Query(genres_namespace).Where(genrename, CondSet, {Variant{"non fiction"}, Variant{kLastGenre.name}})) - .OrInnerJoin(genreId_fk, genreid, CondEq, Query(genres_namespace)) - .CloseBracket() - .Limit(10)}; - - SetQueriesCacheHitsCount(1); - for (auto& bq : kBaseQueries) { - const Query cachedTotalNoCondQ = Query(bq).CachedTotal(); - const Query totalCountNoCondQ = Query(bq).ReqTotal(); - auto checkQuery = [&](std::string_view step) { - // With Initial data - QueryResults qrRegular; - auto err = rt.reindexer->Select(totalCountNoCondQ, qrRegular); - ASSERT_TRUE(err.ok()) << err.what() << "; step: " << step << "; " << totalCountNoCondQ.GetSQL(); - // Run all the queries with CountCached twice to check main and cached values - for (int i = 0; i < 2; ++i) { - QueryResults qrCached; - err = rt.reindexer->Select(cachedTotalNoCondQ, qrCached); - ASSERT_TRUE(err.ok()) << err.what() << "; step: " << step << "; i = " << i << "; " << cachedTotalNoCondQ.GetSQL(); - EXPECT_EQ(qrCached.TotalCount(), qrRegular.TotalCount()) << "step: " << step << "; i = " << i << "; " << bq.GetSQL(); - } - }; - - // Check query and create cache with initial data - checkQuery("initial data"); - - // Update data on the first joined namespace - RemoveLastAuthors(250); - checkQuery("first ns update (remove)"); - FillAuthorsNamespace(250); - checkQuery("first ns update (add)"); - - // Update data on the second joined namespace - RemoveGenre(kLastGenre.id); - checkQuery("second ns update (remove)"); - AddGenre(kLastGenre.id, kLastGenre.name); - checkQuery("second ns update (insert)"); - } -} - 
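// --- Illustrative sketch (not from the patch) --------------------------------
// The deleted CountCached tests above all follow the same pattern: build a join
// query once, then compare ReqTotal() (exact total) against CachedTotal()
// (total that may be served from the query cache), including after the joined
// namespaces are modified. A minimal standalone version of that pattern is
// sketched below, assuming the core reindexer::Reindexer API and reusing the
// namespace/field names from the deleted tests ("books_namespace",
// "authors_namespace", "authorid_fk", "authorid"); error handling is reduced
// to early returns and the check is a plain assert.

#include <cassert>
#include "core/reindexer.h"

void countCachedJoinSketch(reindexer::Reindexer& rx) {
	using reindexer::Query;

	// Inner join books -> authors, limited to 10 rows, as in the deleted tests.
	const Query base = Query("books_namespace")
						   .InnerJoin("authorid_fk", "authorid", CondEq,
									  Query("authors_namespace").Where("authorid", CondGe, 100))
						   .Limit(10);

	// ReqTotal() forces an exact total; CachedTotal() allows a cached one.
	const Query exactQ = Query(base).ReqTotal();
	const Query cachedQ = Query(base).CachedTotal();

	reindexer::QueryResults exact, cached;
	reindexer::Error err = rx.Select(exactQ, exact);
	if (!err.ok()) return;
	err = rx.Select(cachedQ, cached);
	if (!err.ok()) return;

	// The deleted tests asserted that both totals stay equal, i.e. the cached
	// value is invalidated whenever the joined namespaces change.
	assert(cached.TotalCount() == exact.TotalCount());
}
// ------------------------------------------------------------------------------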
-TEST_F(JoinOnConditionsApi, TestGeneralConditions) { - const std::string sqlTemplate = - R"(select * from books_namespace inner join books_namespace on (books_namespace.authorid_fk = books_namespace.authorid_fk and books_namespace.pages %s books_namespace.pages);)"; - for (CondType condition : {CondLt, CondLe, CondGt, CondGe, CondEq}) { - Query queryBooks = Query::FromSQL(GetSql(sqlTemplate, condition)); - QueryResults qr; - Error err = rt.reindexer->Select(queryBooks, qr); - ASSERT_TRUE(err.ok()) << err.what(); - for (auto it : qr) { - const auto item = it.GetItem(); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - const Variant authorid1 = item[authorid_fk]; - const Variant pages1 = item[pages]; - const auto joined = it.GetJoined(); - ASSERT_EQ(joined.getJoinedFieldsCount(), 1); - LocalQueryResults jqr = joined.begin().ToQueryResults(); - jqr.addNSContext(qr, 0, reindexer::lsn_t()); - for (auto jit : jqr) { - auto joinedItem = jit.GetItem(); - ASSERT_TRUE(joinedItem.Status().ok()) << joinedItem.Status().what(); - Variant authorid2 = joinedItem[authorid_fk]; - ASSERT_EQ(authorid1, authorid2); - Variant pages2 = joinedItem[pages]; - ASSERT_TRUE(CompareVariants(pages1, pages2, condition)) - << pages1.As() << ' ' << reindexer::CondTypeToStr(condition) << ' ' << pages2.As(); - } - } - } -} - -#ifndef REINDEX_WITH_TSAN - -TEST_F(JoinOnConditionsApi, TestComparisonConditions) { - const std::vector> sqlTemplates = { - {R"(select * from books_namespace inner join authors_namespace on (books_namespace.authorid_fk %s authors_namespace.authorid);)", - R"(select * from books_namespace inner join authors_namespace on (authors_namespace.authorid %s books_namespace.authorid_fk);)"}}; - const std::vector> conditions = {{CondLt, CondGt}, {CondLe, CondGe}, {CondGt, CondLt}, - {CondGe, CondLe}, {CondEq, CondEq}, {CondSet, CondSet}}; - for (size_t i = 0; i < sqlTemplates.size(); ++i) { - const auto& sqlTemplate = sqlTemplates[i]; - for (const auto& condition : conditions) { - Query query1 = Query::FromSQL(GetSql(sqlTemplate.first, condition.first)); - QueryResults qr1; - Error err = rt.reindexer->Select(query1, qr1); - ASSERT_TRUE(err.ok()) << err.what(); - - Query query2 = Query::FromSQL(GetSql(sqlTemplate.second, condition.second)); - QueryResults qr2; - err = rt.reindexer->Select(query2, qr2); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(query1.GetJSON(), query2.GetJSON()); - ASSERT_EQ(qr1.Count(), qr2.Count()); - for (QueryResults::Iterator it1 = qr1.begin(), it2 = qr2.begin(); it1 != qr1.end(); ++it1, ++it2) { - auto item1 = it1.GetItem(); - ASSERT_TRUE(item1.Status().ok()) << item1.Status().what(); - auto joined1 = it1.GetJoined(); - ASSERT_EQ(joined1.getJoinedFieldsCount(), 1); - LocalQueryResults jqr1 = joined1.begin().ToQueryResults(); - jqr1.addNSContext(qr1, 1, reindexer::lsn_t()); - - auto item2 = it2.GetItem(); - ASSERT_TRUE(item2.Status().ok()) << item2.Status().what(); - auto joined2 = it2.GetJoined(); - ASSERT_EQ(joined2.getJoinedFieldsCount(), 1); - LocalQueryResults jqr2 = joined2.begin().ToQueryResults(); - jqr2.addNSContext(qr2, 1, reindexer::lsn_t()); - - ASSERT_EQ(jqr1.Count(), jqr2.Count()); - - for (auto jit1 = jqr1.begin(), jit2 = jqr2.begin(); jit1 != jqr1.end(); ++jit1, ++jit2) { - auto joinedItem1 = jit1.GetItem(); - ASSERT_TRUE(joinedItem1.Status().ok()) << joinedItem1.Status().what(); - Variant authorid11 = item1[authorid_fk]; - Variant authorid12 = joinedItem1[authorid]; - ASSERT_TRUE(CompareVariants(authorid11, authorid12, condition.first)); - - auto 
joinedItem2 = jit2.GetItem(); - ASSERT_TRUE(joinedItem2.Status().ok()) << joinedItem2.Status().what(); - Variant authorid21 = item2[authorid_fk]; - Variant authorid22 = joinedItem2[authorid]; - ASSERT_TRUE(CompareVariants(authorid21, authorid22, condition.first)); - - ASSERT_EQ(authorid11, authorid21); - ASSERT_EQ(authorid12, authorid22); - } - } - } - } -} - -#endif - -TEST_F(JoinOnConditionsApi, TestLeftJoinOnCondSet) { - const std::string leftNs = "leftNs"; - const std::string rightNs = "rightNs"; - std::vector leftNsData = {1, 3, 10}; - std::vector> rightNsData = {{1, 2, 3}, {3, 4, 5}, {5, 6, 7}}; - CreateCondSetTable(leftNs, rightNs, leftNsData, rightNsData); - // clang-format off - const std::vector results = { - R"({"id":1,"joined_rightNs":[{"id":10,"set":[1,2,3]}]})", - R"({"id":3,"joined_rightNs":[{"id":10,"set":[1,2,3]},{"id":11,"set":[3,4,5]}]})", - R"({"id":10})" - }; - // clang-format on - - auto execQuery = [&results, this](Query& q) { - QueryResults qr; - Error err = rt.reindexer->Select(q, qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), results.size()); - int k = 0; - for (auto it = qr.begin(); it != qr.end(); ++it, ++k) { - ASSERT_TRUE(it.Status().ok()) << it.Status().what(); - reindexer::WrSerializer ser; - err = it.GetJSON(ser, false); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(ser.c_str(), results[k]); - } - }; - - { - Query q(leftNs); - q.Sort("id", false); - reindexer::Query qj(rightNs); - q.LeftJoin("id", "set", CondSet, qj); - reindexer::WrSerializer ser; - execQuery(q); - } - - auto sqlTestCase = [execQuery](const std::string& s) { - Query q = Query::FromSQL(s); - execQuery(q); - }; - - sqlTestCase(fmt::sprintf("select * from %s left join %s on %s.id IN %s.set order by id", leftNs, rightNs, leftNs, rightNs)); - sqlTestCase(fmt::sprintf("select * from %s left join %s on %s.set IN %s.id order by id", leftNs, rightNs, rightNs, leftNs)); - sqlTestCase(fmt::sprintf("select * from %s left join %s on %s.id = %s.set order by id", leftNs, rightNs, leftNs, rightNs)); - sqlTestCase(fmt::sprintf("select * from %s left join %s on %s.set = %s.id order by id", leftNs, rightNs, rightNs, leftNs)); -} - -TEST_F(JoinOnConditionsApi, TestInvalidConditions) { - const std::vector sqls = { - R"(select * from books_namespace inner join authors_namespace on (books_namespace.authorid_fk = books_namespace.authorid_fk and books_namespace.pages is null);)", - R"(select * from books_namespace inner join authors_namespace on (books_namespace.authorid_fk = books_namespace.authorid_fk and books_namespace.pages range(0, 1000));)", - R"(select * from books_namespace inner join authors_namespace on (books_namespace.authorid_fk = books_namespace.authorid_fk and books_namespace.pages in(1, 50, 100, 500, 1000, 1500));)", - }; - for (const std::string& sql : sqls) { - EXPECT_THROW((void)Query::FromSQL(sql), Error); - } - QueryResults qr; - Error err = rt.reindexer->Select(Query(books_namespace).InnerJoin(authorid_fk, authorid, CondAllSet, Query(authors_namespace)), qr); - EXPECT_FALSE(err.ok()); - qr.Clear(); - err = rt.reindexer->Select(Query(books_namespace).InnerJoin(authorid_fk, authorid, CondLike, Query(authors_namespace)), qr); - EXPECT_FALSE(err.ok()); -} diff --git a/cpp_src/gtests/tests/unit/json_parsing_test.cc b/cpp_src/gtests/tests/unit/json_parsing_test.cc deleted file mode 100644 index daf9da111..000000000 --- a/cpp_src/gtests/tests/unit/json_parsing_test.cc +++ /dev/null @@ -1,93 +0,0 @@ -#include -#include "core/reindexer.h" - -#include 
"core/cjson/jsonbuilder.h" - -TEST(JSONParsingTest, EmptyDocument) { - reindexer::Reindexer rx; - constexpr std::string_view kNsName("json_empty_doc_test"); - auto err = rx.OpenNamespace(kNsName); - ASSERT_TRUE(err.ok()) << err.what(); - - reindexer::Item item(rx.NewItem(kNsName)); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - - err = item.FromJSON("\n"); - EXPECT_EQ(err.code(), errParseJson); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - - err = item.FromJSON("\t"); - EXPECT_EQ(err.code(), errParseJson); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - - err = item.FromJSON(" "); - EXPECT_EQ(err.code(), errParseJson); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); -} - -TEST(JSONParsingTest, NestedNodesRead) { - constexpr std::string_view jsonTest{R"json({ - "type":"replication", - "replication":{ - "server_id": 10, - "cluster_id": 11 - } - })json"}; - - // Parse json and check keys - gason::JsonParser parser; - auto root = parser.Parse(jsonTest); - - EXPECT_EQ(root["replication"]["server_id"].As(), 10); - EXPECT_EQ(root["replication"]["cluster_id"].As(), 11); - EXPECT_ANY_THROW(root["no-node"]["server_id"].As()); -} - -TEST(JSONParsingTest, Strings) { - const std::vector lens = {0, 100, 8 < 10, 2 << 20, 8 << 20, 16 << 20, 32 << 20, 60 << 20}; - for (auto len : lens) { - std::string strs[2]; - strs[0].resize(len / 2); - std::fill(strs[0].begin(), strs[0].end(), 'a'); - strs[1].resize(len); - std::fill(strs[1].begin(), strs[1].end(), 'b'); - - std::string d("{\"id\":1,\"str0\":\"" + strs[0] + "\",\"str1\":\"" + strs[1] + "\",\"val\":999}"); - reindexer::span data(d); - try { - gason::JsonParser parser; - auto root = parser.Parse(data, nullptr); - EXPECT_EQ(root["id"].As(), 1) << len; - auto rstr = root["str0"].As(); - EXPECT_EQ(rstr, strs[0]) << len; - rstr = root["str1"].As(); - EXPECT_EQ(rstr, strs[1]) << len; - EXPECT_EQ(root["val"].As(), 999) << len; - } catch (gason::Exception& e) { - EXPECT_TRUE(false) << e.what(); - } - } -} - -TEST(JSONParsingTest, LargeAllocations) { - constexpr int64_t kArrElemsCnt = 50000; - // Create json - reindexer::WrSerializer ser; - reindexer::JsonBuilder jb(ser); - jb.Put("mode", "mode"); - auto arr = jb.Array("array"); - for (int64_t i = 0; i < kArrElemsCnt; ++i) { - arr.Put(nullptr, reindexer::Variant{i}); - } - arr.End(); - jb.End(); - - // Parse json and check keys - gason::JsonParser parser; - auto root = parser.Parse(reindexer::giftStr(ser.Slice())); - ASSERT_EQ(std::string_view(root["mode"].key), "mode"); - for (auto el : root["array"]) { - ASSERT_EQ(std::string_view(el.key), std::string_view()); - ASSERT_EQ(el.value.getTag(), gason::JSON_NUMBER); - } -} diff --git a/cpp_src/gtests/tests/unit/msgpack_cproto_tests.cc b/cpp_src/gtests/tests/unit/msgpack_cproto_tests.cc deleted file mode 100644 index d8d89543f..000000000 --- a/cpp_src/gtests/tests/unit/msgpack_cproto_tests.cc +++ /dev/null @@ -1,182 +0,0 @@ -#include "msgpack_cproto_api.h" - -#include -#include "gtests/tests/tests_data.h" -#include "query_aggregate_strict_mode_test.h" - -using reindexer::client::RPCDataFormat; - -TEST_F(MsgPackCprotoApi, MsgPackDecodeTest) { - using namespace reindexer; - auto testDataPath = reindexer::fs::JoinPath(std::string(kTestsDataPath), "MsgPack"); - auto msgPackPath = fs::JoinPath(testDataPath, "msg.uu"); - auto msgJsonPath = fs::JoinPath(testDataPath, "msg.json"); - - std::string content; - int res = reindexer::fs::ReadFile(msgPackPath, content); - ASSERT_GT(res, 0) << "Test data file not found: '" << 
msgPackPath << "'"; - reindexer::client::Item msgPackItem = client_->NewItem(default_namespace); - if (res > 0) { - size_t offset = 0; - auto err = msgPackItem.FromMsgPack(content, offset); - ASSERT_TRUE(err.ok()) << err.what(); - } - - content.clear(); - res = reindexer::fs::ReadFile(msgJsonPath, content); - ASSERT_GT(res, 0) << "Test data file not found: '" << msgJsonPath << "'"; - ASSERT_GT(content.size(), 1); - - reindexer::client::Item msgJsonItem = client_->NewItem(default_namespace); - if (res > 0) { - auto err = msgJsonItem.FromJSON(content); - ASSERT_TRUE(err.ok()) << err.what(); - } - EXPECT_EQ(msgJsonItem.GetJSON(), msgPackItem.GetJSON()); -} - -TEST_F(MsgPackCprotoApi, SelectTest) { - QueryResults qr(kResultsMsgPack | kResultsWithItemID); - Error err = client_->Select(Query(default_namespace), qr); - ASSERT_TRUE(err.ok()) << err.what(); - EXPECT_EQ(qr.Count(), 1000); - ASSERT_EQ(qr.GetFlags() & kResultsFormatMask, kResultsMsgPack) << qr.GetFlags(); - for (auto it : qr) { - checkItem(it); - } -} - -TEST_F(MsgPackCprotoApi, AggregationSelectTest) { - QueryResults qr(kResultsMsgPack | kResultsWithItemID); - Error err = client_->Select( - Query(default_namespace).Distinct("id").Aggregate(AggFacet, {"a1", "a2"}).Aggregate(AggSum, {"id"}).Limit(100000), qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.GetAggregationResults().size(), 3); - ASSERT_EQ(qr.GetFlags() & kResultsFormatMask, kResultsMsgPack) << qr.GetFlags(); - - const reindexer::AggregationResult& distinct = qr.GetAggregationResults()[0]; - EXPECT_EQ(distinct.type, AggDistinct); - EXPECT_EQ(distinct.distincts.size(), 1000); - ASSERT_EQ(distinct.fields.size(), 1); - EXPECT_EQ(distinct.fields[0], kFieldId); - std::unordered_set found; - for (size_t i = 0; i < distinct.distincts.size(); ++i) { - found.insert(reindexer::stoi(distinct.distincts[i].As(distinct.payloadType, distinct.distinctsFields))); - } - ASSERT_EQ(distinct.distincts.size(), found.size()); - - for (size_t i = 0; i < distinct.distincts.size(); ++i) { - EXPECT_NE(found.find(i), found.end()); - } - - const reindexer::AggregationResult& facet = qr.GetAggregationResults()[1]; - EXPECT_EQ(facet.type, AggFacet); - EXPECT_EQ(facet.facets.size(), 1000); - ASSERT_EQ(facet.fields.size(), 2); - EXPECT_EQ(facet.fields[0], kFieldA1); - EXPECT_EQ(facet.fields[1], kFieldA2); - - for (const reindexer::FacetResult& res : facet.facets) { - EXPECT_EQ(res.count, 1); - const auto v1 = reindexer::stoll(res.values[0]); - const auto v2 = reindexer::stoll(res.values[1]); - EXPECT_EQ(v1 * 3, v2 * 2); - } - - const reindexer::AggregationResult& sum = qr.GetAggregationResults()[2]; - EXPECT_EQ(sum.type, AggSum); - ASSERT_EQ(sum.fields.size(), 1); - EXPECT_EQ(sum.fields[0], kFieldId); - double val = (999.0 / 2.0) * 1000.0; - EXPECT_DOUBLE_EQ(sum.GetValueOrZero(), val) << sum.GetValueOrZero() << "; " << val; -} - -TEST_F(MsgPackCprotoApi, AggregationsWithStrictModeTest) { QueryAggStrictModeTest(client_); } - -TEST_F(MsgPackCprotoApi, ModifyItemsTest) { - auto item = client_->NewItem(default_namespace); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - - reindexer::WrSerializer wrser; - reindexer::JsonBuilder jsonBuilder(wrser, reindexer::ObjType::TypeObject); - jsonBuilder.Put(kFieldId, 7); - jsonBuilder.Put(kFieldA1, 77); - jsonBuilder.Put(kFieldA2, 777); - jsonBuilder.Put(kFieldA3, 7777); - jsonBuilder.End(); - - std::string itemSrcJson(wrser.Slice()); - - char* endp = nullptr; - Error err = item.FromJSON(wrser.Slice(), &endp); - ASSERT_TRUE(err.ok()) << err.what(); - 
- err = client_->Upsert(default_namespace, item, RPCDataFormat::MsgPack); - ASSERT_TRUE(err.ok()) << err.what(); - - QueryResults qr(kResultsMsgPack | kResultsWithItemID); - err = client_->Select(Query(default_namespace).Where(kFieldId, CondEq, Variant(int(7))), qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_TRUE(qr.Count() == 1); - ASSERT_EQ(qr.GetFlags() & kResultsFormatMask, kResultsMsgPack) << qr.GetFlags(); - - for (auto it : qr) { - checkItem(it); - auto item = it.GetItem(); - ASSERT_TRUE(itemSrcJson == std::string(item.GetJSON())); - } -} - -TEST_F(MsgPackCprotoApi, UpdateTest) { - Query q = Query(default_namespace).Set("a1", {7}).Where("id", CondGe, {10}).Where("id", CondLe, {100}); - QueryResults qr(kResultsMsgPack | kResultsWithItemID); - Error err = client_->Update(q, qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_TRUE(qr.Count() == 91); - ASSERT_EQ(qr.GetFlags() & kResultsFormatMask, kResultsMsgPack) << qr.GetFlags(); - - int id = 10; - for (auto it : qr) { - checkItem(it); - - reindexer::WrSerializer json; - reindexer::JsonBuilder jsonBuilder(json, reindexer::ObjType::TypeObject); - jsonBuilder.Put(kFieldId, id); - jsonBuilder.Put(kFieldA1, 7); - jsonBuilder.Put(kFieldA2, id * 3); - jsonBuilder.Put(kFieldA3, id * 4); - jsonBuilder.End(); - - reindexer::client::Item item = it.GetItem(); - ASSERT_TRUE(item.GetJSON() == json.Slice()); - - ++id; - } -} - -TEST_F(MsgPackCprotoApi, DeleteTest) { - Query q = Query(default_namespace).Where("id", CondGe, {100}).Where("id", CondLe, {110}); - QueryResults qr(kResultsMsgPack | kResultsWithItemID); - Error err = client_->Delete(q, qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_TRUE(qr.Count() == 11); - ASSERT_EQ(qr.GetFlags() & kResultsFormatMask, kResultsMsgPack) << qr.GetFlags(); - - int id = 100; - for (auto it : qr) { - checkItem(it); - - reindexer::WrSerializer json; - reindexer::JsonBuilder jsonBuilder(json, reindexer::ObjType::TypeObject); - jsonBuilder.Put(kFieldId, id); - jsonBuilder.Put(kFieldA1, id * 2); - jsonBuilder.Put(kFieldA2, id * 3); - jsonBuilder.Put(kFieldA3, id * 4); - jsonBuilder.End(); - - auto item = it.GetItem(); - ASSERT_TRUE(item.GetJSON() == json.Slice()); - - ++id; - } -} diff --git a/cpp_src/gtests/tests/unit/namespace_test.cc b/cpp_src/gtests/tests/unit/namespace_test.cc deleted file mode 100644 index a02054345..000000000 --- a/cpp_src/gtests/tests/unit/namespace_test.cc +++ /dev/null @@ -1,2698 +0,0 @@ -#include -#include -#include "core/cbinding/resultserializer.h" -#include "core/cjson/ctag.h" -#include "core/cjson/jsonbuilder.h" -#include "core/cjson/msgpackbuilder.h" -#include "core/cjson/msgpackdecoder.h" -#include "core/defnsconfigs.h" -#include "estl/span.h" -#include "ns_api.h" -#include "tools/jsontools.h" -#include "tools/serializer.h" -#include "vendor/gason/gason.h" - -using QueryResults = ReindexerApi::QueryResults; -using Item = ReindexerApi::Item; -using Reindexer = ReindexerApi::Reindexer; - -TEST_F(NsApi, IndexDrop) { - Error err = rt.reindexer->OpenNamespace(default_namespace); - ASSERT_TRUE(err.ok()) << err.what(); - - DefineNamespaceDataset( - default_namespace, - {IndexDeclaration{idIdxName.c_str(), "hash", "int", IndexOpts().PK(), 0}, IndexDeclaration{"date", "", "int64", IndexOpts(), 0}, - IndexDeclaration{"price", "", "int64", IndexOpts(), 0}, IndexDeclaration{"serialNumber", "", "int64", IndexOpts(), 0}, - IndexDeclaration{"fileName", "", "string", IndexOpts(), 0}}); - - DefineNamespaceDataset(default_namespace, {IndexDeclaration{"ft11", "text", "string", 
IndexOpts(), 0}, - IndexDeclaration{"ft12", "text", "string", IndexOpts(), 0}, - IndexDeclaration{"ft11+ft12=ft13", "text", "composite", IndexOpts(), 0}}); - - DefineNamespaceDataset(default_namespace, {IndexDeclaration{"ft21", "text", "string", IndexOpts(), 0}, - IndexDeclaration{"ft22", "text", "string", IndexOpts(), 0}, - IndexDeclaration{"ft23", "text", "string", IndexOpts(), 0}, - IndexDeclaration{"ft21+ft22+ft23=ft24", "text", "composite", IndexOpts(), 0}}); - - for (int i = 0; i < 1000; ++i) { - Item item = NewItem(default_namespace); - item[idIdxName] = i; - item["data"] = rand(); - item["price"] = rand(); - item["serialNumber"] = i * 100; - item["fileName"] = "File" + std::to_string(i); - item["ft11"] = RandString(); - item["ft12"] = RandString(); - item["ft21"] = RandString(); - item["ft22"] = RandString(); - item["ft23"] = RandString(); - auto err = rt.reindexer->Insert(default_namespace, item); - ASSERT_TRUE(err.ok()) << err.what(); - } - - reindexer::IndexDef idef("price"); - err = rt.reindexer->DropIndex(default_namespace, idef); - EXPECT_TRUE(err.ok()) << err.what(); -} - -TEST_F(NsApi, AddTooManyIndexes) { - constexpr size_t kHalfOfStartNotCompositeIndexesCount = 80; - constexpr size_t kMaxCompositeIndexesCount = 100; - static const std::string ns = "too_many_indexes"; - Error err = rt.reindexer->OpenNamespace(ns); - ASSERT_TRUE(err.ok()) << err.what(); - - size_t notCompositeIndexesCount = 0; - size_t compositeIndexesCount = 0; - while (notCompositeIndexesCount < reindexer::kMaxIndexes - 1) { - reindexer::IndexDef idxDef; - if (notCompositeIndexesCount < 2 * kHalfOfStartNotCompositeIndexesCount || rand() % 4 != 0 || - compositeIndexesCount >= kMaxCompositeIndexesCount) { - const std::string indexName = "index_" + std::to_string(notCompositeIndexesCount); - idxDef = reindexer::IndexDef{indexName, {indexName}, "tree", "int", IndexOpts{}}; - ++notCompositeIndexesCount; - } else { - const std::string firstSubIndex = "index_" + std::to_string(rand() % kHalfOfStartNotCompositeIndexesCount); - const std::string secondSubIndex = - "index_" + std::to_string(rand() % kHalfOfStartNotCompositeIndexesCount + kHalfOfStartNotCompositeIndexesCount); - const std::string indexName = std::string(firstSubIndex).append("+").append(secondSubIndex); - idxDef = reindexer::IndexDef{indexName, {firstSubIndex, secondSubIndex}, "tree", "composite", IndexOpts{}}; - ++compositeIndexesCount; - } - err = rt.reindexer->AddIndex(ns, idxDef); - ASSERT_TRUE(err.ok()) << err.what(); - } - // Add composite index - std::string firstSubIndex = "index_" + std::to_string(rand() % kHalfOfStartNotCompositeIndexesCount); - std::string secondSubIndex = - "index_" + std::to_string(rand() % kHalfOfStartNotCompositeIndexesCount + kHalfOfStartNotCompositeIndexesCount); - std::string indexName = std::string(firstSubIndex).append("+").append(secondSubIndex); - err = rt.reindexer->AddIndex(ns, reindexer::IndexDef{indexName, {firstSubIndex, secondSubIndex}, "tree", "composite", IndexOpts{}}); - ASSERT_TRUE(err.ok()) << err.what(); - - // Add non-composite index - indexName = "index_" + std::to_string(notCompositeIndexesCount); - err = rt.reindexer->AddIndex(ns, reindexer::IndexDef{indexName, {indexName}, "tree", "int", IndexOpts{}}); - ASSERT_FALSE(err.ok()); - ASSERT_EQ(err.what(), - "Cannot add index 'too_many_indexes.index_255'. Too many non-composite indexes. 
255 non-composite indexes are allowed only"); - - // Add composite index - firstSubIndex = "index_" + std::to_string(rand() % kHalfOfStartNotCompositeIndexesCount); - secondSubIndex = "index_" + std::to_string(rand() % kHalfOfStartNotCompositeIndexesCount + kHalfOfStartNotCompositeIndexesCount); - indexName = std::string(firstSubIndex).append("+").append(secondSubIndex); - err = rt.reindexer->AddIndex(ns, reindexer::IndexDef{indexName, {firstSubIndex, secondSubIndex}, "tree", "composite", IndexOpts{}}); - ASSERT_TRUE(err.ok()) << err.what(); -} - -TEST_F(NsApi, TruncateNamespace) { - TruncateNamespace([&](const std::string &nsName) { return rt.reindexer->TruncateNamespace(nsName); }); - TruncateNamespace([&](const std::string &nsName) { - QueryResults qr; - return rt.reindexer->Select("TRUNCATE " + nsName, qr); - }); -} - -TEST_F(NsApi, UpsertWithPrecepts) { - Error err = rt.reindexer->OpenNamespace(default_namespace); - ASSERT_TRUE(err.ok()) << err.what(); - - DefineNamespaceDataset(default_namespace, {IndexDeclaration{idIdxName.c_str(), "hash", "int", IndexOpts().PK(), 0}, - IndexDeclaration{updatedTimeSecFieldName.c_str(), "", "int64", IndexOpts(), 0}, - IndexDeclaration{updatedTimeMSecFieldName.c_str(), "", "int64", IndexOpts(), 0}, - IndexDeclaration{updatedTimeUSecFieldName.c_str(), "", "int64", IndexOpts(), 0}, - IndexDeclaration{updatedTimeNSecFieldName.c_str(), "", "int64", IndexOpts(), 0}, - IndexDeclaration{serialFieldName.c_str(), "", "int64", IndexOpts(), 0}, - IndexDeclaration{stringField.c_str(), "text", "string", IndexOpts(), 0}}); - - Item item = NewItem(default_namespace); - item[idIdxName] = idNum; - - { - // Set precepts - std::vector precepts = {updatedTimeSecFieldName + "=NOW()", updatedTimeMSecFieldName + "=NOW(msec)", - updatedTimeUSecFieldName + "=NOW(usec)", updatedTimeNSecFieldName + "=NOW(nsec)", - serialFieldName + "=SERIAL()", stringField + "=SERIAL()"}; - item.SetPrecepts(std::move(precepts)); - } - - // Upsert item a few times - for (int i = 0; i < upsertTimes; i++) { - auto err = rt.reindexer->Upsert(default_namespace, item); - ASSERT_TRUE(err.ok()) << err.what(); - } - - // Get item - reindexer::QueryResults res; - err = rt.reindexer->Select("SELECT * FROM " + default_namespace + " WHERE id=" + std::to_string(idNum), res); - ASSERT_TRUE(err.ok()) << err.what(); - - for (auto it : res) { - Item item = it.GetItem(false); - for (auto idx = 1; idx < item.NumFields(); idx++) { - auto field = item[idx].Name(); - - if (field == updatedTimeSecFieldName) { - int64_t value = item[field].Get(); - ASSERT_TRUE(reindexer::getTimeNow("sec") - value < 1) << "Precept function `now()/now(sec)` doesn't work properly"; - } else if (field == updatedTimeMSecFieldName) { - int64_t value = item[field].Get(); - ASSERT_TRUE(reindexer::getTimeNow("msec") - value < 1000) << "Precept function `now(msec)` doesn't work properly"; - } else if (field == updatedTimeUSecFieldName) { - int64_t value = item[field].Get(); - ASSERT_TRUE(reindexer::getTimeNow("usec") - value < 1000000) << "Precept function `now(usec)` doesn't work properly"; - } else if (field == updatedTimeNSecFieldName) { - int64_t value = item[field].Get(); - ASSERT_TRUE(reindexer::getTimeNow("nsec") - value < 1000000000) << "Precept function `now(nsec)` doesn't work properly"; - } else if (field == serialFieldName) { - int64_t value = item[field].Get(); - ASSERT_TRUE(value == upsertTimes) << "Precept function `serial()` didn't increment a value to " << upsertTimes << " after " - << upsertTimes << " upsert times"; - } else if 
(field == stringField) { - auto value = item[field].Get(); - ASSERT_TRUE(value == std::to_string(upsertTimes)) << "Precept function `serial()` didn't increment a value to " - << upsertTimes << " after " << upsertTimes << " upsert times"; - } - } - } -} - -TEST_F(NsApi, ReturnOfItemChange) { - Error err = rt.reindexer->OpenNamespace(default_namespace); - ASSERT_TRUE(err.ok()) << err.what(); - - DefineNamespaceDataset(default_namespace, {IndexDeclaration{idIdxName.c_str(), "hash", "int", IndexOpts().PK(), 0}, - IndexDeclaration{updatedTimeNSecFieldName.c_str(), "", "int64", IndexOpts(), 0}, - IndexDeclaration{serialFieldName.c_str(), "", "int64", IndexOpts(), 0}}); - - Item item = NewItem(default_namespace); - item[idIdxName] = idNum; - - { - // Set precepts - std::vector precepts = {updatedTimeNSecFieldName + "=NOW(nsec)", serialFieldName + "=SERIAL()"}; - item.SetPrecepts(std::move(precepts)); - } - - // Check Insert - err = rt.reindexer->Insert(default_namespace, item); - ASSERT_TRUE(err.ok()) << err.what(); - reindexer::QueryResults res1; - err = rt.reindexer->Select("SELECT * FROM " + default_namespace + " WHERE " + idIdxName + "=" + std::to_string(idNum), res1); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(res1.Count(), 1); - Item selectedItem = res1.begin().GetItem(false); - CheckItemsEqual(item, selectedItem); - - // Check Update - err = rt.reindexer->Update(default_namespace, item); - ASSERT_TRUE(err.ok()) << err.what(); - reindexer::QueryResults res2; - err = rt.reindexer->Select("SELECT * FROM " + default_namespace + " WHERE " + idIdxName + "=" + std::to_string(idNum), res2); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(res2.Count(), 1); - selectedItem = res2.begin().GetItem(false); - CheckItemsEqual(item, selectedItem); - - // Check Delete - err = rt.reindexer->Delete(default_namespace, item); - ASSERT_TRUE(err.ok()) << err.what(); - CheckItemsEqual(item, selectedItem); - - // Check Upsert - item[idIdxName] = idNum; - err = rt.reindexer->Upsert(default_namespace, item); - ASSERT_TRUE(err.ok()) << err.what(); - reindexer::QueryResults res3; - err = rt.reindexer->Select("SELECT * FROM " + default_namespace + " WHERE " + idIdxName + "=" + std::to_string(idNum), res3); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(res3.Count(), 1); - selectedItem = res3.begin().GetItem(false); - CheckItemsEqual(item, selectedItem); -} - -TEST_F(NsApi, UpdateIndex) { - Error err = rt.reindexer->InitSystemNamespaces(); - ASSERT_TRUE(err.ok()) << err.what(); - err = rt.reindexer->OpenNamespace(default_namespace); - ASSERT_TRUE(err.ok()) << err.what(); - - DefineNamespaceDataset(default_namespace, {IndexDeclaration{idIdxName.c_str(), "hash", "int", IndexOpts().PK(), 0}}); - - auto const wrongIdx = reindexer::IndexDef(idIdxName, reindexer::JsonPaths{"wrongPath"}, "hash", "double", IndexOpts().PK()); - err = rt.reindexer->UpdateIndex(default_namespace, wrongIdx); - ASSERT_FALSE(err.ok()); - EXPECT_EQ(err.what(), "Unsupported combination of field 'id' type 'double' and index type 'hash'"); - - auto newIdx = reindexer::IndexDef(idIdxName, "tree", "int64", IndexOpts().PK().Dense()); - err = rt.reindexer->UpdateIndex(default_namespace, newIdx); - ASSERT_TRUE(err.ok()) << err.what(); - - std::vector nsDefs; - err = rt.reindexer->EnumNamespaces(nsDefs, reindexer::EnumNamespacesOpts()); - ASSERT_TRUE(err.ok()) << err.what(); - - auto nsDefIt = - std::find_if(nsDefs.begin(), nsDefs.end(), [&](const reindexer::NamespaceDef &nsDef) { return nsDef.name == default_namespace; }); - - 
ASSERT_TRUE(nsDefIt != nsDefs.end()) << "Namespace " + default_namespace + " is not found"; - - auto &indexes = nsDefIt->indexes; - auto receivedIdx = std::find_if(indexes.begin(), indexes.end(), [&](const reindexer::IndexDef &idx) { return idx.name_ == idIdxName; }); - ASSERT_TRUE(receivedIdx != indexes.end()) << "Expect index was created, but it wasn't"; - - reindexer::WrSerializer newIdxSer; - newIdx.GetJSON(newIdxSer); - - reindexer::WrSerializer receivedIdxSer; - receivedIdx->GetJSON(receivedIdxSer); - - auto newIdxJson = newIdxSer.Slice(); - auto receivedIdxJson = receivedIdxSer.Slice(); - - ASSERT_TRUE(newIdxJson == receivedIdxJson); -} - -TEST_F(NsApi, QueryperfstatsNsDummyTest) { - Error err = rt.reindexer->InitSystemNamespaces(); - ASSERT_TRUE(err.ok()) << err.what(); - err = rt.reindexer->OpenNamespace(default_namespace); - ASSERT_TRUE(err.ok()) << err.what(); - - DefineNamespaceDataset(default_namespace, {IndexDeclaration{idIdxName.c_str(), "hash", "int", IndexOpts().PK(), 0}}); - - const char *const configNs = "#config"; - Item item = NewItem(configNs); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - - std::string newConfig = R"json({ - "type":"profiling", - "profiling":{ - "queriesperfstats":true, - "queries_threshold_us":0, - "perfstats":true, - "memstats":true, - "long_queries_logging":{ - "select":{ - "threshold_us": 1000000, - "normalized": false - }, - "update_delete":{ - "threshold_us": 1000000, - "normalized": false - }, - "transaction":{ - "threshold_us": 1000000, - "avg_step_threshold_us": 1000 - } - } - } - })json"; - - err = item.FromJSON(newConfig); - ASSERT_TRUE(err.ok()) << err.what(); - - Upsert(configNs, item); - err = Commit(configNs); - ASSERT_TRUE(err.ok()) << err.what(); - - struct QueryPerformance { - std::string query; - double latencyStddev = 0; - int64_t minLatencyUs = 0; - int64_t maxLatencyUs = 0; - void Dump() const { - std::cout << "stddev: " << latencyStddev << std::endl; - std::cout << "min: " << minLatencyUs << std::endl; - std::cout << "max: " << maxLatencyUs << std::endl; - } - }; - - Query testQuery = Query(default_namespace, 0, 0, ModeAccurateTotal); - const std::string querySql(testQuery.GetSQL(true)); - - auto performSimpleQuery = [&]() { - QueryResults qr; - Error err = rt.reindexer->Select(testQuery, qr); - ASSERT_TRUE(err.ok()) << err.what(); - }; - - auto getPerformanceParams = [&](QueryPerformance &performanceRes) { - QueryResults qres; - Error err = rt.reindexer->Select(Query("#queriesperfstats").Where("query", CondEq, Variant(querySql)), qres); - ASSERT_TRUE(err.ok()) << err.what(); - if (qres.Count() == 0) { - QueryResults qr; - err = rt.reindexer->Select(Query("#queriesperfstats"), qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_GT(qr.Count(), 0) << "#queriesperfstats table is empty!"; - for (auto &it : qr) { - std::cout << it.GetItem(false).GetJSON() << std::endl; - } - } - ASSERT_EQ(qres.Count(), 1) << "Expected 1 row for this query, got " << qres.Count(); - Item item = qres.begin().GetItem(false); - Variant val; - val = item["latency_stddev"]; - performanceRes.latencyStddev = static_cast(val); - val = item["min_latency_us"]; - performanceRes.minLatencyUs = val.As(); - val = item["max_latency_us"]; - performanceRes.maxLatencyUs = val.As(); - val = item["query"]; - performanceRes.query = val.As(); - }; - - sleep(1); - - QueryPerformance prevQperf; - for (size_t i = 0; i < 1000; ++i) { - performSimpleQuery(); - QueryPerformance qperf; - getPerformanceParams(qperf); - if ((qperf.minLatencyUs > qperf.maxLatencyUs) 
|| (qperf.latencyStddev < 0) || (qperf.latencyStddev > qperf.maxLatencyUs)) { - qperf.Dump(); - } - ASSERT_TRUE(qperf.minLatencyUs <= qperf.maxLatencyUs); - ASSERT_TRUE((qperf.latencyStddev >= 0) && (qperf.latencyStddev <= qperf.maxLatencyUs)); - if (i > 0) { - ASSERT_TRUE(qperf.minLatencyUs <= prevQperf.minLatencyUs); - ASSERT_TRUE(qperf.maxLatencyUs >= prevQperf.maxLatencyUs); - } - ASSERT_TRUE(qperf.query == "SELECT COUNT(*) FROM test_namespace") << qperf.query; - prevQperf = qperf; - } -} - -static void checkIfItemJSONValid(QueryResults::Iterator &it, bool print = false) { - reindexer::WrSerializer wrser; - Error err = it.GetJSON(wrser, false); - ASSERT_TRUE(err.ok()) << err.what(); - if (err.ok() && print) std::cout << wrser.Slice() << std::endl; -} - -TEST_F(NsApi, TestUpdateIndexedField) { - DefineDefaultNamespace(); - FillDefaultNamespace(); - - QueryResults qrUpdate; - Query updateQuery{Query(default_namespace).Where(intField, CondGe, Variant(static_cast(500))).Set(stringField, "bingo!")}; - Error err = rt.reindexer->Update(updateQuery, qrUpdate); - ASSERT_TRUE(err.ok()) << err.what(); - - QueryResults qrAll; - err = rt.reindexer->Select(Query(default_namespace).Where(intField, CondGe, Variant(static_cast(500))), qrAll); - ASSERT_TRUE(err.ok()) << err.what(); - - for (auto it : qrAll) { - Item item = it.GetItem(false); - Variant val = item[stringField]; - ASSERT_TRUE(val.Type().Is()); - ASSERT_TRUE(val.As() == "bingo!") << val.As(); - checkIfItemJSONValid(it); - } -} - -TEST_F(NsApi, TestUpdateNonindexedField) { - DefineDefaultNamespace(); - AddUnindexedData(); - - QueryResults qrUpdate; - Query updateQuery{Query(default_namespace).Where("id", CondGe, Variant("1500")).Set("nested.bonus", static_cast(100500))}; - Error err = rt.reindexer->Update(updateQuery, qrUpdate); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qrUpdate.Count(), 500); - - QueryResults qrAll; - err = rt.reindexer->Select(Query(default_namespace).Where("id", CondGe, Variant("1500")), qrAll); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qrAll.Count(), 500); - - for (auto it : qrAll) { - Item item = it.GetItem(false); - Variant val = item["nested.bonus"]; - ASSERT_TRUE(val.Type().Is()); - ASSERT_TRUE(val.As() == 100500); - checkIfItemJSONValid(it); - } -} - -TEST_F(NsApi, TestUpdateSparseField) { - DefineDefaultNamespace(); - AddUnindexedData(); - - QueryResults qrUpdate; - Query updateQuery{Query(default_namespace).Where("id", CondGe, Variant("1500")).Set("sparse_field", static_cast(100500))}; - Error err = rt.reindexer->Update(updateQuery, qrUpdate); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qrUpdate.Count(), 500); - - QueryResults qrAll; - err = rt.reindexer->Select(Query(default_namespace).Where("id", CondGe, Variant("1500")), qrAll); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qrAll.Count(), 500); - - for (auto it : qrAll) { - Item item = it.GetItem(false); - Variant val = item["sparse_field"]; - ASSERT_TRUE(val.Type().Is()); - ASSERT_TRUE(val.As() == 100500); - checkIfItemJSONValid(it); - } -} - -// Test of the currious case: https://git.restream.ru/itv-backend/reindexer/-/issues/697 -// Updating entire object field and some indexed field at once. 
-TEST_F(NsApi, TestUpdateTwoFields) { - // Set and fill Database - DefineDefaultNamespace(); - FillDefaultNamespace(); - - // Try to update 2 fields at once: indexed field 'stringField' - // + adding and setting a new object-field called 'very_nested' - QueryResults qrUpdate; - Query updateQuery = Query(default_namespace) - .Where(idIdxName, CondEq, 1) - .Set(stringField, "Bingo!") - .SetObject("very_nested", R"({"id":111, "name":"successfully updated!"})"); - Error err = rt.reindexer->Update(updateQuery, qrUpdate); - - // Make sure query worked well - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qrUpdate.Count(), 1); - - // Make sure: - // 1. JSON of the item is correct - // 2. every new set&updated field has a correct value - for (auto it : qrUpdate) { - checkIfItemJSONValid(it); - - Item item = it.GetItem(false); - Variant strField = item[stringField]; - EXPECT_TRUE(strField.Type().Is()); - EXPECT_TRUE(strField.As() == "Bingo!"); - - Variant nestedId = item["very_nested.id"]; - EXPECT_TRUE(nestedId.As() == 111); - - Variant nestedName = item["very_nested.name"]; - EXPECT_TRUE(nestedName.As() == "successfully updated!"); - } -} - -TEST_F(NsApi, TestUpdateNewFieldCheckTmVersion) { - DefineDefaultNamespace(); - FillDefaultNamespace(); - - auto check = [this](const Query &query, int tmVersion) { - QueryResults qrUpdate; - auto err = rt.reindexer->Update(query, qrUpdate); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qrUpdate.Count(), 1); - ASSERT_EQ(qrUpdate.GetTagsMatcher(0).version(), tmVersion); - }; - - QueryResults qr; - Error err = rt.reindexer->Select(Query(default_namespace).Where(idIdxName, CondEq, 1), qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), 1); - auto tmVersion = qr.GetTagsMatcher(0).version(); - Query updateQuery = Query(default_namespace).Where(idIdxName, CondEq, 1).Set("some_new_field", "some_value"); - - // Make sure the version increases by 1 when one new tag with non-object content is added - check(updateQuery, ++tmVersion); - - // Make sure the version not change when the same update query applied - check(updateQuery, tmVersion); - - updateQuery.SetObject("new_obj_field", R"({"id":111, "name":"successfully updated!"})"); - - // Make sure that tm version updates correctly when new tags are added: - // +1 by tag very_nested, - // +1 by all new tags processed in ItemModifier::modifyCJSON for SetObject-method - // +1 by merge of two corresponded tagsmatchers - check(updateQuery, tmVersion += 3); - - // Make sure that if no new tags were added to the tagsmatcher during the update, - // then the version of the tagsmatcher will not change - check(updateQuery, tmVersion); -} - -static void updateArrayField(const std::shared_ptr &reindexer, const std::string &ns, - const std::string &updateFieldPath, const VariantArray &values) { - QueryResults qrUpdate; - Query updateQuery{Query(ns).Where("id", CondGe, Variant("500")).Set(updateFieldPath, values)}; - Error err = reindexer->Update(updateQuery, qrUpdate); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_GT(qrUpdate.Count(), 0); - - QueryResults qrAll; - err = reindexer->Select(Query(ns).Where("id", CondGe, Variant("500")), qrAll); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qrAll.Count(), qrUpdate.Count()); - - for (auto it : qrAll) { - Item item = it.GetItem(false); - VariantArray val = item[updateFieldPath.c_str()]; - if (values.empty()) { - ASSERT_TRUE(val.size() == 1) << val.size(); - ASSERT_TRUE(val.IsNullValue()) << val.ArrayType().Name(); - } else { - ASSERT_TRUE(val.size() == 
values.size()) << val.size() << ":" << values.size(); - ASSERT_TRUE(val == values); - } - checkIfItemJSONValid(it); - } -} - -TEST_F(NsApi, TestUpdateNonindexedArrayField) { - DefineDefaultNamespace(); - AddUnindexedData(); - updateArrayField(rt.reindexer, default_namespace, "array_field", {}); - updateArrayField(rt.reindexer, default_namespace, "array_field", - {Variant(static_cast(3)), Variant(static_cast(4)), Variant(static_cast(5)), - Variant(static_cast(6))}); -} - -TEST_F(NsApi, TestUpdateNonindexedArrayField2) { - DefineDefaultNamespace(); - AddUnindexedData(); - - QueryResults qr; - Error err = rt.reindexer->Select(R"(update test_namespace set nested.bonus=[{"first":1,"second":2,"third":3}] where id = 1000;)", qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), 1); - - Item item = qr.begin().GetItem(false); - std::string_view json = item.GetJSON(); - size_t pos = json.find(R"("nested":{"bonus":[{"first":1,"second":2,"third":3}])"); - ASSERT_TRUE(pos != std::string::npos) << "'nested.bonus' was not updated properly" << json; -} - -TEST_F(NsApi, TestUpdateNonindexedArrayField3) { - DefineDefaultNamespace(); - AddUnindexedData(); - - QueryResults qr; - Error err = - rt.reindexer->Select(R"(update test_namespace set nested.bonus=[{"id":1},{"id":2},{"id":3},{"id":4}] where id = 1000;)", qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), 1); - - Item item = qr.begin().GetItem(false); - VariantArray val = item["nested.bonus"]; - ASSERT_TRUE(val.size() == 4); - - size_t length = 0; - std::string_view json = item.GetJSON(); - gason::JsonParser jsonParser; - ASSERT_NO_THROW(jsonParser.Parse(json, &length)); - ASSERT_TRUE(length > 0); - - size_t pos = json.find(R"("nested":{"bonus":[{"id":1},{"id":2},{"id":3},{"id":4}])"); - ASSERT_TRUE(pos != std::string::npos) << "'nested.bonus' was not updated properly" << json; -} - -TEST_F(NsApi, TestUpdateNonindexedArrayField4) { - DefineDefaultNamespace(); - AddUnindexedData(); - - QueryResults qr; - Error err = rt.reindexer->Select(R"(update test_namespace set nested.bonus=[0] where id = 1000;)", qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), 1); - - Item item = qr.begin().GetItem(false); - std::string_view json = item.GetJSON(); - size_t pos = json.find(R"("nested":{"bonus":[0])"); - ASSERT_NE(pos, std::string::npos) << "'nested.bonus' was not updated properly" << json; -} - -TEST_F(NsApi, TestUpdateNonindexedArrayField5) { - DefineDefaultNamespace(); - AddUnindexedData(); - updateArrayField(rt.reindexer, default_namespace, "string_array", {}); - updateArrayField( - rt.reindexer, default_namespace, "string_array", - {Variant(std::string("one")), Variant(std::string("two")), Variant(std::string("three")), Variant(std::string("four"))}); - updateArrayField(rt.reindexer, default_namespace, "string_array", {Variant(std::string("single one"))}); -} - -TEST_F(NsApi, TestUpdateIndexedArrayField) { - DefineDefaultNamespace(); - FillDefaultNamespace(); - updateArrayField(rt.reindexer, default_namespace, indexedArrayField, - {Variant(7), Variant(8), Variant(9), Variant(10), Variant(11), Variant(12), Variant(13)}); -} - -TEST_F(NsApi, TestUpdateIndexedArrayField2) { - DefineDefaultNamespace(); - AddUnindexedData(); - - QueryResults qr; - VariantArray value; - value.emplace_back(static_cast(77)); - Query q{Query(default_namespace).Where(idIdxName, CondEq, static_cast(1000)).Set(indexedArrayField, std::move(value.MarkArray()))}; - Error err = rt.reindexer->Update(q, qr); - ASSERT_TRUE(err.ok()) << err.what(); 
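	// Note: VariantArray::MarkArray() above marks the single-element value as an array value, so the
	// update is written back as "indexed_array_field":[77] rather than as a scalar 77 (the same trick
	// is used further down to turn a scalar into array[1] and to assign an empty array). A hedged
	// sketch of the pattern with illustrative names:
	//   VariantArray single;
	//   single.emplace_back(Variant(77));
	//   Query(ns).Where("id", CondEq, 1000).Set("indexed_array_field", std::move(single.MarkArray()));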
- ASSERT_EQ(qr.Count(), 1); - - Item item = qr.begin().GetItem(false); - std::string_view json = item.GetJSON(); - size_t pos = json.find(R"("indexed_array_field":[77])"); - ASSERT_NE(pos, std::string::npos) << "'indexed_array_field' was not updated properly" << json; -} - -static void addAndSetNonindexedField(const std::shared_ptr &reindexer, const std::string &ns, - const std::string &updateFieldPath) { - QueryResults qrUpdate; - Query updateQuery{Query(ns).Where("nested.bonus", CondGe, Variant(500)).Set(updateFieldPath, static_cast(777))}; - Error err = reindexer->Update(updateQuery, qrUpdate); - ASSERT_TRUE(err.ok()) << err.what(); - - QueryResults qrAll; - err = reindexer->Select(Query(ns).Where("nested.bonus", CondGe, Variant(500)), qrAll); - ASSERT_TRUE(err.ok()) << err.what(); - - for (auto it : qrAll) { - Item item = it.GetItem(false); - Variant val = item[updateFieldPath.c_str()]; - ASSERT_TRUE(val.Type().Is()); - ASSERT_TRUE(val.As() == 777); - checkIfItemJSONValid(it); - } -} - -TEST_F(NsApi, TestAddAndSetNonindexedField) { - DefineDefaultNamespace(); - AddUnindexedData(); - addAndSetNonindexedField(rt.reindexer, default_namespace, "nested3.extrabonus"); -} - -TEST_F(NsApi, TestAddAndSetNonindexedField2) { - DefineDefaultNamespace(); - AddUnindexedData(); - addAndSetNonindexedField(rt.reindexer, default_namespace, "nested2.nested3.extrabonus"); -} - -TEST_F(NsApi, TestAddAndSetNonindexedField3) { - DefineDefaultNamespace(); - AddUnindexedData(); - addAndSetNonindexedField(rt.reindexer, default_namespace, "nested3.nested4.extrabonus"); -} - -static void setAndCheckArrayItem(const std::shared_ptr &reindexer, const std::string &ns, - const std::string &fullItemPath, const std::string &jsonPath, int i = IndexValueType::NotSet, - int j = IndexValueType::NotSet) { - // Set array item to 777 - QueryResults qrUpdate; - Query updateQuery{Query(ns).Where("nested.bonus", CondGe, Variant(500)).Set(fullItemPath, static_cast(777))}; - Error err = reindexer->Update(updateQuery, qrUpdate); - ASSERT_TRUE(err.ok()) << err.what(); - - // Get all items for the same query - QueryResults qrAll; - err = reindexer->Select(Query(ns).Where("nested.bonus", CondGe, Variant(500)), qrAll); - ASSERT_TRUE(err.ok()) << err.what(); - - const int kPricesSize = 3; - - // Check if array item with appropriate index equals to 777 and - // is a type of Int64. - auto checkItem = [](const VariantArray &values, size_t index) { - ASSERT_TRUE(index < values.size()); - ASSERT_TRUE(values[index].Type().Is()); - ASSERT_TRUE(values[index].As() == 777); - }; - - // Check every item according to it's index, where i is the index of parent's array - // and j is the index of a nested array: - // 1) objects[1].prices[0]: i = 1, j = 0 - // 2) objects[2].prices[*]: i = 2, j = IndexValueType::NotSet - // etc. - for (auto it : qrAll) { - Item item = it.GetItem(false); - checkIfItemJSONValid(it); - VariantArray values = item[jsonPath.c_str()]; - if (i == j && i == IndexValueType::NotSet) { - for (size_t i = 0; i < values.size(); ++i) { - checkItem(values, i); - } - } else if (i == IndexValueType::NotSet) { - for (int k = 0; k < kPricesSize; ++k) { - checkItem(values, k * kPricesSize + j); - } - } else if (j == IndexValueType::NotSet) { - for (int k = 0; k < kPricesSize; ++k) { - checkItem(values, i * kPricesSize + k); - } - } else { - checkItem(values, i * kPricesSize + j); - } - } -} - -TEST_F(NsApi, TestAddAndSetArrayField) { - // 1. Define NS - // 2. Fill NS - // 3. 
Set array item(s) value to 777 and check if it was set properly - DefineDefaultNamespace(); - AddUnindexedData(); - setAndCheckArrayItem(rt.reindexer, default_namespace, "nested.nested_array[0].prices[2]", "nested.nested_array.prices", 0, 2); - setAndCheckArrayItem(rt.reindexer, default_namespace, "nested.nested_array[2].nested.array[1]", "nested.nested_array.nested.array", 0, - 1); - setAndCheckArrayItem(rt.reindexer, default_namespace, "nested.nested_array[2].nested.array[*]", "nested.nested_array.nested.array", 0); - setAndCheckArrayItem(rt.reindexer, default_namespace, "nested.nested_array[1].prices[*]", "nested.nested_array.prices", 1); -} - -TEST_F(NsApi, TestAddAndSetArrayField2) { - // 1. Define NS - // 2. Fill NS - // 3. Set array item(s) value to 777 and check if it was set properly - DefineDefaultNamespace(); - AddUnindexedData(); - setAndCheckArrayItem(rt.reindexer, default_namespace, "nested.nested_array[*].prices[0]", "nested.nested_array.prices", - IndexValueType::NotSet, 0); - setAndCheckArrayItem(rt.reindexer, default_namespace, "nested.nested_array[*].name", "nested.nested_array.name"); -} - -TEST_F(NsApi, TestAddAndSetArrayField3) { - // 1. Define NS - // 2. Fill NS - DefineDefaultNamespace(); - AddUnindexedData(); - - // 3. Set array item(s) value to 777 and check if it was set properly - QueryResults qrUpdate; - Query updateQuery{ - Query(default_namespace).Where("nested.bonus", CondGe, Variant(500)).Set("indexed_array_field[0]", static_cast(777))}; - Error err = rt.reindexer->Update(updateQuery, qrUpdate); - ASSERT_TRUE(err.ok()) << err.what(); - - // 4. Make sure each item's indexed_array_field[0] is of type Int and equal to 777 - for (auto it : qrUpdate) { - Item item = it.GetItem(false); - checkIfItemJSONValid(it); - VariantArray values = item[indexedArrayField]; - ASSERT_TRUE(values[0].Type().Is()); - ASSERT_TRUE(values[0].As() == 777); - } -} - -TEST_F(NsApi, TestAddAndSetArrayField4) { - // 1. Define NS - // 2. Fill NS - DefineDefaultNamespace(); - AddUnindexedData(); - - // 3. Set array item(s) value to 777 and check if it was set properly - QueryResults qrUpdate; - Query updateQuery{ - Query(default_namespace).Where("nested.bonus", CondGe, Variant(500)).Set("indexed_array_field[*]", static_cast(777))}; - Error err = rt.reindexer->Update(updateQuery, qrUpdate); - ASSERT_TRUE(err.ok()) << err.what(); - - // 4. 
Make sure all items of indexed_array_field are of type Int and set to 777 - for (auto it : qrUpdate) { - Item item = it.GetItem(false); - checkIfItemJSONValid(it); - VariantArray values = item[indexedArrayField]; - ASSERT_TRUE(values.size() == 9); - for (size_t i = 0; i < values.size(); ++i) { - ASSERT_TRUE(values[i].Type().Is()); - ASSERT_TRUE(values[i].As() == 777); - } - } -} - -static void DropArrayItem(const std::shared_ptr &reindexer, const std::string &ns, const std::string &fullItemPath, - const std::string &jsonPath, int i = IndexValueType::NotSet, int j = IndexValueType::NotSet) { - // Drop item(s) with name = fullItemPath - QueryResults qrUpdate; - Query updateQuery{Query(ns).Where("nested.bonus", CondGe, Variant(500)).Drop(fullItemPath)}; - Error err = reindexer->Update(updateQuery, qrUpdate); - ASSERT_TRUE(err.ok()) << err.what(); - - // Get all items of the same query - QueryResults qrAll; - err = reindexer->Select(Query(ns).Where("nested.bonus", CondGe, Variant(500)), qrAll); - ASSERT_TRUE(err.ok()) << err.what(); - - const int kPricesSize = 3; - - // Check every item according to it's index, where i is the index of parent's array - // and j is the index of a nested array: - // 1) objects[1].prices[0]: i = 1, j = 0 - // 2) objects[2].prices[*]: i = 2, j = IndexValueType::NotSet - // etc. - // Approach is to check array size (because after removing some of it's items - // it should decrease). - for (auto it : qrAll) { - checkIfItemJSONValid(it); - Item item = it.GetItem(false); - VariantArray values = item[jsonPath.c_str()]; - if (i == IndexValueType::NotSet && j == IndexValueType::NotSet) { - ASSERT_TRUE(values.size() == 0) << values.size(); - } else if (i == IndexValueType::NotSet || j == IndexValueType::NotSet) { - ASSERT_TRUE(int(values.size()) == kPricesSize * 2) << values.size(); - } else { - ASSERT_TRUE(int(values.size()) == kPricesSize * 3 - 1) << values.size(); - } - } -} - -TEST_F(NsApi, DropArrayField1) { - // 1. Define NS - // 2. Fill NS - // 3. Drop array item(s) and check it was properly removed - DefineDefaultNamespace(); - AddUnindexedData(); - DropArrayItem(rt.reindexer, default_namespace, "nested.nested_array[0].prices[0]", "nested.nested_array.prices", 0, 0); -} - -TEST_F(NsApi, DropArrayField2) { - // 1. Define NS - // 2. Fill NS - // 3. Drop array item(s) and check it was properly removed - DefineDefaultNamespace(); - AddUnindexedData(); - DropArrayItem(rt.reindexer, default_namespace, "nested.nested_array[1].prices[*]", "nested.nested_array.prices", 1); -} - -TEST_F(NsApi, DropArrayField3) { - // 1. Define NS - // 2. Fill NS - // 3. Drop array item(s) and check it was properly removed - DefineDefaultNamespace(); - AddUnindexedData(); - DropArrayItem(rt.reindexer, default_namespace, "nested.nested_array[*].prices[*]", "nested.nested_array.prices"); -} - -#if (0) // #1500 -TEST_F(NsApi, DropArrayField4) { - // 1. Define NS - // 2. Fill NS - // 3. Drop array item(s) and check it was properly removed - DefineDefaultNamespace(); - AddUnindexedData(); - DropArrayItem(rt.reindexer, default_namespace, "nested.nested_array[0].prices[((2+4)*2)/6]", "nested.nested_array.prices", 0, - ((2 + 4) * 2) / 6); -} -#endif - -TEST_F(NsApi, SetArrayFieldWithSql) { - // 1. Define NS - // 2. Fill NS - DefineDefaultNamespace(); - AddUnindexedData(); - - // 3. 
Set all items of array to 777 - Query updateQuery = Query::FromSQL("update test_namespace set nested.nested_array[1].prices[*] = 777"); - QueryResults qrUpdate; - Error err = rt.reindexer->Update(updateQuery, qrUpdate); - ASSERT_TRUE(err.ok()) << err.what(); - - const int kElements = 3; - - // 4. Make sure all items of array nested.nested_array.prices are equal to 777 and of type Int - for (auto it : qrUpdate) { - Item item = it.GetItem(false); - VariantArray values = item["nested.nested_array.prices"]; - for (int i = 0; i < kElements; ++i) { - ASSERT_TRUE(values[kElements + i].As() == 777); - } - checkIfItemJSONValid(it); - } -} - -TEST_F(NsApi, DropArrayFieldWithSql) { - // 1. Define NS - // 2. Fill NS - DefineDefaultNamespace(); - AddUnindexedData(); - - // 3. Drop all items of array nested.nested_array[1].prices - Query updateQuery = Query::FromSQL("update test_namespace drop nested.nested_array[1].prices[*]"); - QueryResults qrUpdate; - Error err = rt.reindexer->Update(updateQuery, qrUpdate); - ASSERT_TRUE(err.ok()) << err.what(); - - const int kElements = 3; - - // 4. Check if items were really removed - for (auto it : qrUpdate) { - Item item = it.GetItem(false); - VariantArray values = item["nested.nested_array.prices"]; - ASSERT_TRUE(values.size() == kElements * 2); - checkIfItemJSONValid(it); - } -} - -TEST_F(NsApi, ExtendArrayFromTopWithSql) { - // 1. Define NS - // 2. Fill NS - DefineDefaultNamespace(); - AddUnindexedData(); - - // Append the following items: [88, 88, 88] to the top of the array array_field - Query updateQuery = Query::FromSQL("update test_namespace set array_field = [88,88,88] || array_field"); - QueryResults qrUpdate; - Error err = rt.reindexer->Update(updateQuery, qrUpdate); - ASSERT_TRUE(err.ok()) << err.what(); - - const int kElements = 3; - - // Check if these items were really added to array_field - for (auto it : qrUpdate) { - Item item = it.GetItem(false); - checkIfItemJSONValid(it); - VariantArray values = item["array_field"]; - ASSERT_TRUE(values.size() == kElements * 2); - for (int i = 0; i < kElements; ++i) { - ASSERT_TRUE(values[i].As() == 88); - } - } -} - -TEST_F(NsApi, AppendToArrayWithSql) { - // 1. Define NS - // 2. Fill NS - DefineDefaultNamespace(); - AddUnindexedData(); - - // 3. Extend array_field with expression substantially - Query updateQuery = - Query::FromSQL("update test_namespace set array_field = array_field || objects.more[1].array[4] || [22,22,22] || [11]"); - QueryResults qrUpdate; - Error err = rt.reindexer->Update(updateQuery, qrUpdate); - ASSERT_TRUE(err.ok()) << err.what(); - - const int kElements = 3; - - // 4. Make sure all items of array have proper values - for (auto it : qrUpdate) { - Item item = it.GetItem(false); - checkIfItemJSONValid(it); - VariantArray values = item["array_field"]; - int i = 0; - ASSERT_TRUE(values.size() == kElements * 2 + 1 + 1); - for (; i < kElements; ++i) { - ASSERT_TRUE(values[i].As() == i + 1); - } - ASSERT_TRUE(values[i++].As() == 0); - for (; i < 7; ++i) { - ASSERT_TRUE(values[i].As() == 22); - } - ASSERT_TRUE(values[i].As() == 11); - } -} - -TEST_F(NsApi, ExtendArrayWithExpressions) { - // 1. Define NS - // 2. Fill NS - DefineDefaultNamespace(); - AddUnindexedData(); - - // 3. 
Extend array_field with expression via Query builder - Query updateQuery = Query(default_namespace); - updateQuery.Set("array_field", - Variant(std::string("[88,88,88] || array_field || [99, 99, 99] || indexed_array_field || objects.more[1].array[4]")), - true); - QueryResults qrUpdate; - Error err = rt.reindexer->Update(updateQuery, qrUpdate); - ASSERT_TRUE(err.ok()) << err.what(); - - const int kElements = 3; - - // Check if array_field was modified properly - for (auto it : qrUpdate) { - Item item = it.GetItem(false); - checkIfItemJSONValid(it); - VariantArray values = item["array_field"]; - ASSERT_TRUE(values.size() == kElements * 3 + 9 + 1); - int i = 0; - for (; i < kElements; ++i) { - ASSERT_TRUE(values[i].As() == 88); - } - ASSERT_TRUE(values[i++].As() == 1); - ASSERT_TRUE(values[i++].As() == 2); - ASSERT_TRUE(values[i++].As() == 3); - for (; i < 9; ++i) { - ASSERT_TRUE(values[i].As() == 99); - } - for (int k = 1; k < 10; ++i, ++k) { - ASSERT_TRUE(values[i].As() == k * 11) << k << "; " << i << "; " << values[i].As(); - } - ASSERT_TRUE(values[i++].As() == 0); - } -} - -// Check if it's possible to use append operation with empty arrays and null fields -TEST_F(NsApi, ExtendEmptyArrayWithExpressions) { - const std::string kEmptyArraysNs = "empty_arrays_ns"; - CreateEmptyArraysNamespace(kEmptyArraysNs); - const Query kBaseQuery = Query(kEmptyArraysNs).Where("id", CondSet, {100, 105, 189, 113, 153}); - - auto ValidateResults = [this, &kBaseQuery, &kEmptyArraysNs](const QueryResults &qr, std::string_view pattern, std::string_view field, - const VariantArray &expectedValues, std::string_view description) { - const std::string fullDescription = "Description: " + std::string(description) + ";\n"; - // Check initial result - ASSERT_EQ(qr.Count(), 5) << fullDescription; - std::vector initialResults; - initialResults.reserve(qr.Count()); - for (auto it : qr) { - Item item = it.GetItem(false); - checkIfItemJSONValid(it); - const auto json = item.GetJSON(); - ASSERT_NE(json.find(pattern), std::string::npos) << fullDescription << "JSON: " << json << ";\npattern: " << pattern; - initialResults.emplace_back(json); - const VariantArray values = item[field]; - ASSERT_EQ(values.size(), expectedValues.size()) << fullDescription; - ASSERT_EQ(values.IsArrayValue(), expectedValues.IsArrayValue()) << fullDescription; - for (size_t i = 0; i < values.size(); ++i) { - ASSERT_TRUE(values[i].Type().IsSame(expectedValues[i].Type())) - << fullDescription << values[i].Type().Name() << "!=" << expectedValues[i].Type().Name(); - ASSERT_EQ(values[i], expectedValues[i]) << fullDescription; - } - } - // Check select results - QueryResults qrSelect; - const Query q = expectedValues.size() ? 
Query(kEmptyArraysNs).Where(std::string(field), CondAllSet, expectedValues) : kBaseQuery; - auto err = rt.reindexer->Select(q, qrSelect); - ASSERT_TRUE(err.ok()) << fullDescription << err.what(); - ASSERT_EQ(qrSelect.Count(), qr.Count()) << fullDescription; - unsigned i = 0; - for (auto it : qrSelect) { - Item item = it.GetItem(false); - checkIfItemJSONValid(it); - const auto json = item.GetJSON(); - ASSERT_EQ(json, initialResults[i++]) << fullDescription; - const VariantArray values = item[field]; - ASSERT_EQ(values.size(), expectedValues.size()) << fullDescription; - ASSERT_EQ(values.IsArrayValue(), expectedValues.IsArrayValue()) << fullDescription; - for (size_t j = 0; j < values.size(); ++j) { - ASSERT_TRUE(values[j].Type().IsSame(expectedValues[j].Type())) - << fullDescription << values[j].Type().Name() << "!=" << expectedValues[j].Type().Name(); - ASSERT_EQ(values[j], expectedValues[j]) << fullDescription; - } - } - }; - - { - const auto description = "append value to the empty indexed array"; - const Query query = Query(kBaseQuery).Set("indexed_array_field", Variant("indexed_array_field || [99, 99, 99]"), true); - QueryResults qr; - Error err = rt.reindexer->Update(query, qr); - ASSERT_TRUE(err.ok()) << err.what(); - ValidateResults(qr, R"("indexed_array_field":[99,99,99],"non_indexed_array_field":[])", "indexed_array_field", - VariantArray{Variant(99), Variant(99), Variant(99)}, description); - } - { - const auto description = "append empty array to the indexed array"; - const Query query = Query(kBaseQuery).Set("indexed_array_field", Variant("indexed_array_field || []"), true); - QueryResults qr; - Error err = rt.reindexer->Update(query, qr); - ASSERT_TRUE(err.ok()) << err.what(); - ValidateResults(qr, R"("indexed_array_field":[99,99,99],"non_indexed_array_field":[])", "indexed_array_field", - VariantArray{Variant(99), Variant(99), Variant(99)}, description); - } - { - const auto description = "append value to the empty non-indexed array"; - const Query query = Query(kBaseQuery).Set("non_indexed_array_field", Variant("non_indexed_array_field || [88, 88]"), true); - QueryResults qr; - Error err = rt.reindexer->Update(query, qr); - ASSERT_TRUE(err.ok()) << err.what(); - ValidateResults(qr, R"("indexed_array_field":[99,99,99],"non_indexed_array_field":[88,88])", "non_indexed_array_field", - VariantArray{Variant(int64_t(88)), Variant(int64_t(88))}, description); - } - { - const auto description = "append empty array to the non-indexed array"; - const Query query = Query(kBaseQuery).Set("non_indexed_array_field", Variant("non_indexed_array_field || []"), true); - QueryResults qr; - Error err = rt.reindexer->Update(query, qr); - ASSERT_TRUE(err.ok()) << err.what(); - ValidateResults(qr, R"("indexed_array_field":[99,99,99],"non_indexed_array_field":[88,88])", "non_indexed_array_field", - VariantArray{Variant(int64_t(88)), Variant(int64_t(88))}, description); - } - { - const auto description = "append empty array to the non-existing field"; - const Query query = Query(kBaseQuery).Set("non_existing_field", Variant("non_existing_field || []"), true); - QueryResults qr; - Error err = rt.reindexer->Update(query, qr); - ASSERT_TRUE(err.ok()) << err.what(); - ValidateResults(qr, R"("indexed_array_field":[99,99,99],"non_indexed_array_field":[88,88],"non_existing_field":[])", - "non_existing_field", VariantArray().MarkArray(), description); - } - - { - const auto description = "append non-empty array to the non-existing field"; - const Query query = Query(kBaseQuery).Set("non_existing_field1", 
Variant("non_existing_field1 || [546]"), true); - QueryResults qr; - Error err = rt.reindexer->Update(query, qr); - ASSERT_TRUE(err.ok()) << err.what(); - ValidateResults( - qr, R"("indexed_array_field":[99,99,99],"non_indexed_array_field":[88,88],"non_existing_field":[],"non_existing_field1":[546])", - "non_existing_field1", VariantArray{Variant(int64_t(546))}.MarkArray(), description); - } -} - -TEST_F(NsApi, UpdateObjectsArray) { - // 1. Define NS - // 2. Fill NS - DefineDefaultNamespace(); - AddUnindexedData(); - - // 3. Update object array and change one of it's items - Query updateQuery = - Query::FromSQL(R"(update test_namespace set nested.nested_array[1] = {"id":1,"name":"modified", "prices":[4,5,6]})"); - QueryResults qrUpdate; - Error err = rt.reindexer->Update(updateQuery, qrUpdate); - ASSERT_TRUE(err.ok()) << err.what(); - - // 4. Make sure nested.nested_array[1] is set to a new value properly - for (auto it : qrUpdate) { - Item item = it.GetItem(false); - checkIfItemJSONValid(it); - ASSERT_TRUE(item.GetJSON().find(R"({"id":1,"name":"modified","prices":[4,5,6]})") != std::string::npos); - } -} - -TEST_F(NsApi, UpdateObjectsArray2) { - // 1. Define NS - // 2. Fill NS - DefineDefaultNamespace(); - AddUnindexedData(); - - // 3. Set all items of the object array to a new value - Query updateQuery = Query::FromSQL(R"(update test_namespace set nested.nested_array[*] = {"ein":1,"zwei":2, "drei":3})"); - QueryResults qrUpdate; - Error err = rt.reindexer->Update(updateQuery, qrUpdate); - ASSERT_TRUE(err.ok()) << err.what(); - - // 4. Make sure all items of nested.nested_array are set to a new value correctly - for (auto it : qrUpdate) { - Item item = it.GetItem(false); - checkIfItemJSONValid(it); - ASSERT_TRUE(item.GetJSON().find( - R"("nested_array":[{"ein":1,"zwei":2,"drei":3},{"ein":1,"zwei":2,"drei":3},{"ein":1,"zwei":2,"drei":3}]})") != - std::string::npos); - } -} - -TEST_F(NsApi, UpdateObjectsArray3) { - // 1. Define NS - // 2. Fill NS - DefineDefaultNamespace(); - AddUnindexedData(); - - // 3. Set all items of the object array to a new value via Query builder - Query updateQuery = Query(default_namespace); - updateQuery.SetObject("nested.nested_array[*]", Variant(std::string(R"({"ein":1,"zwei":2, "drei":3})")), false); - QueryResults qrUpdate; - Error err = rt.reindexer->Update(updateQuery, qrUpdate); - ASSERT_TRUE(err.ok()) << err.what(); - - // 4. Make sure all items of nested.nested_array are set to a new value correctly - for (auto it : qrUpdate) { - Item item = it.GetItem(false); - checkIfItemJSONValid(it); - const auto json = item.GetJSON(); - ASSERT_NE(json.find(R"("nested_array":[{"ein":1,"zwei":2,"drei":3},{"ein":1,"zwei":2,"drei":3},{"ein":1,"zwei":2,"drei":3}]})"), - std::string::npos) - << json; - ASSERT_NE(json.find(R"("objects":[{"more":[{"array":[9,8,7,6,5]},{"array":[4,3,2,1,0]}]}])"), std::string::npos) << json; - } -} - -TEST_F(NsApi, UpdateObjectsArray4) { - // 1. 
Define NS - DefineDefaultNamespace(); - const std::vector indexTypes = {"regular", "sparse", "none"}; - constexpr char kIndexName[] = "objects.array.field"; - const Query kBaseQuery = Query(default_namespace).Where("id", CondSet, {1199, 1201, 1203, 1210, 1240}); - - auto ValidateResults = [this, &kBaseQuery](const QueryResults &qr, std::string_view pattern, std::string_view indexType, - std::string_view description) { - const std::string fullDescription = fmt::sprintf("Description: %s; %s;\n", description, indexType); - // Check initial result - ASSERT_EQ(qr.Count(), 5) << fullDescription; - std::vector initialResults; - initialResults.reserve(qr.Count()); - for (auto it : qr) { - Item item = it.GetItem(false); - checkIfItemJSONValid(it); - const auto json = item.GetJSON(); - ASSERT_NE(json.find(pattern), std::string::npos) << fullDescription << "JSON: " << json << ";\npattern: " << pattern; - initialResults.emplace_back(json); - } - // Check select results - QueryResults qrSelect; - auto err = rt.reindexer->Select(kBaseQuery, qrSelect); - ASSERT_TRUE(err.ok()) << fullDescription << err.what(); - ASSERT_EQ(qrSelect.Count(), qr.Count()) << fullDescription; - unsigned i = 0; - for (auto it : qrSelect) { - Item item = it.GetItem(false); - checkIfItemJSONValid(it); - const auto json = item.GetJSON(); - ASSERT_EQ(json, initialResults[i++]) << fullDescription; - } - }; - - for (const auto &index : indexTypes) { - Error err = rt.reindexer->TruncateNamespace(default_namespace); - ASSERT_TRUE(err.ok()) << err.what(); - // 2. Refill NS - AddHeterogeniousNestedData(); - err = - rt.reindexer->DropIndex(default_namespace, reindexer::IndexDef(kIndexName, {kIndexName}, "hash", "int64", IndexOpts().Array())); - (void)err; // Error does not matter here - if (index != "none") { - err = rt.reindexer->AddIndex(default_namespace, reindexer::IndexDef(kIndexName, {kIndexName}, "hash", "int64", - IndexOpts().Array().Sparse(index == "sparse"))); - ASSERT_TRUE(err.ok()) << err.what(); - } - const std::string indexTypeMsg = fmt::sprintf("Index type is '%s'", index); - - { - const auto description = "Update array field, nested into objects array with explicit index (1 element)"; - Query updateQuery = Query(kBaseQuery).Set("objects[0].array[0].field[4]", {777}, false); - QueryResults qr; - err = rt.reindexer->Update(updateQuery, qr); - ASSERT_TRUE(err.ok()) << indexTypeMsg << err.what(); - ValidateResults(qr, R"("objects":[{"array":[{"field":[9,8,7,6,777]},{"field":11},{"field":[4,3,2,1,0]},{"field":[99]}]}])", - indexTypeMsg, description); - } - { - const auto description = "Update array field, nested into objects array with explicit index (1 element, different position)"; - Query updateQuery = Query(kBaseQuery).Set("objects[0].array[2].field[3]", {8387}, false); - QueryResults qr; - err = rt.reindexer->Update(updateQuery, qr); - ASSERT_TRUE(err.ok()) << indexTypeMsg << err.what(); - ValidateResults(qr, R"("objects":[{"array":[{"field":[9,8,7,6,777]},{"field":11},{"field":[4,3,2,8387,0]},{"field":[99]}]}])", - indexTypeMsg, description); - } - { - const auto description = "Update array field, nested into objects array without explicit index with scalar type"; - // Make sure, that internal field's type ('scalar') was not changed - Query updateQuery = Query(kBaseQuery).Set("objects[0].array[1].field", {537}, false); - QueryResults qr; - err = rt.reindexer->Update(updateQuery, qr); - ASSERT_TRUE(err.ok()) << indexTypeMsg << err.what(); - ValidateResults(qr, 
R"("objects":[{"array":[{"field":[9,8,7,6,777]},{"field":537},{"field":[4,3,2,8387,0]},{"field":[99]}]}])", - indexTypeMsg, description); - } - { - const auto description = "Update scalar field, nested into objects array with explicit index with array type"; - // Make sure, that internal field's type ('array') was not changed - Query updateQuery = Query(kBaseQuery).Set("objects[0].array[3].field[0]", {999}, false); - QueryResults qr; - err = rt.reindexer->Update(updateQuery, qr); - ASSERT_TRUE(err.ok()) << indexTypeMsg << err.what(); - ValidateResults(qr, R"("objects":[{"array":[{"field":[9,8,7,6,777]},{"field":537},{"field":[4,3,2,8387,0]},{"field":[999]}]}])", - indexTypeMsg, description); - } - { - const auto description = - "Update array field, nested into objects array without explicit index. Change field type from array[1] to scalar"; - // Make sure, that internal field's type (array of 1 element) was changed to scalar - Query updateQuery = Query(kBaseQuery).Set("objects[0].array[3].field", {837}, false); - QueryResults qr; - err = rt.reindexer->Update(updateQuery, qr); - ASSERT_TRUE(err.ok()) << indexTypeMsg << err.what(); - ValidateResults(qr, R"("objects":[{"array":[{"field":[9,8,7,6,777]},{"field":537},{"field":[4,3,2,8387,0]},{"field":837}]}])", - indexTypeMsg, description); - } - { - const auto description = - "Update array field, nested into objects array without explicit index. Change field type from array[4] to scalar"; - // Make sure, that internal field's type (array of 4 elements) was changed to scalar - Query updateQuery = Query(kBaseQuery).Set("objects[0].array[0].field", {2345}, false); - QueryResults qr; - err = rt.reindexer->Update(updateQuery, qr); - ASSERT_TRUE(err.ok()) << indexTypeMsg << err.what(); - ValidateResults(qr, R"("objects":[{"array":[{"field":2345},{"field":537},{"field":[4,3,2,8387,0]},{"field":837}]}])", - indexTypeMsg, description); - } - { - const auto description = - "Update array field, nested into objects array without explicit index. Change field type from scalar to array[1]"; - // Make sure, that internal field's type ('scalar') was changed to array - Query updateQuery = Query(kBaseQuery).Set("objects[0].array[1].field", VariantArray{Variant{1847}}.MarkArray(), false); - QueryResults qr; - err = rt.reindexer->Update(updateQuery, qr); - ASSERT_TRUE(err.ok()) << indexTypeMsg << err.what(); - ValidateResults(qr, R"("objects":[{"array":[{"field":2345},{"field":[1847]},{"field":[4,3,2,8387,0]},{"field":837}]}])", - indexTypeMsg, description); - } - { - const auto description = "Update array field, nested into objects array without explicit index. Increase array size"; - Query updateQuery = - Query(kBaseQuery).Set("objects[0].array[1].field", VariantArray{Variant{115}, Variant{1000}, Variant{501}}, false); - QueryResults qr; - err = rt.reindexer->Update(updateQuery, qr); - ASSERT_TRUE(err.ok()) << indexTypeMsg << err.what(); - ValidateResults(qr, R"("objects":[{"array":[{"field":2345},{"field":[115,1000,501]},{"field":[4,3,2,8387,0]},{"field":837}]}])", - indexTypeMsg, description); - } - { - const auto description = - "Update array field, nested into objects array without explicit index. 
Reduce array size (to multiple elements)"; - Query updateQuery = Query(kBaseQuery).Set("objects[0].array[1].field", VariantArray{Variant{100}, Variant{999}}, false); - QueryResults qr; - err = rt.reindexer->Update(updateQuery, qr); - ASSERT_TRUE(err.ok()) << indexTypeMsg << err.what(); - ValidateResults(qr, R"("objects":[{"array":[{"field":2345},{"field":[100,999]},{"field":[4,3,2,8387,0]},{"field":837}]}])", - indexTypeMsg, description); - } - { - const auto description = - "Update array field, nested into objects array without explicit index. Reduce array size (to single element)"; - Query updateQuery = Query(kBaseQuery).Set("objects[0].array[1].field", VariantArray{Variant{150}}.MarkArray(), false); - QueryResults qr; - err = rt.reindexer->Update(updateQuery, qr); - ASSERT_TRUE(err.ok()) << indexTypeMsg << err.what(); - ValidateResults(qr, R"("objects":[{"array":[{"field":2345},{"field":[150]},{"field":[4,3,2,8387,0]},{"field":837}]}])", - indexTypeMsg, description); - } - { - const auto description = "Attempt to set array-value(1 element) by explicit index"; - Query updateQuery = Query(kBaseQuery).Set("objects[0].array[1].field[0]", VariantArray{Variant{199}}.MarkArray(), false); - QueryResults qr; - err = rt.reindexer->Update(updateQuery, qr); - ASSERT_EQ(err.code(), errParams) << indexTypeMsg << err.what(); - - qr.Clear(); - err = rt.reindexer->Select(kBaseQuery, qr); - ASSERT_TRUE(err.ok()) << indexTypeMsg << err.what(); - // Make sure, that item was not changed - ValidateResults(qr, R"("objects":[{"array":[{"field":2345},{"field":[150]},{"field":[4,3,2,8387,0]},{"field":837}]}])", - indexTypeMsg, description); - } - { - const auto description = "Attempt to set array-value(multiple elements) by explicit index"; - VariantArray v{Variant{199}, Variant{200}, Variant{300}}; - Query updateQuery = Query(kBaseQuery).Set("objects[0].array[1].field[0]", v, false); - QueryResults qr; - err = rt.reindexer->Update(updateQuery, qr); - ASSERT_EQ(err.code(), errParams) << indexTypeMsg << err.what(); - - qr.Clear(); - err = rt.reindexer->Select(kBaseQuery, qr); - ASSERT_TRUE(err.ok()) << indexTypeMsg << err.what(); - // Make sure, that item was not changed - ValidateResults(qr, R"("objects":[{"array":[{"field":2345},{"field":[150]},{"field":[4,3,2,8387,0]},{"field":837}]}])", - indexTypeMsg, description); - } - { - const auto description = "Attempt to set array-value(1 element) by *-index"; - VariantArray v{Variant{199}, Variant{200}, Variant{300}}; - Query updateQuery = Query(kBaseQuery).Set("objects[0].array[1].field[*]", v, false); - QueryResults qr; - err = rt.reindexer->Update(updateQuery, qr); - ASSERT_EQ(err.code(), errParams) << indexTypeMsg << err.what(); - - qr.Clear(); - err = rt.reindexer->Select(kBaseQuery, qr); - ASSERT_TRUE(err.ok()) << indexTypeMsg << err.what(); - // Make sure, that item was not changed - ValidateResults(qr, R"("objects":[{"array":[{"field":2345},{"field":[150]},{"field":[4,3,2,8387,0]},{"field":837}]}])", - indexTypeMsg, description); - } - { - const auto description = "Attempt to set array-value(multiple elements) by *-index"; - VariantArray v{Variant{199}, Variant{200}, Variant{300}}; - Query updateQuery = Query(kBaseQuery).Set("objects[0].array[1].field[*]", v, false); - QueryResults qr; - err = rt.reindexer->Update(updateQuery, qr); - ASSERT_EQ(err.code(), errParams) << indexTypeMsg << err.what(); - - qr.Clear(); - err = rt.reindexer->Select(kBaseQuery, qr); - ASSERT_TRUE(err.ok()) << indexTypeMsg << err.what(); - // Make sure, that item was not changed - 
ValidateResults(qr, R"("objects":[{"array":[{"field":2345},{"field":[150]},{"field":[4,3,2,8387,0]},{"field":837}]}])", - indexTypeMsg, description); - } - { - const auto description = "Update array field, nested into objects array with *-index"; - Query updateQuery = Query(kBaseQuery).Set("objects[0].array[2].field[*]", {199}, false); - QueryResults qr; - err = rt.reindexer->Update(updateQuery, qr); - ASSERT_TRUE(err.ok()) << indexTypeMsg << err.what(); - ValidateResults(qr, R"("objects":[{"array":[{"field":2345},{"field":[150]},{"field":[199,199,199,199,199]},{"field":837}]}])", - indexTypeMsg, description); - } - { - const auto description = "Attempt to update scalar value by *-index"; - VariantArray v{Variant{199}, Variant{200}, Variant{300}}; - Query updateQuery = Query(kBaseQuery).Set("objects[0].array[0].field[*]", v, false); - QueryResults qr; - err = rt.reindexer->Update(updateQuery, qr); - ASSERT_EQ(err.code(), errParams) << indexTypeMsg << err.what(); - - qr.Clear(); - err = rt.reindexer->Select(kBaseQuery, qr); - ASSERT_TRUE(err.ok()) << indexTypeMsg << err.what(); - // Make sure, that item was not changed - ValidateResults(qr, R"("objects":[{"array":[{"field":2345},{"field":[150]},{"field":[199,199,199,199,199]},{"field":837}]}])", - indexTypeMsg, description); - } - { - const auto description = "Update array field, nested into objects array without explicit index. Reduce array size to 0"; - Query updateQuery = Query(kBaseQuery).Set("objects[0].array[1].field", VariantArray().MarkArray(), false); - QueryResults qr; - err = rt.reindexer->Update(updateQuery, qr); - ASSERT_TRUE(err.ok()) << indexTypeMsg << err.what(); - ValidateResults(qr, R"("objects":[{"array":[{"field":2345},{"field":[]},{"field":[199,199,199,199,199]},{"field":837}]}])", - indexTypeMsg, description); - } - { - const auto description = "Update array field, nested into objects array without explicit index. Increase array size from 0"; - VariantArray v{Variant{11199}, Variant{11200}, Variant{11300}}; - Query updateQuery = Query(kBaseQuery).Set("objects[0].array[1].field", v, false); - QueryResults qr; - err = rt.reindexer->Update(updateQuery, qr); - ASSERT_TRUE(err.ok()) << indexTypeMsg << err.what(); - ValidateResults( - qr, R"("objects":[{"array":[{"field":2345},{"field":[11199,11200,11300]},{"field":[199,199,199,199,199]},{"field":837}]}])", - indexTypeMsg, description); - } - } -} - -TEST_F(NsApi, UpdateArrayIndexFieldWithSeveralJsonPaths) { - struct Values { - std::vector valsList, newValsList; - }; - const int fieldsCnt = 5; - const int valsPerFieldCnt = 4; - std::vector fieldsValues(fieldsCnt); - for (int i = 0; i < fieldsCnt; ++i) { - for (int j = 0; j < valsPerFieldCnt; ++j) { - fieldsValues[i].valsList.emplace_back(fmt::sprintf("data%d%d", i, j)); - fieldsValues[i].newValsList.emplace_back(fmt::sprintf("data%d%d", i, j + i)); - } - } - - enum class OpT { Insert, Update }; - - auto makeFieldsList = [&fieldsValues](const reindexer::fast_hash_set &indexes, OpT type) { - auto quote = type == OpT::Insert ? '"' : '\''; - std::vector Values::*list = type == OpT::Insert ? &Values::valsList : &Values::newValsList; - const auto fieldsListTmplt = type == OpT::Insert ? R"("%sfield%d": [%s])" : R"(%sfield%d = [%s])"; - std::string fieldsList; - for (int idx : indexes) { - std::string fieldList; - for (const auto &data : fieldsValues[idx].*list) { - fieldList += std::string(fieldList.empty() ? "" : ", ") + quote + data + quote; - } - fieldsList += fmt::sprintf(fieldsListTmplt, fieldsList.empty() ? 
"" : ", ", idx, fieldList); - } - return fieldsList; - }; - - auto makeItem = [&makeFieldsList](int id, const reindexer::fast_hash_set &indexes) { - auto list = makeFieldsList(indexes, OpT::Insert); - return fmt::sprintf(R"({"id": %d%s})", id, (list.empty() ? "" : ", ") + list); - }; - - auto makeUpdate = [this, &makeFieldsList](int id, const reindexer::fast_hash_set &indexes) { - return fmt::sprintf("UPDATE %s SET %s WHERE id = %d", default_namespace, makeFieldsList(indexes, OpT::Update), id); - }; - - struct TestCase { - reindexer::fast_hash_set insertIdxs, updateIdxs; - auto expected() const { - auto res = insertIdxs; - res.insert(updateIdxs.begin(), updateIdxs.end()); - return res; - } - }; - - std::vector testCases{ - {{}, {0}}, - {{}, {2}}, - {{}, {0, 1, 2}}, - {{2, 3, 4}, {0}}, - {{3}, {0, 2, 4}}, - {{0, 3, 4}, {2, 1}}, - {{0, 1, 2, 3}, {4}}, - {{0, 1, 2}, {3, 4}}, - {{0, 2, 3}, {1, 4}}, - {{4}, {0, 1, 2, 3}}, - {{3, 4}, {0, 2, 1}}, - {{}, {0, 1, 2, 3, 4}}, - {{0, 1, 2, 3, 4}, {0}}, - {{0, 3, 4}, {0, 3, 4}}, - {{0, 1, 2}, {2, 3, 1}}, - {{0, 3, 4}, {2, 3, 4}}, - {{0, 1, 3}, {0, 1, 2, 3, 4}}, - {{0, 1, 2, 3, 4}, {0, 1, 2, 3, 4}}, - }; - - Error err = rt.reindexer->OpenNamespace(default_namespace); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->AddIndex(default_namespace, {"id", "hash", "int", IndexOpts().PK()}); - ASSERT_TRUE(err.ok()) << err.what(); - err = rt.reindexer->AddIndex(default_namespace, {"array_index", reindexer::JsonPaths{"field0", "field1", "field2", "field3", "field4"}, - "hash", "string", IndexOpts().Array()}); - ASSERT_TRUE(err.ok()) << err.what(); - - for (size_t i = 0; i < testCases.size(); ++i) { - AddItemFromJSON(default_namespace, makeItem(i, testCases[i].insertIdxs)); - { - QueryResults qr; - err = rt.reindexer->Select(makeUpdate(i, testCases[i].updateIdxs), qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), 1); - - auto item = qr.begin().GetItem(false); - for (auto idx : testCases[i].expected()) { - int varArrCnt = 0; - for (auto &&var : VariantArray(item[fmt::sprintf("field%d", idx)])) { - const auto &data = testCases[i].updateIdxs.count(idx) ? 
fieldsValues[idx].newValsList : fieldsValues[idx].valsList; - ASSERT_EQ(var.As(), data[varArrCnt++]); - } - } - } - } - - // Check that prohibited updating an index array field with several json paths by index name - QueryResults qr; - err = rt.reindexer->Select(fmt::sprintf(R"(UPDATE %s SET array_index = ['data0', 'data1', 'data2'] WHERE id = 0)", default_namespace), - qr); - ASSERT_FALSE(err.ok()); - ASSERT_EQ(err.what(), "Ambiguity when updating field with several json paths by index name: 'array_index'"); -} - -TEST_F(NsApi, UpdateWithObjectAndFieldsDuplication) { - Error err = rt.reindexer->OpenNamespace(default_namespace); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->AddIndex(default_namespace, {"id", "hash", "int", IndexOpts().PK()}); - ASSERT_TRUE(err.ok()) << err.what(); - err = rt.reindexer->AddIndex(default_namespace, {"nested", reindexer::JsonPaths{"n.idv"}, "hash", "string", IndexOpts()}); - ASSERT_TRUE(err.ok()) << err.what(); - - std::vector items = {R"({"id":0,"data":"data0","n":{"idv":"index_str_1","dat":"data1"}})", - R"({"id":5,"data":"data5","n":{"idv":"index_str_3","dat":"data2"}})"}; - - AddItemFromJSON(default_namespace, items[0]); - AddItemFromJSON(default_namespace, items[1]); - - { - QueryResults qr; - err = rt.reindexer->Update(Query(default_namespace) - .SetObject("n", R"({"idv":"index_str_3_modified","idv":"index_str_5_modified","dat":"data2_mod"})") - .Where("id", CondEq, 5), - qr); - ASSERT_EQ(err.code(), errLogic) << err.what(); - } - { - // Check all the items - QueryResults qr; - err = rt.reindexer->Select(Query(default_namespace).Sort("id", false), qr); - ASSERT_EQ(qr.Count(), 2); - unsigned i = 0; - for (auto it : qr) { - ASSERT_EQ(it.GetItem().GetJSON(), items[i]) << i; - ++i; - } - } - { - // Check old indexed value (have to exists) - QueryResults qr; - err = rt.reindexer->Select(Query(default_namespace).Where("nested", CondEq, std::string("index_str_3")), qr); - ASSERT_EQ(qr.Count(), 1); - ASSERT_EQ(qr.begin().GetItem().GetJSON(), items[1]); - } - { - // Check new indexed values (have to not exist) - QueryResults qr; - err = rt.reindexer->Select( - Query(default_namespace).Where("nested", CondSet, {std::string("index_str_3_modified"), std::string("index_str_5_modified")}), - qr); - ASSERT_EQ(qr.Count(), 0); - } -} - -TEST_F(NsApi, UpdateOutOfBoundsArrayField) { - // Check, that item modifier does not allow to set value in the array with out of bound index - const int kTargetID = 1500; - - // 1. Define NS - // 2. 
Fill NS - DefineDefaultNamespace(); - AddUnindexedData(); - - struct Case { - const std::string name; - const std::string baseUpdateExpr; - const std::vector arrayIdx; - const std::function createQueryF; - }; - const std::vector cases = { - {.name = "update-index-array-field", - .baseUpdateExpr = "indexed_array_field[%d]", - .arrayIdx = {9, 10, 100, 10000, 5000000}, - .createQueryF = - [&](const std::string &path) { - return Query(default_namespace).Where("id", CondEq, kTargetID).Set(path, static_cast(777)); - }}, - {.name = "update-non-indexed-array-field", - .baseUpdateExpr = "array_field[%d]", - .arrayIdx = {3, 4, 100, 10000, 5000000}, - .createQueryF = - [&](const std::string &path) { - return Query(default_namespace).Where("id", CondEq, kTargetID).Set(path, static_cast(777)); - }}, - {.name = "update-object-array-field", - .baseUpdateExpr = "nested.nested_array[%d]", - .arrayIdx = {3, 4, 100, 10000, 5000000}, - .createQueryF = [&](const std::string &path) { - return Query(default_namespace) - .Where("id", CondEq, kTargetID) - .SetObject(path, Variant(std::string(R"({"id":5,"name":"fifth", "prices":[3,5,5]})")), false); - }}}; - - for (auto &c : cases) { - TestCout() << c.name << std::endl; - - for (auto idx : c.arrayIdx) { - // 3. Get initial array value - std::string initialItemJSON; - { - QueryResults qr; - Error err = rt.reindexer->Select(Query(default_namespace).Where("id", CondEq, kTargetID), qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), 1); - reindexer::WrSerializer ser; - err = qr.begin().GetJSON(ser, false); - ASSERT_TRUE(err.ok()) << err.what(); - initialItemJSON = ser.Slice(); - } - - // 4. Set item with out of bound index to specific value via Query builder - const auto path = fmt::sprintf(c.baseUpdateExpr, idx); - QueryResults qrUpdate; - const auto updateQuery = c.createQueryF(path); - Error err = rt.reindexer->Update(updateQuery, qrUpdate); - EXPECT_FALSE(err.ok()) << path; - - { - // 5. Make sure, that item was not changed - QueryResults qr; - err = rt.reindexer->Select(Query(default_namespace).Where("id", CondEq, kTargetID), qr); - ASSERT_TRUE(err.ok()) << err.what() << "; " << path; - ASSERT_EQ(qr.Count(), 1) << path; - reindexer::WrSerializer ser; - err = qr.begin().GetJSON(ser, false); - ASSERT_TRUE(err.ok()) << err.what() << "; " << path; - ASSERT_EQ(initialItemJSON, ser.Slice()) << path; - } - } - } -} - -TEST_F(NsApi, AccessForIndexedArrayItem) { - // 1. Define NS - // 2. Fill NS - DefineDefaultNamespace(); - AddUnindexedData(); - - // 3. Set indexed_array_field[0] to 777 - QueryResults qr; - Error err = rt.reindexer->Update(Query(default_namespace).Set("indexed_array_field[0]", Variant(int(777))), qr); - ASSERT_TRUE(err.ok()) << err.what(); - - // 4. Try to access elements of different arrays with Item object functionality - // to make sure if GetValueByJSONPath() works properly. 
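	// The Item interface resolves full JSON paths with explicit array indexes, so individual elements
	// of both indexed and non-indexed arrays can be read back directly. A hedged sketch of the access
	// pattern verified by the loop below (paths and values follow AddUnindexedData()):
	//   Variant first = item["indexed_array_field[0]"];       // element of an indexed int array
	//   Variant deep = item["objects[0].more[0].array[2]"];   // element of a nested non-indexed array
	//   int v = first.As<int>();                              // narrowed through Variant::As<T>()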
-	for (auto it : qr) {
-		checkIfItemJSONValid(it);
-
-		reindexer::Item item = it.GetItem(false);
-
-		Variant value1 = item["indexed_array_field[0]"];
-		ASSERT_TRUE(value1.Type().Is<reindexer::KeyValueType::Int>());
-		ASSERT_TRUE(static_cast<int>(value1) == 777);
-
-		Variant value2 = item["objects[0].more[0].array[0]"];
-		ASSERT_TRUE(value2.Type().Is<reindexer::KeyValueType::Int64>());
-		ASSERT_TRUE(static_cast<int64_t>(value2) == 9);
-
-		Variant value3 = item["objects[0].more[0].array[1]"];
-		ASSERT_TRUE(value3.Type().Is<reindexer::KeyValueType::Int64>());
-		ASSERT_TRUE(static_cast<int64_t>(value3) == 8);
-
-		Variant value4 = item["objects[0].more[0].array[2]"];
-		ASSERT_TRUE(value4.Type().Is<reindexer::KeyValueType::Int64>());
-		ASSERT_TRUE(static_cast<int64_t>(value4) == 7);
-
-		Variant value5 = item["objects[0].more[0].array[3]"];
-		ASSERT_TRUE(value5.Type().Is<reindexer::KeyValueType::Int64>());
-		ASSERT_TRUE(static_cast<int64_t>(value5) == 6);
-
-		Variant value6 = item["objects[0].more[0].array[4]"];
-		ASSERT_TRUE(value6.Type().Is<reindexer::KeyValueType::Int64>());
-		ASSERT_TRUE(static_cast<int64_t>(value6) == 5);
-
-		Variant value7 = item["nested.nested_array[1].prices[1]"];
-		ASSERT_TRUE(value7.Type().Is<reindexer::KeyValueType::Int64>());
-		ASSERT_TRUE(static_cast<int64_t>(value7) == 5);
-	}
-}
-
-TEST_F(NsApi, UpdateComplexArrayItem) {
-	// 1. Define NS
-	// 2. Fill NS
-	DefineDefaultNamespace();
-	AddUnindexedData();
-
-	// 3. Set objects[0].more[1].array[1] to 777
-	QueryResults qr;
-	Error err = rt.reindexer->Update(
-		Query(default_namespace).Where(idIdxName, CondEq, Variant(1000)).Set("objects[0].more[1].array[1]", Variant(int64_t(777))), qr);
-	ASSERT_TRUE(err.ok()) << err.what();
-
-	// 4. Make sure the value of objects[0].more[1].array[1] which was updated above
-	// can be accessed correctly with no problems.
-	for (auto it : qr) {
-		checkIfItemJSONValid(it);
-
-		reindexer::Item item = it.GetItem(false);
-
-		Variant value = item["objects[0].more[1].array[1]"];
-		ASSERT_TRUE(value.Type().Is<reindexer::KeyValueType::Int64>());
-		ASSERT_TRUE(static_cast<int64_t>(value) == 777);
-
-		Variant value2 = item["objects[0].more[1].array[2]"];
-		ASSERT_TRUE(value2.Type().Is<reindexer::KeyValueType::Int64>());
-		ASSERT_TRUE(static_cast<int64_t>(value2) == 2);
-	}
-}
-
-TEST_F(NsApi, CheckIndexedArrayItem) {
-	// 1. Define NS
-	// 2. Fill NS
-	DefineDefaultNamespace();
-	AddUnindexedData();
-
-	// 3. Select all items of the namespace
-	QueryResults qr;
-	Error err = rt.reindexer->Select(Query(default_namespace), qr);
-	ASSERT_TRUE(err.ok()) << err.what();
-
-	// 4. Check if the value of indexed array objects[0].more[1].array[1]
-	// can be accessed easily.
- for (auto it : qr) { - checkIfItemJSONValid(it); - - reindexer::Item item = it.GetItem(false); - - Variant value = item["objects[0].more[1].array[1]"]; - ASSERT_TRUE(value.Type().Is()); - ASSERT_TRUE(static_cast(value) == 3); - - Variant value1 = item["objects[0].more[1].array[3]"]; - ASSERT_TRUE(value1.Type().Is()); - ASSERT_TRUE(static_cast(value1) == 1); - - Variant value2 = item["objects[0].more[0].array[4]"]; - ASSERT_TRUE(value2.Type().Is()); - ASSERT_TRUE(static_cast(value2) == 5); - } -} - -static void checkFieldConversion(const std::shared_ptr &reindexer, const std::string &ns, - const std::string &updateFieldPath, const VariantArray &newValue, const VariantArray &updatedValue, - reindexer::KeyValueType sourceType, bool expectFail) { - const Query selectQuery{Query(ns).Where("id", CondGe, Variant("500"))}; - QueryResults qrUpdate; - Query updateQuery = selectQuery; - updateQuery.Set(updateFieldPath, newValue); - Error err = reindexer->Update(updateQuery, qrUpdate); - if (expectFail) { - if (err.ok()) { - for (auto it : qrUpdate) checkIfItemJSONValid(it, true); - } - ASSERT_TRUE(!err.ok()); - } else { - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_GT(qrUpdate.Count(), 0); - - QueryResults qrAll; - err = reindexer->Select(selectQuery, qrAll); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qrAll.Count(), qrUpdate.Count()); - - for (auto it : qrAll) { - Item item = it.GetItem(false); - VariantArray val = item[updateFieldPath.c_str()]; - ASSERT_TRUE(val.size() == updatedValue.size()); - for (const Variant &v : val) { - ASSERT_TRUE(v.Type().IsSame(sourceType)) << v.Type().Name(); - } - ASSERT_TRUE(val == updatedValue); - checkIfItemJSONValid(it); - } - } -} - -TEST_F(NsApi, TestIntIndexedFieldConversion) { - DefineDefaultNamespace(); - FillDefaultNamespace(); - - checkFieldConversion(rt.reindexer, default_namespace, intField, {Variant(static_cast(13.33f))}, - {Variant(static_cast(13.33f))}, reindexer::KeyValueType::Int{}, false); - - checkFieldConversion(rt.reindexer, default_namespace, intField, {Variant(static_cast(13))}, {Variant(static_cast(13))}, - reindexer::KeyValueType::Int{}, false); - - checkFieldConversion(rt.reindexer, default_namespace, intField, {Variant(static_cast(false))}, {Variant(static_cast(0))}, - reindexer::KeyValueType::Int{}, false); - - checkFieldConversion(rt.reindexer, default_namespace, intField, {Variant(static_cast(true))}, {Variant(static_cast(1))}, - reindexer::KeyValueType::Int{}, false); - - checkFieldConversion(rt.reindexer, default_namespace, intField, {Variant(std::string("100500"))}, {Variant(static_cast(100500))}, - reindexer::KeyValueType::Int{}, false); - - checkFieldConversion(rt.reindexer, default_namespace, intField, {Variant(std::string("Jesus Christ"))}, {Variant()}, - reindexer::KeyValueType::Int{}, true); -} - -TEST_F(NsApi, TestDoubleIndexedFieldConversion) { - DefineDefaultNamespace(); - FillDefaultNamespace(); - - checkFieldConversion(rt.reindexer, default_namespace, doubleField, {Variant(static_cast(13333))}, - {Variant(static_cast(13333.0f))}, reindexer::KeyValueType::Double{}, false); - - checkFieldConversion(rt.reindexer, default_namespace, doubleField, {Variant(static_cast(13333))}, - {Variant(static_cast(13333.0f))}, reindexer::KeyValueType::Double{}, false); - - checkFieldConversion(rt.reindexer, default_namespace, doubleField, {Variant(static_cast(false))}, - {Variant(static_cast(0.0f))}, reindexer::KeyValueType::Double{}, false); - - checkFieldConversion(rt.reindexer, default_namespace, doubleField, 
{Variant(static_cast(true))}, - {Variant(static_cast(1.0f))}, reindexer::KeyValueType::Double{}, false); - - checkFieldConversion(rt.reindexer, default_namespace, doubleField, {Variant(std::string("100500.1"))}, - {Variant(static_cast(100500.100000))}, reindexer::KeyValueType::Double{}, false); - - checkFieldConversion(rt.reindexer, default_namespace, doubleField, {Variant(std::string("Jesus Christ"))}, {Variant()}, - reindexer::KeyValueType::Double{}, true); -} - -TEST_F(NsApi, TestBoolIndexedFieldConversion) { - DefineDefaultNamespace(); - FillDefaultNamespace(); - - checkFieldConversion(rt.reindexer, default_namespace, boolField, {Variant(static_cast(100500))}, {Variant(true)}, - reindexer::KeyValueType::Bool{}, false); - - checkFieldConversion(rt.reindexer, default_namespace, boolField, {Variant(static_cast(100500))}, {Variant(true)}, - reindexer::KeyValueType::Bool{}, false); - - checkFieldConversion(rt.reindexer, default_namespace, boolField, {Variant(static_cast(100500.1))}, {Variant(true)}, - reindexer::KeyValueType::Bool{}, false); - - checkFieldConversion(rt.reindexer, default_namespace, boolField, {Variant(std::string("1"))}, {Variant(false)}, - reindexer::KeyValueType::Bool{}, false); - checkFieldConversion(rt.reindexer, default_namespace, boolField, {Variant(std::string("0"))}, {Variant(false)}, - reindexer::KeyValueType::Bool{}, false); - checkFieldConversion(rt.reindexer, default_namespace, boolField, {Variant(std::string("true"))}, {Variant(true)}, - reindexer::KeyValueType::Bool{}, false); - checkFieldConversion(rt.reindexer, default_namespace, boolField, {Variant(std::string("false"))}, {Variant(false)}, - reindexer::KeyValueType::Bool{}, false); -} - -TEST_F(NsApi, TestStringIndexedFieldConversion) { - DefineDefaultNamespace(); - FillDefaultNamespace(); - - checkFieldConversion(rt.reindexer, default_namespace, stringField, {Variant(static_cast(100500))}, {Variant("100500")}, - reindexer::KeyValueType::String{}, false); - - checkFieldConversion(rt.reindexer, default_namespace, stringField, {Variant(true)}, {Variant(std::string("true"))}, - reindexer::KeyValueType::String{}, false); - - checkFieldConversion(rt.reindexer, default_namespace, stringField, {Variant(false)}, {Variant(std::string("false"))}, - reindexer::KeyValueType::String{}, false); -} - -TEST_F(NsApi, TestIntNonindexedFieldConversion) { - DefineDefaultNamespace(); - AddUnindexedData(); - - checkFieldConversion(rt.reindexer, default_namespace, "nested.bonus", {Variant(static_cast(13.33f))}, - {Variant(static_cast(13.33f))}, reindexer::KeyValueType::Double{}, false); - - checkFieldConversion(rt.reindexer, default_namespace, "nested.bonus", {Variant(static_cast(13))}, - {Variant(static_cast(13))}, reindexer::KeyValueType::Int64{}, false); - - checkFieldConversion(rt.reindexer, default_namespace, "nested.bonus", {Variant(static_cast(false))}, - {Variant(static_cast(false))}, reindexer::KeyValueType::Bool{}, false); - - checkFieldConversion(rt.reindexer, default_namespace, "nested.bonus", {Variant(static_cast(true))}, - {Variant(static_cast(true))}, reindexer::KeyValueType::Bool{}, false); - - checkFieldConversion(rt.reindexer, default_namespace, "nested.bonus", {Variant(std::string("100500"))}, - {Variant(std::string("100500"))}, reindexer::KeyValueType::String{}, false); - - checkFieldConversion(rt.reindexer, default_namespace, "nested.bonus", {Variant(std::string("Jesus Christ"))}, - {Variant(std::string("Jesus Christ"))}, reindexer::KeyValueType::String{}, false); -} - -TEST_F(NsApi, 
TestIndexedArrayFieldConversion) { - DefineDefaultNamespace(); - FillDefaultNamespace(); - - checkFieldConversion( - rt.reindexer, default_namespace, indexedArrayField, - {Variant(static_cast(1.33f)), Variant(static_cast(2.33f)), Variant(static_cast(3.33f)), - Variant(static_cast(4.33f))}, - {Variant(static_cast(1)), Variant(static_cast(2)), Variant(static_cast(3)), Variant(static_cast(4))}, - reindexer::KeyValueType::Int{}, false); -} - -TEST_F(NsApi, TestNonIndexedArrayFieldConversion) { - DefineDefaultNamespace(); - AddUnindexedData(); - - VariantArray newValue = {Variant(3.33f), Variant(4.33), Variant(5.33), Variant(6.33)}; - checkFieldConversion(rt.reindexer, default_namespace, "array_field", newValue, newValue, reindexer::KeyValueType::Double{}, false); -} - -TEST_F(NsApi, TestUpdatePkFieldNoConditions) { - DefineDefaultNamespace(); - FillDefaultNamespace(); - - QueryResults qrCount; - Error err = rt.reindexer->Select("select count(*) from test_namespace", qrCount); - ASSERT_TRUE(err.ok()) << err.what(); - - QueryResults qr; - err = rt.reindexer->Select("update test_namespace set id = id + " + std::to_string(qrCount.TotalCount() + 100), qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_GT(qr.Count(), 0); - - int i = 0; - for (auto &it : qr) { - Item item = it.GetItem(false); - Variant intFieldVal = item[idIdxName]; - ASSERT_EQ(static_cast(intFieldVal), i + qrCount.TotalCount() + 100); - i++; - } -} - -TEST_F(NsApi, TestUpdateIndexArrayWithNull) { - DefineDefaultNamespace(); - FillDefaultNamespace(); - - QueryResults qr; - Error err = rt.reindexer->Select("update test_namespace set indexed_array_field = null where id = 1;", qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), 1); - - for (auto &it : qr) { - Item item = it.GetItem(false); - VariantArray fieldVal = item[indexedArrayField]; - ASSERT_TRUE(fieldVal.empty()); - } -} - -TEST_F(NsApi, TestUpdateIndexToSparse) { - Error err = rt.reindexer->InitSystemNamespaces(); - ASSERT_TRUE(err.ok()) << err.what(); - err = rt.reindexer->OpenNamespace(default_namespace); - ASSERT_TRUE(err.ok()) << err.what(); - const std::string compIndexName = idIdxName + "+" + stringField; - - DefineNamespaceDataset(default_namespace, {IndexDeclaration{idIdxName.c_str(), "hash", "int", IndexOpts().PK(), 0}, - IndexDeclaration{intField.c_str(), "hash", "int", IndexOpts(), 0}, - IndexDeclaration{stringField.c_str(), "hash", "string", IndexOpts(), 0}, - IndexDeclaration{compIndexName.c_str(), "hash", "composite", IndexOpts(), 0}}); - Item item = NewItem(default_namespace); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - const int i = rand() % 20; - item[idIdxName] = i * 2; - item[intField] = i; - item[stringField] = "str_" + std::to_string(i * 5); - Upsert(default_namespace, item); - err = Commit(default_namespace); - ASSERT_TRUE(err.ok()) << err.what(); - - QueryResults qr; - err = rt.reindexer->Select(Query(default_namespace).Where(intField, CondEq, i), qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), 1); - - qr.Clear(); - err = rt.reindexer->Select( - Query(default_namespace) - .WhereComposite(compIndexName, CondEq, {reindexer::VariantArray{Variant{i * 2}, Variant{"str_" + std::to_string(i * 5)}}}), - qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), 1); - - auto newIdx = reindexer::IndexDef(intField, "hash", "int", IndexOpts().Sparse()); - err = rt.reindexer->UpdateIndex(default_namespace, newIdx); - ASSERT_TRUE(err.ok()) << err.what(); - - qr.Clear(); - err = 
rt.reindexer->Select(Query(default_namespace).Where(intField, CondEq, i), qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), 1); - - qr.Clear(); - err = rt.reindexer->Select( - Query(default_namespace) - .WhereComposite(compIndexName, CondEq, {reindexer::VariantArray{Variant{i * 2}, Variant{"str_" + std::to_string(i * 5)}}}), - qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), 1); - - newIdx = reindexer::IndexDef(compIndexName, {idIdxName, stringField}, "hash", "composite", IndexOpts().Sparse()); - err = rt.reindexer->UpdateIndex(default_namespace, newIdx); - ASSERT_EQ(err.code(), errParams) << err.what(); - ASSERT_EQ(err.what(), "Composite index cannot be sparse. Use non-sparse composite instead"); - // Sparse composite do not have any purpose, so just make sure this index was not affected by updateIndex - - qr.Clear(); - err = rt.reindexer->Select(Query(default_namespace).Where(intField, CondEq, i), qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), 1); - - qr.Clear(); - err = rt.reindexer->Select( - Query(default_namespace) - .WhereComposite(compIndexName, CondEq, {reindexer::VariantArray{Variant{i * 2}, Variant{"str_" + std::to_string(i * 5)}}}), - qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), 1); - - newIdx = reindexer::IndexDef(intField, "hash", "int", IndexOpts()); - err = rt.reindexer->UpdateIndex(default_namespace, newIdx); - ASSERT_TRUE(err.ok()) << err.what(); - - qr.Clear(); - err = rt.reindexer->Select(Query(default_namespace).Where(intField, CondEq, i), qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), 1); - - qr.Clear(); - err = rt.reindexer->Select( - Query(default_namespace) - .WhereComposite(compIndexName, CondEq, {reindexer::VariantArray{Variant{i * 2}, Variant{"str_" + std::to_string(i * 5)}}}), - qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), 1); - - newIdx = reindexer::IndexDef(compIndexName, {idIdxName, stringField}, "hash", "composite", IndexOpts()); - err = rt.reindexer->UpdateIndex(default_namespace, newIdx); - ASSERT_TRUE(err.ok()) << err.what(); - - qr.Clear(); - err = rt.reindexer->Select(Query(default_namespace).Where(intField, CondEq, i), qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), 1); - - qr.Clear(); - err = rt.reindexer->Select( - Query(default_namespace) - .WhereComposite(compIndexName, CondEq, {reindexer::VariantArray{Variant{i * 2}, Variant{"str_" + std::to_string(i * 5)}}}), - qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), 1); -} - -TEST_F(NsApi, TestUpdateNonIndexFieldWithNull) { - DefineDefaultNamespace(); - AddUnindexedData(); - - QueryResults qr; - Error err = rt.reindexer->Select("update test_namespace set extra = null where id = 1001;", qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), 1); - - for (auto &it : qr) { - Item item = it.GetItem(false); - Variant fieldVal = item["extra"]; - ASSERT_TRUE(fieldVal.Type().Is()); - } -} - -TEST_F(NsApi, TestUpdateIndexedFieldWithNull) { - DefineDefaultNamespace(); - FillDefaultNamespace(); - - QueryResults qr; - Error err = rt.reindexer->Select("update test_namespace set string_field = null where id = 1;", qr); - EXPECT_TRUE(!err.ok()); -} - -TEST_F(NsApi, TestUpdateEmptyArrayField) { - DefineDefaultNamespace(); - FillDefaultNamespace(); - - QueryResults qr; - Error err = rt.reindexer->Select("update test_namespace set indexed_array_field = [] where id = 1;", qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), 1); - - 
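// A minimal sketch of the pattern shared by the UPDATE tests in this block: execute the UPDATE via
// an SQL string, assert success and the affected row count, then inspect the returned items.
// The helper name and its wiring are illustrative only and are not part of the test file.
auto execSqlUpdate = [&](const std::string &sql, size_t expectedCount, QueryResults &out) {
	Error e = rt.reindexer->Select(sql, out);
	ASSERT_TRUE(e.ok()) << e.what();
	ASSERT_EQ(out.Count(), expectedCount);
};
// Usage: QueryResults res; execSqlUpdate("update test_namespace set extra = null where id = 1001;", 1, res);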
Item item = qr.begin().GetItem(false); - Variant idFieldVal = item[idIdxName]; - ASSERT_TRUE(static_cast(idFieldVal) == 1); - - VariantArray arrayFieldVal = item[indexedArrayField]; - ASSERT_TRUE(arrayFieldVal.empty()); -} - -// Update 2 fields with one query in this order: object field, ordinary field of type String -// https://git.restream.ru/itv-backend/reindexer/-/tree/issue_777 -TEST_F(NsApi, TestUpdateObjectFieldWithScalar) { - // Define namespace's schema and fill with data - DefineDefaultNamespace(); - AddUnindexedData(); - - // Prepare and execute Update query - QueryResults qr; - Query q = Query(default_namespace) - .Set("int_field", 7) - .Set("extra", 8) - .SetObject("nested2", Variant(std::string(R"({"bonus2":13,"extra2":"new"})"))); - Error err = rt.reindexer->Update(q, qr); - // Make sure it executed successfully - ASSERT_TRUE(err.ok()) << err.what(); - - // Check in the loop that all the updated fields have correct values - for (auto it : qr) { - reindexer::Item item = it.GetItem(false); - - Variant intVal = item["int_field"]; - ASSERT_TRUE(intVal.Type().Is()); - ASSERT_TRUE(intVal.As() == 7); - Variant extraVal = item["extra"]; - ASSERT_TRUE(extraVal.Type().Is()); - ASSERT_TRUE(extraVal.As() == 8); - - std::string_view json = item.GetJSON(); - std::string_view::size_type pos = json.find(R"("nested2":{"bonus2":13,"extra2":"new"})"); - ASSERT_TRUE(pos != std::string_view::npos); - - Variant bonus2Val = item["nested2.bonus2"]; - ASSERT_TRUE(bonus2Val.Type().Is()); - ASSERT_TRUE(bonus2Val.As() == 13); - Variant extra2Val = item["nested2.extra2"]; - ASSERT_TRUE(extra2Val.Type().Is()); - ASSERT_TRUE(extra2Val.As() == "new"); - } -} - -TEST_F(NsApi, TestUpdateEmptyIndexedField) { - DefineDefaultNamespace(); - AddUnindexedData(); - - QueryResults qr; - Query q = Query(default_namespace) - .Where("id", CondEq, Variant(1001)) - .Set(emptyField, Variant("NEW GENERATION")) - .Set(indexedArrayField, {Variant(static_cast(4)), Variant(static_cast(5)), Variant(static_cast(6))}); - Error err = rt.reindexer->Update(q, qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), 1); - - QueryResults qr2; - err = rt.reindexer->Select("select * from test_namespace where id = 1001;", qr2); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr2.Count(), 1); - for (auto it : qr2) { - Item item = it.GetItem(false); - - Variant val = item[emptyField]; - ASSERT_TRUE(val.As() == "NEW GENERATION"); - - std::string_view json = item.GetJSON(); - ASSERT_TRUE(json.find_first_of("\"empty_field\":\"NEW GENERATION\"") != std::string::npos); - - VariantArray arrayVals = item[indexedArrayField]; - ASSERT_TRUE(arrayVals.size() == 3); - ASSERT_TRUE(arrayVals[0].As() == 4); - ASSERT_TRUE(arrayVals[1].As() == 5); - ASSERT_TRUE(arrayVals[2].As() == 6); - } -} - -TEST_F(NsApi, TestDropField) { - DefineDefaultNamespace(); - AddUnindexedData(); - - QueryResults qr; - Error err = rt.reindexer->Select("update test_namespace drop extra where id >= 1000 and id < 1010;", qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), 10); - - for (auto it : qr) { - Item item = it.GetItem(false); - VariantArray val = item["extra"]; - EXPECT_TRUE(val.empty()); - EXPECT_TRUE(item.GetJSON().find("extra") == std::string::npos); - } - - QueryResults qr2; - err = rt.reindexer->Select("update test_namespace drop nested.bonus where id >= 1005 and id < 1010;", qr2); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr2.Count(), 5); - - for (auto it : qr2) { - Item item = it.GetItem(false); - VariantArray val = 
item["nested.bonus"]; - EXPECT_TRUE(val.empty()); - EXPECT_TRUE(item.GetJSON().find("nested.bonus") == std::string::npos); - } - - QueryResults qr3; - err = rt.reindexer->Select("update test_namespace drop string_field where id >= 1000 and id < 1010;", qr3); - ASSERT_TRUE(!err.ok()); - - QueryResults qr4; - err = rt.reindexer->Select("update test_namespace drop nested2 where id >= 1030 and id <= 1040;", qr4); - ASSERT_TRUE(err.ok()) << err.what(); - for (auto it : qr4) { - Item item = it.GetItem(false); - EXPECT_TRUE(item.GetJSON().find("nested2") == std::string::npos); - } -} - -TEST_F(NsApi, TestUpdateFieldWithFunction) { - DefineDefaultNamespace(); - FillDefaultNamespace(); - - int64_t updateTime = std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()).count(); - - QueryResults qr; - Error err = rt.reindexer->Select( - "update test_namespace set int_field = SERIAL(), extra = SERIAL(), nested.timeField = NOW(msec) where id >= 0;", qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_GT(qr.Count(), 0); - - int i = 1; - for (auto &it : qr) { - Item item = it.GetItem(false); - Variant intFieldVal = item[intField]; - Variant extraFieldVal = item["extra"]; - Variant timeFieldVal = item["nested.timeField"]; - ASSERT_TRUE(intFieldVal.As() == i++) << intFieldVal.As(); - ASSERT_TRUE(intFieldVal.As() == extraFieldVal.As()) << extraFieldVal.As(); - ASSERT_TRUE(timeFieldVal.As() >= updateTime); - } -} - -TEST_F(NsApi, TestUpdateFieldWithExpressions) { - DefineDefaultNamespace(); - FillDefaultNamespace(); - - QueryResults qr; - Error err = rt.reindexer->Select( - "update test_namespace set int_field = ((7+8)*(4-3))/3, extra = (SERIAL() + 1)*3, nested.timeField = int_field - 1 where id >= " - "0;", - qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_GT(qr.Count(), 0); - - int i = 1; - for (auto &it : qr) { - Item item = it.GetItem(false); - Variant intFieldVal = item[intField]; - Variant extraFieldVal = item["extra"]; - Variant timeFieldVal = item["nested.timeField"]; - ASSERT_TRUE(intFieldVal.As() == 5) << intFieldVal.As(); - ASSERT_TRUE(extraFieldVal.As() == (i + 1) * 3) << extraFieldVal.As(); - ASSERT_TRUE(timeFieldVal.As() == 4) << timeFieldVal.As(); - ++i; - } -} - -static void checkQueryDsl(const Query &src) { - Query dst; - const std::string dsl = src.GetJSON(); - Error err = dst.FromJSON(dsl); - EXPECT_TRUE(err.ok()) << err.what(); - bool objectValues = false; - if (src.UpdateFields().size() > 0) { - EXPECT_TRUE(src.UpdateFields().size() == dst.UpdateFields().size()); - for (size_t i = 0; i < src.UpdateFields().size(); ++i) { - if (src.UpdateFields()[i].Mode() == FieldModeSetJson) { - ASSERT_EQ(src.UpdateFields()[i].Values().size(), 1); - EXPECT_TRUE(src.UpdateFields()[i].Values().front().Type().Is()); - ASSERT_EQ(dst.UpdateFields()[i].Values().size(), 1); - EXPECT_TRUE(dst.UpdateFields()[i].Values().front().Type().Is()); - reindexer::WrSerializer wrser1; - reindexer::prettyPrintJSON(reindexer::giftStr(std::string_view(src.UpdateFields()[i].Values().front())), wrser1); - reindexer::WrSerializer wrser2; - reindexer::prettyPrintJSON(reindexer::giftStr(std::string_view(dst.UpdateFields()[i].Values().front())), wrser2); - EXPECT_TRUE(wrser1.Slice() == wrser2.Slice()); - objectValues = true; - } - } - } - if (objectValues) { - EXPECT_EQ(src.Entries(), dst.Entries()); - EXPECT_EQ(src.aggregations_, dst.aggregations_); - EXPECT_EQ(src.NsName(), dst.NsName()); - EXPECT_EQ(src.sortingEntries_, dst.sortingEntries_); - EXPECT_EQ(src.CalcTotal(), dst.CalcTotal()); - 
EXPECT_EQ(src.Offset(), dst.Offset()); - EXPECT_EQ(src.Limit(), dst.Limit()); - EXPECT_EQ(src.GetDebugLevel(), dst.GetDebugLevel()); - EXPECT_EQ(src.GetStrictMode(), dst.GetStrictMode()); - EXPECT_EQ(src.forcedSortOrder_, dst.forcedSortOrder_); - EXPECT_EQ(src.SelectFilters(), dst.SelectFilters()); - EXPECT_EQ(src.selectFunctions_, dst.selectFunctions_); - EXPECT_EQ(src.GetJoinQueries(), dst.GetJoinQueries()); - EXPECT_EQ(src.GetMergeQueries(), dst.GetMergeQueries()); - } else { - EXPECT_EQ(dst, src); - } -} - -TEST_F(NsApi, TestModifyQueriesSqlEncoder) { - const std::string sqlUpdate = - "UPDATE ns SET field1 = 'mrf',field2 = field2+1,field3 = ['one','two','three','four','five'] WHERE a = true AND location = " - "'msk'"; - Query q1 = Query::FromSQL(sqlUpdate); - EXPECT_EQ(q1.GetSQL(), sqlUpdate); - checkQueryDsl(q1); - - const std::string sqlDrop = "UPDATE ns DROP field1,field2 WHERE a = true AND location = 'msk'"; - Query q2 = Query::FromSQL(sqlDrop); - EXPECT_EQ(q2.GetSQL(), sqlDrop); - checkQueryDsl(q2); - - const std::string sqlUpdateWithObject = - R"(UPDATE ns SET field = {"id":0,"name":"apple","price":1000,"nested":{"n_id":1,"desription":"good","array":[{"id":1,"description":"first"},{"id":2,"description":"second"},{"id":3,"description":"third"}]},"bonus":7} WHERE a = true AND location = 'msk')"; - Query q3 = Query::FromSQL(sqlUpdateWithObject); - EXPECT_EQ(q3.GetSQL(), sqlUpdateWithObject); - checkQueryDsl(q3); - - const std::string sqlTruncate = R"(TRUNCATE ns)"; - Query q4 = Query::FromSQL(sqlTruncate); - EXPECT_EQ(q4.GetSQL(), sqlTruncate); - checkQueryDsl(q4); - - const std::string sqlArrayAppend = R"(UPDATE ns SET array = array||[1,2,3]||array2||objects[0].nested.prices[0])"; - Query q5 = Query::FromSQL(sqlArrayAppend); - EXPECT_EQ(q5.GetSQL(), sqlArrayAppend); - checkQueryDsl(q5); - - const std::string sqlIndexUpdate = R"(UPDATE ns SET objects[0].nested.prices[*] = 'NE DOROGO!')"; - Query q6 = Query::FromSQL(sqlIndexUpdate); - EXPECT_EQ(q6.GetSQL(), sqlIndexUpdate); - checkQueryDsl(q6); - - const std::string sqlSpeccharsUpdate = R"(UPDATE ns SET f1 = 'HELLO\n\r\b\f',f2 = '\t',f3 = '\"')"; - Query q7 = Query::FromSQL(sqlSpeccharsUpdate); - EXPECT_EQ(q7.GetSQL(), sqlSpeccharsUpdate); - checkQueryDsl(q7); -} - -static void generateObject(reindexer::JsonBuilder &builder, const std::string &prefix, ReindexerApi *rtapi) { - builder.Put(prefix + "ID", rand() % 1000); - builder.Put(prefix + "Name", rtapi->RandString()); - builder.Put(prefix + "Rating", rtapi->RandString()); - builder.Put(prefix + "Description", rtapi->RandString()); - builder.Put(prefix + "Price", rand() % 1000 + 100); - builder.Put(prefix + "IMDB", 7.77777777777f); - builder.Put(prefix + "Subsription", bool(rand() % 100 > 50 ? 
1 : 0)); - { - auto idsArray = builder.Array(prefix + "IDS"); - for (auto id : rtapi->RandIntVector(10, 10, 1000)) idsArray.Put(0, id); - } - { - auto homogeneousArray = builder.Array(prefix + "HomogeneousValues"); - for (int i = 0; i < 20; ++i) { - if (i % 2 == 0) { - homogeneousArray.Put(0, rand()); - } else { - if (i % 5 == 0) { - homogeneousArray.Put(0, 234.778f); - } else { - homogeneousArray.Put(0, rtapi->RandString()); - } - } - } - } -} - -void addObjectsArray(reindexer::JsonBuilder &builder, bool withInnerArray, ReindexerApi *rtapi) { - size_t size = rand() % 10 + 5; - reindexer::JsonBuilder array = builder.Array("object"); - for (size_t i = 0; i < size; ++i) { - reindexer::JsonBuilder obj = array.Object(0); - generateObject(obj, "item", rtapi); - if (withInnerArray && i % 5 == 0) { - addObjectsArray(obj, false, rtapi); - } - } -} - -TEST_F(NsApi, MsgPackEncodingTest) { - DefineDefaultNamespace(); - - reindexer::WrSerializer wrSer1; - - std::vector items; - for (int i = 0; i < 100; ++i) { - reindexer::WrSerializer wrser; - reindexer::JsonBuilder jsonBuilder(wrser); - jsonBuilder.Put("id", i); - jsonBuilder.Put("sparse_field", rand() % 1000); - jsonBuilder.Put("superID", i * 2); - jsonBuilder.Put("superName", RandString()); - { - auto priceArray = jsonBuilder.Array("superPrices"); - for (auto price : RandIntVector(10, 10, 1000)) priceArray.Put(0, price); - } - { - reindexer::JsonBuilder objectBuilder = jsonBuilder.Object("nested1"); - generateObject(objectBuilder, "nested1", this); - addObjectsArray(objectBuilder, true, this); - } - jsonBuilder.Put("superBonus", RuRandString()); - addObjectsArray(jsonBuilder, false, this); - jsonBuilder.End(); - - Item item = NewItem(default_namespace); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - - Error err = item.FromJSON(wrser.Slice()); - ASSERT_TRUE(err.ok()) << err.what(); - Upsert(default_namespace, item); - - reindexer::WrSerializer wrSer2; - err = item.GetMsgPack(wrSer2); - ASSERT_TRUE(err.ok()) << err.what(); - - err = item.GetMsgPack(wrSer1); - ASSERT_TRUE(err.ok()) << err.what(); - - size_t offset = 0; - Item item2 = NewItem(default_namespace); - err = item2.FromMsgPack(std::string_view(reinterpret_cast(wrSer2.Buf()), wrSer2.Len()), offset); - ASSERT_TRUE(err.ok()) << err.what(); - - std::string json1(item.GetJSON()); - std::string json2(item2.GetJSON()); - ASSERT_TRUE(json1 == json2); - items.emplace_back(json2); - } - - QueryResults qr; - int i = 0; - size_t length = wrSer1.Len(); - size_t offset = 0; - while (offset < length) { - Item item = NewItem(default_namespace); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - - Error err = item.FromMsgPack(std::string_view(reinterpret_cast(wrSer1.Buf()), wrSer1.Len()), offset); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->Update(default_namespace, item, qr); - ASSERT_TRUE(err.ok()) << err.what(); - - std::string json(item.GetJSON()); - ASSERT_EQ(json, items[i++]); - } - - reindexer::WrSerializer wrSer3; - for (auto &it : qr) { - const auto err = it.GetMsgPack(wrSer3, false); - ASSERT_TRUE(err.ok()) << err.what(); - } - - i = 0; - offset = 0; - while (offset < length) { - Item item = NewItem(default_namespace); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - - Error err = item.FromMsgPack(std::string_view(reinterpret_cast(wrSer3.Buf()), wrSer3.Len()), offset); - ASSERT_TRUE(err.ok()) << err.what(); - - std::string json(item.GetJSON()); - ASSERT_EQ(json, items[i++]); - } -} - -TEST_F(NsApi, MsgPackFromJson) { - DefineDefaultNamespace(); 
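// A minimal sketch of the MsgPack round trip exercised by MsgPackEncodingTest above, reduced to a
// single item; the JSON payload is a placeholder and assumes 'id' is the only field that matters.
{
	Item src = NewItem(default_namespace);
	ASSERT_TRUE(src.Status().ok()) << src.Status().what();
	Error e = src.FromJSON(R"({"id": 1})");
	ASSERT_TRUE(e.ok()) << e.what();

	reindexer::WrSerializer ser;
	e = src.GetMsgPack(ser);
	ASSERT_TRUE(e.ok()) << e.what();

	size_t offset = 0;
	Item dst = NewItem(default_namespace);
	ASSERT_TRUE(dst.Status().ok()) << dst.Status().what();
	e = dst.FromMsgPack(ser.Slice(), offset);
	ASSERT_TRUE(e.ok()) << e.what();
	ASSERT_EQ(std::string(src.GetJSON()), std::string(dst.GetJSON()));
}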
- const std::string json = R"xxx({ - "total_us": 100, - "prepare_us": 12, - "indexes_us": 48, - "postprocess_us": 6, - "loop_us": 32, - "general_sort_us": 0, - "sort_index": "-", - "sort_by_uncommitted_index": false, - "selectors": [ - { - "field": "search", - "keys": 1, - "comparators": 0, - "cost": 18446744073709552000, - "matched": 90, - "method": "index", - "type": "Unsorted" - } - ] - })xxx"; - reindexer::WrSerializer msgpackSer; - reindexer::MsgPackBuilder msgpackBuilder(msgpackSer, reindexer::ObjType::TypeObject, 1); - msgpackBuilder.Json("my_json", json); - msgpackBuilder.End(); - - reindexer::WrSerializer jsonSer; - reindexer::JsonBuilder jsonBuilder(jsonSer); - jsonBuilder.Json("my_json", json); - jsonBuilder.End(); - - Item item1 = NewItem(default_namespace); - ASSERT_TRUE(item1.Status().ok()) << item1.Status().what(); - - size_t offset = 0; - Error err = item1.FromMsgPack(msgpackSer.Slice(), offset); - ASSERT_TRUE(err.ok()) << err.what(); - - Item item2 = NewItem(default_namespace); - ASSERT_TRUE(item2.Status().ok()) << item2.Status().what(); - - err = item2.FromJSON(jsonSer.Slice()); - ASSERT_TRUE(err.ok()) << err.what(); - - std::string json1(item1.GetJSON()); - std::string json2(item2.GetJSON()); - ASSERT_TRUE(json1 == json2); -} - -TEST_F(NsApi, DeleteLastItems) { - // Check for bug with memory access after items removing - DefineDefaultNamespace(); - FillDefaultNamespace(2); - QueryResults qr; - auto err = rt.reindexer->Delete(Query(default_namespace), qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), 2); -} - -TEST_F(NsApi, IncorrectNsName) { - auto check = [&](const std::vector &names, auto func) { - for (const auto &v : names) { - func(v); - } - }; - std::vector variants = {"tes@t1", "@test1", "test1@", "tes#t1", "#test1", "test1#", "test 1", - " test1", "test1 ", "'test1'", "\"test1\"", "test1", "/test1", "test1,test2"}; - - auto open = [&](const std::string &name) { - Error err = rt.reindexer->OpenNamespace(name); - ASSERT_FALSE(err.ok()); - ASSERT_EQ(err.what(), "Namespace name contains invalid character. Only alphas, digits,'_','-', are allowed"); - }; - check(variants, open); - - variants.emplace_back(reindexer::kConfigNamespace); - auto add = [&](const std::string &name) { - reindexer::NamespaceDef nsDef(name); - Error err = rt.reindexer->AddNamespace(nsDef); - ASSERT_FALSE(err.ok()); - ASSERT_EQ(err.what(), "Namespace name contains invalid character. Only alphas, digits,'_','-', are allowed"); - }; - check(variants, add); - - auto rename = [&](const std::string &name) { - const std::string kNsName("test3"); - reindexer::NamespaceDef nsDef(kNsName); - Error err = rt.reindexer->AddNamespace(nsDef); - ASSERT_TRUE(err.ok()) << err.what(); - err = rt.reindexer->RenameNamespace(kNsName, name); - ASSERT_FALSE(err.ok()); - ASSERT_EQ(err.what(), "Namespace name contains invalid character. 
Only alphas, digits,'_','-', are allowed (" + name + ")"); - err = rt.reindexer->DropNamespace(kNsName); - ASSERT_TRUE(err.ok()) << err.what(); - }; - check(variants, rename); -} - -TEST_F(NsApi, TagsmatchersMerge) { - using reindexer::TagsMatcher; - using reindexer::PayloadType; - using reindexer::PayloadFieldType; - - std::vector tms; - - tms.emplace_back(); // -V760 - tms.back().path2tag("id", true); - tms.back().path2tag("string", true); - tms.back().path2tag("data", true); - tms.back().path2tag("data.value", true); - - tms.emplace_back(); - tms.back().path2tag("id", true); - tms.back().path2tag("string", true); - tms.back().path2tag("data", true); - tms.back().path2tag("data.value", true); - tms.back().path2tag("additional_data", true); - - tms.emplace_back(); - tms.back().path2tag("id", true); - tms.back().path2tag("something_else", true); - - tms.emplace_back(); - tms.back().path2tag("id", true); - tms.back().path2tag("string", true); - tms.back().path2tag("data", true); - tms.back().path2tag("data.value", true); - tms.back().path2tag("yet_another_additional_data", true); - - PayloadType pt("tmp", {PayloadFieldType(reindexer::KeyValueType::String{}, "-tuple", {}, false)}); - auto resultTm = TagsMatcher::CreateMergedTagsMatcher(pt, tms); - - EXPECT_EQ(resultTm.name2tag("id"), 1); - EXPECT_EQ(resultTm.name2tag("string"), 2); - EXPECT_EQ(resultTm.name2tag("data"), 3); - EXPECT_EQ(resultTm.name2tag("value"), 4); - EXPECT_EQ(resultTm.name2tag("additional_data"), 5); - EXPECT_EQ(resultTm.name2tag("something_else"), 6); - EXPECT_EQ(resultTm.name2tag("yet_another_additional_data"), 7); - - EXPECT_TRUE(tms[0].IsSubsetOf(resultTm)); - EXPECT_TRUE(tms[1].IsSubsetOf(resultTm)); - EXPECT_FALSE(tms[2].IsSubsetOf(resultTm)); - EXPECT_FALSE(tms[3].IsSubsetOf(resultTm)); -} diff --git a/cpp_src/gtests/tests/unit/protobuf_test.cc b/cpp_src/gtests/tests/unit/protobuf_test.cc deleted file mode 100644 index 2cc866167..000000000 --- a/cpp_src/gtests/tests/unit/protobuf_test.cc +++ /dev/null @@ -1,794 +0,0 @@ -#ifdef WITH_PROTOBUF - -#include "conversion.pb.h" -#include "core/cjson/jsonbuilder.h" -#include "core/cjson/protobufbuilder.h" -#include "easyarrays.pb.h" -#include "reindexer_api.h" -#include "schema.pb.h" - -const int64_t KIdValue = 13; -const std::string kNameValue = "John White Snow"; -const int64_t kAgeValue = 21; -const std::string kEmailValue = "john_white_as_hell@mail.ru"; -const int64_t kWeightValue = 95; -const std::string kNumberValue = "8-800-2000-600"; -const int64_t kTypeValue = 1; -const std::string kCityValue = "Mapletown"; -const std::string kStreetValue = "Miracle Street, "; -const std::string kPostalCodeValue = "9745 123 "; -const double kSalaryValue = 11238761238768.232342342; - -TEST_F(ReindexerApi, ProtobufConvesrionTest) { - // Check protobuf for basic types (int/double/array) and double <-> int conversion - // !!! This test is using schema from cpp_src/gtests/tests/proto/conversion.proto. - // !!! Protobuf indexes are not constant and depend from the internal reindexer::Schema implementation. 
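// A minimal sketch of the schema plumbing this test depends on: attach a JSON schema to the
// namespace, then ask reindexer to render it as a protobuf schema. The namespace name and schema
// body below are placeholders.
{
	const std::string_view ns = "sketch_namespace";
	Error e = rt.reindexer->OpenNamespace(ns);
	ASSERT_TRUE(e.ok()) << e.what();
	e = rt.reindexer->SetSchema(ns, R"({"type":"object","required":["id"],"properties":{"id":{"type":"integer"}}})");
	ASSERT_TRUE(e.ok()) << e.what();
	std::string protoSchema;
	e = rt.reindexer->GetSchema(ns, ProtobufSchemaType, protoSchema);
	ASSERT_TRUE(e.ok()) << e.what();
}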
- // clang-format off - const std::string schema = R"z( - { - "type": "object", - "required": [ - "id", - "numbers" - ], - "properties": { - "id": { - "type": "integer" - }, - "numbers": { - "items": { - "type": "integer" - }, - "type": "array" - } - } - })z"; - // clang-format on - - const std::string_view nsName = "conversion_namespace"; - Error err = rt.reindexer->OpenNamespace(nsName); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->SetSchema(nsName, schema); - ASSERT_TRUE(err.ok()) << err.what(); - - std::string protobufSchema; - err = rt.reindexer->GetSchema(nsName, ProtobufSchemaType, protobufSchema); - ASSERT_TRUE(err.ok()) << err.what(); - - std::vector numbers; - - reindexer::WrSerializer wrser; - reindexer::JsonBuilder jsonBuilder(wrser); - jsonBuilder.Put("id", 1.1111f); - { - auto nums = jsonBuilder.Array("numbers"); - for (int i = 0; i < 10; ++i) { - numbers.emplace_back(double(rand() + 10 + i) + 0.11111f); - nums.Put(0, numbers.back()); - } - } - jsonBuilder.End(); - - Item item = rt.reindexer->NewItem(nsName); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - - err = item.FromJSON(wrser.Slice()); - ASSERT_TRUE(err.ok()) << err.what(); - - reindexer::WrSerializer rrser; - err = item.GetProtobuf(rrser); - ASSERT_TRUE(err.ok()) << err.what(); - - conversion_namespace testNs; - ASSERT_TRUE(testNs.ParseFromArray(rrser.Buf(), rrser.Len())); - - EXPECT_EQ(testNs.id(), 1); - ASSERT_EQ(testNs.numbers_size(), int(numbers.size())); - for (size_t i = 0; i < numbers.size(); ++i) { - EXPECT_EQ(testNs.numbers(i), int64_t(numbers[i])); - } -} - -TEST_F(ReindexerApi, ProtobufEasyArrayTest) { - // Check protobuf for arrays and nested objects - // !!! This test is using schema from cpp_src/gtests/tests/proto/easyarrays.proto. - // !!! Protobuf indexes are not constant and depend from the internal reindexer::Schema implementation. 
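// A minimal sketch of the JsonBuilder nesting used to build this test's payload: objects and
// arrays are opened as scoped sub-builders and closed when they go out of scope (or via End()).
// Field names and values are placeholders.
{
	reindexer::WrSerializer ser;
	reindexer::JsonBuilder json(ser);
	json.Put("id", 1);
	{
		auto obj = json.Object("object_of_array");
		auto nums = obj.Array("nums");
		for (int v : {1, 2, 3}) nums.Put(0, v);
	}
	json.End();
	// ser.Slice() now holds the JSON document with the nested array.
}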
- // clang-format off - const std::string schema = R"z( - { - "type": "object", - "required": [ - "id", - "object_of_array" - ], - "properties": { - "id": { - "type": "integer" - }, - "object_of_array": { - "additionalProperties": false, - "type": "object", - "required": ["nums"], - "properties": { - "nums": { - "items": { - "type": "integer" - }, - "type": "array" - }, - "strings": { - "type": "array", - "items": { - "type": "string" - } - } - } - } - } - })z"; - // clang-format on - Error err = rt.reindexer->OpenNamespace(default_namespace); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->AddIndex(default_namespace, reindexer::IndexDef("id", {"id"}, "hash", "int", IndexOpts().PK())); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->SetSchema(default_namespace, schema); - ASSERT_TRUE(err.ok()) << err.what(); - - std::string protobufSchema; - err = rt.reindexer->GetSchema(default_namespace, ProtobufSchemaType, protobufSchema); - ASSERT_TRUE(err.ok()) << err.what(); - - std::vector numVals; - std::vector stringVals; - - reindexer::WrSerializer wrser; - reindexer::JsonBuilder jsonBuilder(wrser); - jsonBuilder.Put("id", 1); - { - auto nested = jsonBuilder.Object("object_of_array"); - { - auto nums = nested.Array("nums"); - for (int i = 0; i < 10; ++i) { - numVals.emplace_back(rand() + 10 + i); - nums.Put(0, numVals.back()); - } - } - - { - auto strings = nested.Array("strings"); - for (int i = 0; i < 10; ++i) { - stringVals.emplace_back(RandString()); - strings.Put(0, stringVals.back()); - } - } - } - jsonBuilder.End(); - - Item item = rt.reindexer->NewItem(default_namespace); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - - err = item.FromJSON(wrser.Slice()); - ASSERT_TRUE(err.ok()) << err.what(); - - reindexer::WrSerializer rrser; - err = item.GetProtobuf(rrser); - ASSERT_TRUE(err.ok()) << err.what(); - - Item item2 = rt.reindexer->NewItem(default_namespace); - ASSERT_TRUE(item2.Status().ok()) << item2.Status().what(); - err = item2.FromProtobuf(rrser.Slice()); - ASSERT_TRUE(err.ok()) << err.what() << wrser.Slice(); - ASSERT_TRUE(item.GetJSON() == item2.GetJSON()) << item.GetJSON() << std::endl << std::endl << item2.GetJSON() << std::endl; - - test_namespace testNs; - ASSERT_TRUE(testNs.ParseFromArray(rrser.Buf(), rrser.Len())); - - EXPECT_TRUE(testNs.id() == 1); - EXPECT_TRUE(testNs.object_of_array().strings().size() == int(stringVals.size())); - for (size_t i = 0; i < stringVals.size(); ++i) { - EXPECT_TRUE(testNs.object_of_array().strings(i) == stringVals[i]); - } - EXPECT_TRUE(testNs.object_of_array().nums().size() == int(numVals.size())); - for (size_t i = 0; i < numVals.size(); ++i) { - EXPECT_TRUE(testNs.object_of_array().nums(i) == numVals[i]); - } -} - -TEST_F(ReindexerApi, ProtobufSchemaFromNsSchema) { - Error err = rt.reindexer->OpenNamespace(default_namespace); - ASSERT_TRUE(err.ok()) << err.what(); - - // clang-format off - const std::string jsonschema = R"xxx( - { - "required": [ - "Collection", - "floatField", - "intField", - "stringField", - "boolField", - "nested1", - "nested2", - "nested3" - ], - "properties": { - "Collection": { - "items": { - "type": "integer" - }, - "type": "array" - }, - "floatField": { - "type": "number" - }, - "intField": { - "type": "integer" - }, - "stringField": { - "type": "string" - }, - "boolField": { - "type": "boolean" - }, - "nested3": { - "required": [ - "bigField", - "biggerField", - "hugeField" - ], - "properties": { - "bigField": { - "type": "string" - }, - "biggerField": { - "type": "number" - 
}, - "hugeField": { - "type": "integer" - } - }, - "additionalProperties": false, - "type": "object", - "x-go-type": "NestedStruct3" - }, - "nested1": { - "required": [ - "field1", - "field2", - "field3", - "nested2" - ], - "properties": { - "field1": { - "type": "string" - }, - "field2": { - "type": "number" - }, - "field3": { - "type": "integer" - }, - "nested2": { - "required": [ - "field4", - "field5", - "field6", - "oneMoreNested" - ], - "properties": { - "field4": { - "type": "string" - }, - "field5": { - "type": "number" - }, - "field6": { - "type": "integer" - }, - "oneMoreNested": { - "required": [ - "one", - "two", - "three", - "four" - ], - "properties": { - "one": { - "type": "integer" - }, - "two": { - "type": "number" - }, - "three": { - "type": "boolean" - }, - "four": { - "type": "array", - "items": { - "required": [ - "bigField", - "biggerField", - "hugeField" - ], - "properties": { - "bigField": { - "type": "string" - }, - "biggerField": { - "type": "number" - }, - "hugeField": { - "type": "integer" - } - }, - "additionalProperties": false, - "type": "object", - "x-go-type": "NestedStruct3" - } - } - }, - "additionalProperties": false, - "type": "object", - "x-go-type": "NNested" - } - }, - "additionalProperties": false, - "type": "object" - } - }, - "additionalProperties": false, - "type": "object" - } - }, - "additionalProperties": false, - "type": "object" - } )xxx"; - // clang-format on - - err = rt.reindexer->SetSchema(default_namespace, jsonschema); - ASSERT_TRUE(err.ok()) << err.what(); - - std::string protobufSchema; - err = rt.reindexer->GetSchema(default_namespace, ProtobufSchemaType, protobufSchema); - ASSERT_TRUE(err.ok()) << err.what(); - - reindexer::WrSerializer wrser; - reindexer::JsonBuilder jsonBuilder(wrser); - jsonBuilder.Put("floatField", 5.55f); - jsonBuilder.Put("intField", 5); - jsonBuilder.Put("stringField", "five"); - jsonBuilder.Put("boolField", true); - { - auto nested3 = jsonBuilder.Object("nested3"); - nested3.Put("bigField", "big big real big"); - nested3.Put("biggerField", 77.77); - nested3.Put("hugeField", 33); - } - { - auto nested1 = jsonBuilder.Object("nested1"); - nested1.Put("field1", "one"); - nested1.Put("field2", 222.222); - nested1.Put("field3", 333); - { - auto nested2 = nested1.Object("nested2"); - nested2.Put("field4", "four"); - nested2.Put("field5", 55.55); - nested2.Put("field6", 66); - { - auto oneMoreNested = nested2.Object("oneMoreNested"); - oneMoreNested.Put("one", 1); - oneMoreNested.Put("two", 2.22); - oneMoreNested.Put("three", true); - { - auto four = oneMoreNested.Array("four"); - for (size_t i = 0; i < 10; ++i) { - auto item = four.Object(0); - item.Put("bigField", RandString()); - item.Put("biggerField", double(11.11 + rand())); - item.Put("hugeField", int(33 + rand())); - } - } - } - } - } - auto collection = jsonBuilder.Array("Collection"); - for (int i = 0; i < 10; ++i) collection.Put(0, i); - collection.End(); - jsonBuilder.End(); - - Item item = rt.reindexer->NewItem(default_namespace); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - - err = item.FromJSON(wrser.Slice()); - ASSERT_TRUE(err.ok()) << err.what(); - - reindexer::WrSerializer rrser; - err = item.GetProtobuf(rrser); - ASSERT_TRUE(err.ok()) << err.what(); - - Item item2 = rt.reindexer->NewItem(default_namespace); - ASSERT_TRUE(item2.Status().ok()) << item2.Status().what(); - err = item2.FromProtobuf(rrser.Slice()); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_TRUE(item.GetJSON() == item2.GetJSON()); -} - -TEST_F(ReindexerApi, 
ProtobufEncodingTest) { - Person person; - person.set_id(KIdValue); - person.set_name(kNameValue); - person.set_age(kAgeValue); - person.set_email(kEmailValue); - person.set_weight(kWeightValue); - person.set_salary(kSalaryValue); - - Person::Address* address = person.mutable_address(); - for (size_t j = 0; j < 10; ++j) { - Person::Address::PhoneNumber* phoneNumber = address->add_phones(); - phoneNumber->set_number(kNumberValue + std::to_string(j)); - phoneNumber->set_type(kTypeValue + j); - } - for (size_t j = 0; j < 20; ++j) { - Person::Address::Home* home = address->add_homes(); - home->set_city(kCityValue + std::to_string(j)); - home->set_street(kStreetValue + std::to_string(j)); - } - for (size_t i = 0; i < 20; ++i) { - std::string* postalCodeItem = address->add_postalcodes(); - *postalCodeItem = kPostalCodeValue + std::to_string(i); - } - for (size_t i = 0; i < 5; ++i) { - person.add_friends(i); - } - for (size_t i = 0; i < 10; ++i) { - person.add_bonuses(i); - } - for (int i = 0; i < 10; ++i) { - person.add_indexedpackeddouble(double(i) + 0.55f); - } - for (int i = 0; i < 10; ++i) { - person.add_indexedunpackeddouble(std::to_string(i + 5)); - } - person.set_enabled(true); - - reindexer::WrSerializer wrser; - reindexer::ProtobufBuilder builder(&wrser); - builder.Put(person.kNameFieldNumber, kNameValue); - builder.Put(person.kIdFieldNumber, KIdValue); - builder.Put(person.kAgeFieldNumber, kAgeValue); - builder.Put(person.kWeightFieldNumber, kWeightValue); - builder.Put(person.kEmailFieldNumber, kEmailValue); - - auto addressBuilder = builder.Object(person.kAddressFieldNumber); - auto phones = addressBuilder.ArrayNotPacked(address->kPhonesFieldNumber); - for (size_t i = 0; i < 10; ++i) { - auto phone = phones.Object(0); - phone.Put(Person_Address_PhoneNumber::kNumberFieldNumber, kNumberValue + std::to_string(i)); - phone.Put(Person_Address_PhoneNumber::kTypeFieldNumber, int64_t(kTypeValue + i)); - } - phones.End(); - auto homes = addressBuilder.ArrayNotPacked(address->kHomesFieldNumber); - for (size_t i = 0; i < 20; ++i) { - auto home = homes.Object(0); - home.Put(Person_Address_Home::kCityFieldNumber, kCityValue + std::to_string(i)); - home.Put(Person_Address_Home::kStreetFieldNumber, kStreetValue + std::to_string(i)); - } - homes.End(); - auto postalCodes = addressBuilder.ArrayNotPacked(address->kPostalcodesFieldNumber); - for (size_t i = 0; i < 20; ++i) { - postalCodes.Put(0, kPostalCodeValue + std::to_string(i)); - } - postalCodes.End(); - addressBuilder.End(); - auto friends = builder.ArrayNotPacked(person.kFriendsFieldNumber); - for (int64_t i = 0; i < 5; ++i) { - friends.Put(0, i); - } - friends.End(); - builder.Put(person.kSalaryFieldNumber, kSalaryValue); - auto bonuses = builder.ArrayPacked(person.kBonusesFieldNumber); - for (int64_t i = 0; i < 10; ++i) { - bonuses.Put(person.kBonusesFieldNumber, i); - } - bonuses.End(); - - auto indexedPackedDouble = builder.ArrayPacked(person.kIndexedPackedDoubleFieldNumber); - for (int i = 0; i < 10; ++i) { - indexedPackedDouble.Put(person.kIndexedPackedDoubleFieldNumber, double(i) + 0.55f); - } - indexedPackedDouble.End(); - - auto indexedUnpackedDouble = builder.ArrayNotPacked(person.kIndexedUnpackedDoubleFieldNumber); - for (int i = 0; i < 10; ++i) { - indexedUnpackedDouble.Put(0, std::to_string(5 + i)); - } - indexedUnpackedDouble.End(); - - builder.Put(person.kEnabledFieldNumber, true); - - builder.End(); - - Person person2; - person2.ParseFromArray(wrser.Buf(), wrser.Len()); - EXPECT_TRUE(person.id() == person2.id()); - 
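// A minimal sketch of the ProtobufBuilder usage verified above: scalar fields go in via
// Put(fieldNumber, value), repeated fields via ArrayPacked()/ArrayNotPacked(), and End() finalizes
// the buffer. The field numbers below are placeholders, not the Person message tags.
{
	reindexer::WrSerializer ser;
	reindexer::ProtobufBuilder pb(&ser);
	pb.Put(1, std::string("name"));
	pb.Put(2, int64_t(42));
	auto packed = pb.ArrayPacked(3);
	for (int64_t v = 0; v < 3; ++v) packed.Put(3, v);
	packed.End();
	pb.End();
	// ser.Buf()/ser.Len() now hold the wire-format payload for ParseFromArray().
}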
EXPECT_TRUE(person.name() == person2.name()); - EXPECT_TRUE(person.age() == person2.age()); - EXPECT_TRUE(person.email() == person2.email()); - EXPECT_TRUE(person.weight() == person2.weight()); - EXPECT_TRUE(person.salary() == person2.salary()); - ASSERT_TRUE(person.address().homes_size() == person2.address().homes_size()); - for (int j = 0; j < person.address().homes_size(); ++j) { - const auto& home = person.address().homes(j); - const auto& home2 = person2.address().homes(j); - EXPECT_TRUE(home.city() == home2.city()); - EXPECT_TRUE(home.street() == home2.street()); - } - ASSERT_TRUE(person.address().phones_size() == person2.address().phones_size()); - for (int j = 0; j < person.address().phones_size(); ++j) { - const auto& phone = person.address().phones(j); - const auto& phone2 = person2.address().phones(j); - EXPECT_TRUE(phone.number() == phone2.number()); - EXPECT_TRUE(phone.type() == phone2.type()); - } - ASSERT_TRUE(person.address().postalcodes_size() == person2.address().postalcodes_size()); - for (int j = 0; j < person.address().postalcodes_size(); ++j) { - EXPECT_TRUE(person.address().postalcodes(j) == person2.address().postalcodes(j)); - } - ASSERT_TRUE(person.friends_size() == person2.friends_size()); - for (int j = 0; j < person.friends_size(); ++j) { - EXPECT_TRUE(person.friends(j) == person2.friends(j)); - } - ASSERT_TRUE(person.bonuses_size() == person2.bonuses_size()); - for (int j = 0; j < person.bonuses_size(); ++j) { - EXPECT_TRUE(person.bonuses(j) == person2.bonuses(j)); - } - ASSERT_TRUE(person.indexedpackeddouble_size() == person2.indexedpackeddouble_size()); - for (int j = 0; j < person.indexedpackeddouble_size(); ++j) { - EXPECT_TRUE(person.indexedpackeddouble(j) == person2.indexedpackeddouble(j)); - } - ASSERT_TRUE(person.indexedunpackeddouble_size() == person2.indexedunpackeddouble_size()); - for (int j = 0; j < person.indexedunpackeddouble_size(); ++j) { - EXPECT_TRUE(person.indexedunpackeddouble(j) == person2.indexedunpackeddouble(j)); - } - ASSERT_TRUE(person.enabled() == person2.enabled()); -} - -TEST_F(ReindexerApi, ProtobufDecodingTest) { - // clang-format off - const std::string jsonSchema = R"xxx( - { - "required": [ - "name", - "id", - "age", - "weight", - "email", - "address", - "friends", - "salary", - "bonuses", - "indexedPackedDouble", - "indexedUnpackedDouble", - "enabled" - ], - "properties": { - "name": { - "type": "string" - }, - "id": { - "type": "integer" - }, - "age": { - "type": "integer" - }, - "weight": { - "type": "integer" - }, - "email": { - "type": "string" - }, - "address": { - "required": [ - "phones", - "homes", - "postalcodes" - ], - "properties": { - "phones": { - "items": { - "type": "object", - "required": [ - "number", - "type" - ], - "properties": { - "number": { - "type": "string" - }, - "type": { - "type": "integer" - } - }, - "additionalProperties": false, - "x-go-type": "PhoneNumber" - }, - "type": "array" - }, - "homes": { - "items": { - "type": "object", - "required": [ - "city", - "street" - ], - "properties": { - "city": { - "type": "string" - }, - "street": { - "type": "string" - } - }, - "additionalProperties": false, - "x-go-type": "Home" - }, - "type": "array" - }, - "postalcodes": { - "items": { - "type": "string" - }, - "type": "array" - } - }, - "additionalProperties": false, - "type": "object", - "x-go-type": "Address" - }, - "friends": { - "items": { - "type": "integer" - }, - "type": "array" - }, - "salary": { - "type": "number" - }, - "bonuses": { - "items": { - "type": "integer" - }, - "type": "array" - }, 
- "indexedPackedDouble": { - "items": { - "type": "number" - }, - "type": "array" - }, - "indexedUnpackedDouble": { - "items": { - "type": "string" - }, - "type": "array" - }, - "enabled": { - "type": "boolean" - } - }, - "additionalProperties": false, - "type": "object", - "x-go-type": "TestStruct" - } - )xxx"; - // clang-format on - - Error err = rt.reindexer->OpenNamespace(default_namespace); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->AddIndex( - default_namespace, reindexer::IndexDef("indexedPackedDouble", {"indexedPackedDouble"}, "tree", "double", IndexOpts().Array())); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->AddIndex( - default_namespace, reindexer::IndexDef("indexedUnpackedDouble", {"indexedUnpackedDouble"}, "tree", "string", IndexOpts().Array())); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rt.reindexer->SetSchema(default_namespace, jsonSchema); - ASSERT_TRUE(err.ok()) << err.what(); - - std::string protobufSchema; - err = rt.reindexer->GetSchema(default_namespace, ProtobufSchemaType, protobufSchema); - ASSERT_TRUE(err.ok()) << err.what(); - - reindexer::Item nsItem = rt.reindexer->NewItem(default_namespace); - ASSERT_TRUE(nsItem.Status().ok()) << nsItem.Status().what(); - - reindexer::WrSerializer wrser; - reindexer::ProtobufBuilder builder(&wrser); - builder.Put(nsItem.GetFieldTag("name"), kNameValue); - builder.Put(nsItem.GetFieldTag("id"), KIdValue); - builder.Put(nsItem.GetFieldTag("age"), kAgeValue); - builder.Put(nsItem.GetFieldTag("weight"), kWeightValue); - builder.Put(nsItem.GetFieldTag("email"), kEmailValue); - - auto addressBuilder = builder.Object(nsItem.GetFieldTag("address")); - auto phones = addressBuilder.ArrayNotPacked(nsItem.GetFieldTag("phones")); - for (size_t i = 0; i < 10; ++i) { - auto phone = phones.Object(0); - phone.Put(nsItem.GetFieldTag("number"), kNumberValue + std::to_string(i)); - phone.Put(nsItem.GetFieldTag("type"), int64_t(kTypeValue + i)); - } - phones.End(); - auto homes = addressBuilder.ArrayNotPacked(nsItem.GetFieldTag("homes")); - for (size_t i = 0; i < 20; ++i) { - auto home = homes.Object(0); - home.Put(nsItem.GetFieldTag("city"), kCityValue + std::to_string(i)); - home.Put(nsItem.GetFieldTag("street"), kStreetValue + std::to_string(i)); - } - homes.End(); - auto postalCodes = addressBuilder.ArrayNotPacked(nsItem.GetFieldTag("postalcodes")); - for (size_t i = 0; i < 20; ++i) { - postalCodes.Put(0, kPostalCodeValue + std::to_string(i)); - } - postalCodes.End(); - addressBuilder.End(); - auto friends = builder.ArrayNotPacked(nsItem.GetFieldTag("friends")); - for (int64_t i = 0; i < 5; ++i) { - friends.Put(0, i); - } - friends.End(); - builder.Put(nsItem.GetFieldTag("salary"), kSalaryValue); - auto bonuses = builder.ArrayPacked(nsItem.GetFieldTag("bonuses")); - for (int64_t i = 0; i < 10; ++i) { - bonuses.Put(9, i); - } - bonuses.End(); - - auto indexedPackedDouble = builder.ArrayPacked(nsItem.GetFieldTag("indexedPackedDouble")); - for (int i = 0; i < 10; ++i) { - indexedPackedDouble.Put(0, double(i) + 0.55f); - } - indexedPackedDouble.End(); - - std::vector strings; - auto indexedUnpackedDouble = builder.ArrayNotPacked(nsItem.GetFieldTag("indexedUnpackedDouble")); - for (int i = 0; i < 10; ++i) { - strings.emplace_back(std::string("BIG_DATA") + std::to_string(i + 1)); - indexedUnpackedDouble.Put(0, strings.back()); - } - indexedUnpackedDouble.End(); - - builder.Put(nsItem.GetFieldTag("enabled"), false); - - builder.End(); - - Item item1 = rt.reindexer->NewItem(default_namespace); - 
ASSERT_TRUE(item1.Status().ok()) << item1.Status().what(); - - err = item1.FromProtobuf(wrser.Slice()); - ASSERT_TRUE(err.ok()) << err.what(); - - Item item2 = rt.reindexer->NewItem(default_namespace); - ASSERT_TRUE(item2.Status().ok()) << item2.Status().what(); - - err = item2.FromJSON(item1.GetJSON()); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_TRUE(item1.GetJSON() == item2.GetJSON()); - - Item item3 = rt.reindexer->NewItem(default_namespace); - ASSERT_TRUE(item3.Status().ok()) << item3.Status().what(); - - reindexer::WrSerializer protobufSer; - err = item2.GetProtobuf(protobufSer); - ASSERT_TRUE(err.ok()) << err.what(); - - err = item3.FromProtobuf(protobufSer.Slice()); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_TRUE(item2.GetJSON() == item3.GetJSON()); -} - -#endif diff --git a/cpp_src/gtests/tests/unit/queries_test.cc b/cpp_src/gtests/tests/unit/queries_test.cc deleted file mode 100644 index 0f3d18763..000000000 --- a/cpp_src/gtests/tests/unit/queries_test.cc +++ /dev/null @@ -1,1408 +0,0 @@ -#include - -#include - -#include "core/cjson/csvbuilder.h" -#include "core/schema.h" -#include "csv2jsonconverter.h" -#include "queries_api.h" -#include "tools/jsontools.h" - -#if !defined(REINDEX_WITH_TSAN) -TEST_F(QueriesApi, QueriesStandardTestSet) { - try { - FillDefaultNamespace(0, 2500, 20); - FillDefaultNamespace(2500, 2500, 0); - FillCompositeIndexesNamespace(0, 1000); - FillTestSimpleNamespace(); - FillComparatorsNamespace(); - FillTestJoinNamespace(); - FillGeomNamespace(); - - CheckStandartQueries(); - CheckAggregationQueries(); - CheckSqlQueries(); - CheckDslQueries(); - CheckCompositeIndexesQueries(); - CheckComparatorsQueries(); - CheckDistinctQueries(); - CheckGeomQueries(); - CheckMergeQueriesWithLimit(); - CheckMergeQueriesWithAggregation(); - - int itemsCount = 0; - auto& items = insertedItems_[default_namespace]; - for (auto it = items.begin(); it != items.end();) { - Error err = rt.reindexer->Delete(default_namespace, it->second); - ASSERT_TRUE(err.ok()) << err.what(); - it = items.erase(it); - if (++itemsCount == 4000) break; - } - - FillDefaultNamespace(0, 500, 0); - FillDefaultNamespace(0, 1000, 5); - - itemsCount = 0; - for (auto it = items.begin(); it != items.end();) { - Error err = rt.reindexer->Delete(default_namespace, it->second); - ASSERT_TRUE(err.ok()) << err.what(); - it = items.erase(it); - if (++itemsCount == 5000) break; - } - - for (size_t i = 0; i < 5000; ++i) { - auto itToRemove = items.begin(); - if (itToRemove != items.end()) { - Error err = rt.reindexer->Delete(default_namespace, itToRemove->second); - ASSERT_TRUE(err.ok()) << err.what(); - items.erase(itToRemove); - } - FillDefaultNamespace(rand() % 100, 1, 0); - - if (!items.empty()) { - itToRemove = items.begin(); - std::advance(itToRemove, rand() % std::min(100, int(items.size()))); - if (itToRemove != items.end()) { - Error err = rt.reindexer->Delete(default_namespace, itToRemove->second); - ASSERT_TRUE(err.ok()) << err.what(); - items.erase(itToRemove); - } - } - } - - for (auto it = items.begin(); it != items.end();) { - Error err = rt.reindexer->Delete(default_namespace, it->second); - ASSERT_TRUE(err.ok()) << err.what(); - it = items.erase(it); - } - - FillDefaultNamespace(3000, 1000, 20); - FillDefaultNamespace(1000, 500, 00); - FillCompositeIndexesNamespace(1000, 1000); - FillComparatorsNamespace(); - FillGeomNamespace(); - - CheckStandartQueries(); - CheckAggregationQueries(); - CheckSqlQueries(); - CheckDslQueries(); - CheckCompositeIndexesQueries(); - CheckComparatorsQueries(); - 
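// A minimal sketch of the batched cleanup used earlier in this test: walk the locally tracked
// items, delete each through the same Reindexer instance and drop it from the local map. The
// helper name and the 'items' container are stand-ins for insertedItems_[default_namespace].
auto deleteFirstN = [&](auto &items, size_t limit) {
	size_t removed = 0;
	for (auto it = items.begin(); it != items.end() && removed < limit; ++removed) {
		Error e = rt.reindexer->Delete(default_namespace, it->second);
		ASSERT_TRUE(e.ok()) << e.what();
		it = items.erase(it);
	}
};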
CheckDistinctQueries(); - CheckGeomQueries(); - CheckMergeQueriesWithLimit(); - CheckConditionsMergingQueries(); - } catch (const reindexer::Error& e) { - ASSERT_TRUE(false) << e.what() << std::endl; - } catch (const std::exception& e) { - ASSERT_TRUE(false) << e.what() << std::endl; - } catch (...) { - ASSERT_TRUE(false); - } -} - -TEST_F(QueriesApi, QueriesConditions) { - FillConditionsNs(); - CheckConditions(); -} - -TEST_F(QueriesApi, UuidQueries) { - FillUUIDNs(); - // hack to obtain not index not string uuid fields - /*auto err = rt.reindexer->DropIndex(uuidNs, {kFieldNameUuidNotIndex2}); // TODO uncomment this #1470 - ASSERT_TRUE(err.ok()) << err.what(); - err = rt.reindexer->DropIndex(uuidNs, {kFieldNameUuidNotIndex3}); - ASSERT_TRUE(err.ok()) << err.what();*/ - CheckUUIDQueries(); -} -#endif // !defined(REINDEX_WITH_TSAN) - -TEST_F(QueriesApi, IndexCacheInvalidationTest) { - std::vector> data{{0, 10}, {1, 9}, {2, 8}, {3, 7}, {4, 6}, {5, 5}, - {6, 4}, {7, 3}, {8, 2}, {9, 1}, {10, 0}, {11, -1}}; - for (auto values : data) { - UpsertBtreeIdxOptNsItem(values); - } - Query q(btreeIdxOptNs); - q.Where(kFieldNameId, CondSet, {3, 5, 7}).Where(kFieldNameStartTime, CondGt, 2).Debug(LogTrace); - std::this_thread::sleep_for(std::chrono::seconds(1)); - for (size_t i = 0; i < 10; ++i) { - ExecuteAndVerify(q); - } - - UpsertBtreeIdxOptNsItem({5, 0}); - std::this_thread::sleep_for(std::chrono::seconds(5)); - for (size_t i = 0; i < 10; ++i) { - ExecuteAndVerify(q); - } -} - -TEST_F(QueriesApi, SelectRaceWithIdxCommit) { - FillDefaultNamespace(0, 1000, 1); - Query q{Query(default_namespace) - .Where(kFieldNameYear, CondGt, {2025}) - .Where(kFieldNameYear, CondLt, {2045}) - .Where(kFieldNameGenre, CondGt, {25}) - .Where(kFieldNameGenre, CondLt, {45})}; - for (unsigned i = 0; i < 50; ++i) { - ExecuteAndVerify(q); - } -} - -TEST_F(QueriesApi, TransactionStress) { - std::vector pool; - FillDefaultNamespace(0, 350, 20); - FillDefaultNamespace(3500, 350, 0); - std::atomic_uint current_size; - current_size = 350; - uint32_t stepSize = 1000; - - for (size_t i = 0; i < 4; i++) { - pool.push_back(std::thread([this, i, ¤t_size, stepSize]() { - size_t start_pos = i * stepSize; - if (i % 2 == 0) { - uint32_t steps = 10; - for (uint32_t j = 0; j < steps; ++j) { - current_size += stepSize / steps; - AddToDefaultNamespace(start_pos, start_pos + stepSize / steps, 20); - start_pos = start_pos + stepSize / steps; - } - } else if (i % 2 == 1) { - uint32_t oldsize = current_size.load(); - current_size += oldsize; - FillDefaultNamespaceTransaction(current_size, start_pos + oldsize, 10); - } - })); - } - - for (auto& tr : pool) { - tr.join(); - } -} - -TEST_F(QueriesApi, SqlParseGenerate) { - using namespace std::string_literals; - enum Direction { PARSE = 1, GEN = 2, BOTH = PARSE | GEN }; - struct { - std::string sql; - std::variant expected; - Direction direction = BOTH; - } cases[]{ - {"SELECT * FROM test_namespace WHERE index = 5", Query{"test_namespace"}.Where("index", CondEq, 5)}, - {"SELECT * FROM test_namespace WHERE index LIKE 'str'", Query{"test_namespace"}.Where("index", CondLike, "str")}, - {"SELECT * FROM test_namespace WHERE index <= field", Query{"test_namespace"}.WhereBetweenFields("index", CondLe, "field")}, - {"SELECT * FROM test_namespace WHERE index+field = 5", Error{errParseSQL, "Expected condition operator, but found '+' in query"}}, - {"SELECT * FROM test_namespace WHERE \"index+field\" > 5", Query{"test_namespace"}.Where("index+field", CondGt, 5)}, - {"SELECT * FROM test_namespace WHERE 
\"index+field\" LIKE index2.field2", - Query{"test_namespace"}.WhereBetweenFields("index+field", CondLike, "index2.field2")}, - {"SELECT * FROM test_namespace WHERE index2.field2 <> \"index+field\"", - Query{"test_namespace"}.Not().WhereBetweenFields("index2.field2", CondEq, "index+field"), PARSE}, - {"SELECT * FROM test_namespace WHERE NOT index2.field2 = \"index+field\"", - Query{"test_namespace"}.Not().WhereBetweenFields("index2.field2", CondEq, "index+field")}, - {"SELECT * FROM test_namespace WHERE 'index+field' = 5", - Error{errParseSQL, "String is invalid at this location. (text = 'index+field' location = line: 1 column: 49 52)"}}, - {"SELECT * FROM test_namespace WHERE \"index\" = 5", Query{"test_namespace"}.Where("index", CondEq, 5), PARSE}, - {"SELECT * FROM test_namespace WHERE 'index' = 5", - Error{errParseSQL, "String is invalid at this location. (text = 'index' location = line: 1 column: 43 46)"}}, - {"SELECT * FROM test_namespace WHERE NOT index ALLSET 3489578", Query{"test_namespace"}.Not().Where("index", CondAllSet, 3489578)}, - {"SELECT * FROM test_namespace WHERE NOT index ALLSET (0,1)", Query{"test_namespace"}.Not().Where("index", CondAllSet, {0, 1})}, - {"SELECT ID, Year, Genre FROM test_namespace WHERE year > '2016' ORDER BY 'year' DESC LIMIT 10000000", - Query{"test_namespace"}.Select({"ID", "Year", "Genre"}).Where("year", CondGt, "2016").Sort("year", true).Limit(10000000)}, - {"SELECT ID FROM test_namespace WHERE name LIKE 'something' AND (genre IN ('1','2','3') AND year > '2016') OR age IN " - "('1','2','3','4') LIMIT 10000000", - Query{"test_namespace"} - .Select({"ID"}) - .Where("name", CondLike, "something") - .OpenBracket() - .Where("genre", CondSet, {"1", "2", "3"}) - .Where("year", CondGt, "2016") - .CloseBracket() - .Or() - .Where("age", CondSet, {"1", "2", "3", "4"}) - .Limit(10000000)}, - {"SELECT * FROM test_namespace WHERE INNER JOIN join_ns ON test_namespace.id = join_ns.id " - "ORDER BY 'year + join_ns.year * (5 - rand())'", - Query{"test_namespace"}.InnerJoin("id", "id", CondEq, Query{"join_ns"}).Sort("year + join_ns.year * (5 - rand())", false)}, - {"SELECT * FROM "s + geomNs + " WHERE ST_DWithin(" + kFieldNamePointNonIndex + ", ST_GeomFromText('POINT(1.25 -7.25)'), 0.5)", - Query{geomNs}.DWithin(kFieldNamePointNonIndex, reindexer::Point{1.25, -7.25}, 0.5)}, - {"SELECT * FROM test_namespace ORDER BY FIELD(index, 10, 20, 30)", Query{"test_namespace"}.Sort("index", false, {10, 20, 30})}, - {"SELECT * FROM test_namespace ORDER BY FIELD(index, 'str1', 'str2', 'str3') DESC", - Query{"test_namespace"}.Sort("index", true, {"str1", "str2", "str3"})}, - {"SELECT * FROM test_namespace ORDER BY FIELD(index, {10, 'str1'}, {20, 'str2'}, {30, 'str3'})", - Query{"test_namespace"}.Sort("index", false, std::vector>{{10, "str1"}, {20, "str2"}, {30, "str3"}})}, - {"SELECT * FROM main_ns WHERE (SELECT * FROM second_ns WHERE id < 10 LIMIT 0) IS NOT NULL", - Query{"main_ns"}.Where(Query{"second_ns"}.Where("id", CondLt, 10), CondAny, VariantArray{})}, - {"SELECT * FROM main_ns WHERE id = (SELECT id FROM second_ns WHERE id < 10)", - Query{"main_ns"}.Where("id", CondEq, Query{"second_ns"}.Select({"id"}).Where("id", CondLt, 10))}, - {"SELECT * FROM main_ns WHERE (SELECT max(id) FROM second_ns WHERE id < 10) > 18", - Query{"main_ns"}.Where(Query{"second_ns"}.Aggregate(AggMax, {"id"}).Where("id", CondLt, 10), CondGt, {18})}, - {"SELECT * FROM main_ns WHERE id > (SELECT avg(id) FROM second_ns WHERE id < 10)", - Query{"main_ns"}.Where("id", CondGt, 
Query{"second_ns"}.Aggregate(AggAvg, {"id"}).Where("id", CondLt, 10))}, - {"SELECT * FROM main_ns WHERE id > (SELECT COUNT(*) FROM second_ns WHERE id < 10 LIMIT 0)", - Query{"main_ns"}.Where("id", CondGt, Query{"second_ns"}.Where("id", CondLt, 10).ReqTotal())}, - {"SELECT * FROM main_ns WHERE (SELECT * FROM second_ns WHERE id < 10 LIMIT 0) IS NOT NULL AND value IN (5,4,1)", - Query{"main_ns"} - .Where(Query{"second_ns"}.Where("id", CondLt, 10), CondAny, {}) - .Where("value", CondSet, {Variant{5}, Variant{4}, Variant{1}})}, - {"SELECT * FROM main_ns WHERE ((SELECT * FROM second_ns WHERE id < 10 LIMIT 0) IS NOT NULL) AND value IN (5,4,1)", - Query{"main_ns"} - .OpenBracket() - .Where(Query{"second_ns"}.Where("id", CondLt, 10), CondAny, {}) - .CloseBracket() - .Where("value", CondSet, {Variant{5}, Variant{4}, Variant{1}})}, - {"SELECT * FROM main_ns WHERE id IN (SELECT id FROM second_ns WHERE id < 999) AND value >= 1000", - Query{"main_ns"}.Where("id", CondSet, Query{"second_ns"}.Select({"id"}).Where("id", CondLt, 999)).Where("value", CondGe, 1000)}, - {"SELECT * FROM main_ns WHERE (id IN (SELECT id FROM second_ns WHERE id < 999)) AND value >= 1000", - Query{"main_ns"} - .OpenBracket() - .Where("id", CondSet, Query{"second_ns"}.Select({"id"}).Where("id", CondLt, 999)) - .CloseBracket() - .Where("value", CondGe, 1000)}, - {"SELECT * FROM main_ns " - "WHERE (SELECT id FROM second_ns WHERE id < 999 AND xxx IS NULL ORDER BY 'value' DESC LIMIT 10) = 0 " - "ORDER BY 'tree'", - Query{"main_ns"} - .Where(Query{"second_ns"} - .Select({"id"}) - .Where("id", CondLt, 999) - .Where("xxx", CondEmpty, VariantArray{}) - .Limit(10) - .Sort("value", true), - CondEq, 0) - .Sort("tree", false)}, - {"SELECT * FROM main_ns " - "WHERE ((SELECT id FROM second_ns WHERE id < 999 AND xxx IS NULL ORDER BY 'value' DESC LIMIT 10) = 0) " - "ORDER BY 'tree'", - Query{"main_ns"} - .OpenBracket() - .Where(Query{"second_ns"} - .Select({"id"}) - .Where("id", CondLt, 999) - .Where("xxx", CondEmpty, VariantArray{}) - .Limit(10) - .Sort("value", true), - CondEq, 0) - .CloseBracket() - .Sort("tree", false)}, - {"SELECT * FROM main_ns " - "WHERE INNER JOIN (SELECT * FROM second_ns WHERE NOT val = 10) ON main_ns.id = second_ns.uid " - "AND id IN (SELECT id FROM third_ns WHERE id < 999) " - "AND INNER JOIN (SELECT * FROM fourth_ns WHERE val IS NOT NULL OFFSET 2 LIMIT 1) ON main_ns.uid = fourth_ns.id", - Query{"main_ns"} - .InnerJoin("id", "uid", CondEq, Query("second_ns").Not().Where("val", CondEq, 10)) - .Where("id", CondSet, Query{"third_ns"}.Select({"id"}).Where("id", CondLt, 999)) - .InnerJoin("uid", "id", CondEq, Query("fourth_ns").Where("val", CondAny, VariantArray{}).Limit(1).Offset(2))}, - {"SELECT * FROM main_ns " - "WHERE INNER JOIN (SELECT * FROM second_ns WHERE NOT val = 10 OFFSET 2 LIMIT 1) ON main_ns.id = second_ns.uid " - "AND id IN (SELECT id FROM third_ns WHERE id < 999) " - "LEFT JOIN (SELECT * FROM fourth_ns WHERE val IS NOT NULL) ON main_ns.uid = fourth_ns.id", - Query{"main_ns"} - .InnerJoin("id", "uid", CondEq, Query("second_ns").Not().Where("val", CondEq, 10).Limit(1).Offset(2)) - .Where("id", CondSet, Query{"third_ns"}.Select({"id"}).Where("id", CondLt, 999)) - .LeftJoin("uid", "id", CondEq, Query("fourth_ns").Where("val", CondAny, VariantArray{}))}, - {"SELECT * FROM main_ns " - "WHERE id IN (SELECT id FROM third_ns WHERE id < 999 OFFSET 7 LIMIT 5) " - "LEFT JOIN (SELECT * FROM second_ns WHERE NOT val = 10 OFFSET 2 LIMIT 1) ON main_ns.id = second_ns.uid " - "LEFT JOIN (SELECT * FROM fourth_ns WHERE val IS NOT 
NULL) ON main_ns.uid = fourth_ns.id", - Query{"main_ns"} - .LeftJoin("id", "uid", CondEq, Query("second_ns").Not().Where("val", CondEq, 10).Limit(1).Offset(2)) - .Where("id", CondSet, Query{"third_ns"}.Select({"id"}).Where("id", CondLt, 999).Limit(5).Offset(7)) - .LeftJoin("uid", "id", CondEq, Query("fourth_ns").Where("val", CondAny, VariantArray{}))}, - }; - - for (const auto& [sql, expected, direction] : cases) { - if (std::holds_alternative(expected)) { - const Query& q = std::get(expected); - if (direction & GEN) { - EXPECT_EQ(q.GetSQL(), sql); - } - if (direction & PARSE) { - try { - Query parsed = Query::FromSQL(sql); - EXPECT_EQ(parsed, q) << sql; - } catch (const Error& err) { - ADD_FAILURE() << "Unexpected error: " << err.what() << "\nSQL: " << sql; - continue; - } - } - } else { - const Error& expectedErr = std::get(expected); - try { - Query parsed = Query::FromSQL(sql); - ADD_FAILURE() << "Expected error: " << expectedErr.what() << "\nSQL: " << sql; - } catch (const Error& err) { - EXPECT_EQ(err.what(), expectedErr.what()) << "\nSQL: " << sql; - } - } - } -} - -TEST_F(QueriesApi, DslGenerateParse) { - using namespace std::string_literals; - enum Direction { PARSE = 1, GEN = 2, BOTH = PARSE | GEN }; - struct { - std::string dsl; - std::variant expected; - Direction direction = BOTH; - } cases[]{{fmt::sprintf( - R"({ - "namespace": "%s", - "limit": -1, - "offset": 0, - "req_total": "disabled", - "explain": false, - "type": "select", - "select_with_rank": false, - "select_filter": [], - "select_functions": [], - "sort": [], - "filters": [ - { - "op": "and", - "always": true - } - ], - "merge_queries": [], - "aggregations": [] -})", - geomNs), - Query{geomNs}.AppendQueryEntry(OpAnd)}, - {fmt::sprintf( - R"({ - "namespace": "%s", - "limit": -1, - "offset": 0, - "req_total": "disabled", - "explain": false, - "type": "select", - "select_with_rank": false, - "select_filter": [], - "select_functions": [], - "sort": [], - "filters": [ - { - "op": "and", - "cond": "dwithin", - "field": "%s", - "value": [ - [ - -9.2, - -0.145 - ], - 0.581 - ] - } - ], - "merge_queries": [], - "aggregations": [] -})", - geomNs, kFieldNamePointLinearRTree), - Query{geomNs}.DWithin(kFieldNamePointLinearRTree, reindexer::Point{-9.2, -0.145}, 0.581)}, - {fmt::sprintf( - R"({ - "namespace": "%s", - "limit": -1, - "offset": 0, - "req_total": "disabled", - "explain": false, - "type": "select", - "select_with_rank": false, - "select_filter": [], - "select_functions": [], - "sort": [], - "filters": [ - { - "op": "and", - "cond": "gt", - "first_field": "%s", - "second_field": "%s" - } - ], - "merge_queries": [], - "aggregations": [] -})", - default_namespace, kFieldNameStartTime, kFieldNamePackages), - Query(default_namespace).WhereBetweenFields(kFieldNameStartTime, CondGt, kFieldNamePackages)}, - {fmt::sprintf( - R"({ - "namespace": "%s", - "limit": -1, - "offset": 0, - "req_total": "disabled", - "explain": false, - "type": "select", - "select_with_rank": false, - "select_filter": [], - "select_functions": [], - "sort": [], - "filters": [ - { - "op": "and", - "cond": "gt", - "subquery": { - "namespace": "%s", - "limit": 10, - "offset": 10, - "req_total": "disabled", - "select_filter": [], - "sort": [], - "filters": [], - "aggregations": [ - { - "type": "max", - "fields": [ - "%s" - ] - } - ] - }, - "value": 18 - } - ], - "merge_queries": [], - "aggregations": [] -})", - default_namespace, joinNs, kFieldNameAge), - Query(default_namespace).Where(Query(joinNs).Aggregate(AggMax, {kFieldNameAge}).Limit(10).Offset(10), 
CondGt, {18})}, - {fmt::sprintf( - R"({ - "namespace": "%s", - "limit": -1, - "offset": 0, - "req_total": "disabled", - "explain": false, - "type": "select", - "select_with_rank": false, - "select_filter": [], - "select_functions": [], - "sort": [], - "filters": [ - { - "op": "and", - "cond": "any", - "subquery": { - "namespace": "%s", - "limit": 0, - "offset": 0, - "req_total": "disabled", - "select_filter": [], - "sort": [], - "filters": [ - { - "op": "and", - "cond": "eq", - "field": "%s", - "value": 1 - } - ], - "aggregations": [] - } - } - ], - "merge_queries": [], - "aggregations": [] -})", - default_namespace, joinNs, kFieldNameId), - Query(default_namespace).Where(Query(joinNs).Where(kFieldNameId, CondEq, 1), CondAny, {})}, - {fmt::sprintf( - R"({ - "namespace": "%s", - "limit": -1, - "offset": 0, - "req_total": "disabled", - "explain": false, - "type": "select", - "select_with_rank": false, - "select_filter": [], - "select_functions": [], - "sort": [], - "filters": [ - { - "op": "and", - "cond": "eq", - "field": "%s", - "subquery": { - "namespace": "%s", - "limit": -1, - "offset": 0, - "req_total": "disabled", - "select_filter": [ - "%s" - ], - "sort": [], - "filters": [ - { - "op": "and", - "cond": "set", - "field": "%s", - "value": [ - 1, - 10, - 100 - ] - } - ], - "aggregations": [] - } - } - ], - "merge_queries": [], - "aggregations": [] -})", - default_namespace, kFieldNameName, joinNs, kFieldNameName, kFieldNameId), - Query(default_namespace) - .Where(kFieldNameName, CondEq, Query(joinNs).Select({kFieldNameName}).Where(kFieldNameId, CondSet, {1, 10, 100}))}, - {fmt::sprintf( - R"({ - "namespace": "%s", - "limit": -1, - "offset": 0, - "req_total": "disabled", - "explain": false, - "type": "select", - "select_with_rank": false, - "select_filter": [], - "select_functions": [], - "sort": [], - "filters": [ - { - "op": "and", - "cond": "gt", - "field": "%s", - "subquery": { - "namespace": "%s", - "limit": -1, - "offset": 0, - "req_total": "disabled", - "select_filter": [], - "sort": [], - "filters": [], - "aggregations": [ - { - "type": "avg", - "fields": [ - "%s" - ] - } - ] - } - } - ], - "merge_queries": [], - "aggregations": [] -})", - default_namespace, kFieldNameId, joinNs, kFieldNameId), - Query(default_namespace).Where(kFieldNameId, CondGt, Query(joinNs).Aggregate(AggAvg, {kFieldNameId}))}, - {fmt::sprintf( - R"({ - "namespace": "%s", - "limit": -1, - "offset": 0, - "req_total": "disabled", - "explain": false, - "type": "select", - "select_with_rank": false, - "select_filter": [], - "select_functions": [], - "sort": [], - "filters": [ - { - "op": "and", - "cond": "gt", - "field": "%s", - "subquery": { - "namespace": "%s", - "limit": 0, - "offset": 0, - "req_total": "enabled", - "select_filter": [], - "sort": [], - "filters": [], - "aggregations": [] - } - } - ], - "merge_queries": [], - "aggregations": [] -})", - default_namespace, kFieldNameId, joinNs, kFieldNameId), - Query(default_namespace).Where(kFieldNameId, CondGt, Query(joinNs).ReqTotal())}}; - for (const auto& [dsl, expected, direction] : cases) { - if (std::holds_alternative(expected)) { - const Query& q = std::get(expected); - if (direction & GEN) { - reindexer::WrSerializer ser; - reindexer::prettyPrintJSON(q.GetJSON(), ser, 3); - EXPECT_EQ(ser.Slice(), dsl); - } - if (direction & PARSE) { - Query parsed; - try { - const auto err = parsed.FromJSON(dsl); - ASSERT_TRUE(err.ok()) << err.what() << "\nDSL: " << dsl; - } catch (const Error& err) { - ADD_FAILURE() << "Unexpected error: " << err.what() << "\nDSL: " << 
dsl; - continue; - } - EXPECT_EQ(parsed, q) << dsl; - } - } else { - const Error& expectedErr = std::get(expected); - Query parsed; - try { - const auto err = parsed.FromJSON(dsl); - ASSERT_TRUE(err.ok()) << err.what(); - ADD_FAILURE() << "Expected error: " << expectedErr.what() << "\nDSL: " << dsl; - } catch (const Error& err) { - EXPECT_EQ(err.what(), expectedErr.what()) << "\nDSL: " << dsl; - } - } - } -} - -static std::vector generateForcedSortOrder(int maxValue, size_t size) { - std::set res; - while (res.size() < size) res.insert(rand() % maxValue); - return {res.cbegin(), res.cend()}; -} - -TEST_F(QueriesApi, ForcedSortOffsetTest) { - FillForcedSortNamespace(); - for (size_t i = 0; i < 100; ++i) { - const auto forcedSortOrder = - generateForcedSortOrder(forcedSortOffsetMaxValue * 1.1, rand() % static_cast(forcedSortOffsetNsSize * 1.1)); - const size_t offset = rand() % static_cast(forcedSortOffsetNsSize * 1.1); - const size_t limit = rand() % static_cast(forcedSortOffsetNsSize * 1.1); - const bool desc = rand() % 2; - // Single column sort - auto expectedResults = ForcedSortOffsetTestExpectedResults(offset, limit, desc, forcedSortOrder, First); - ExecuteAndVerify(Query(forcedSortOffsetNs).Sort(kFieldNameColumnHash, desc, forcedSortOrder).Offset(offset).Limit(limit), - kFieldNameColumnHash, expectedResults); - expectedResults = ForcedSortOffsetTestExpectedResults(offset, limit, desc, forcedSortOrder, Second); - ExecuteAndVerify(Query(forcedSortOffsetNs).Sort(kFieldNameColumnTree, desc, forcedSortOrder).Offset(offset).Limit(limit), - kFieldNameColumnTree, expectedResults); - // Multicolumn sort - const bool desc2 = rand() % 2; - auto expectedResultsMult = ForcedSortOffsetTestExpectedResults(offset, limit, desc, desc2, forcedSortOrder, First); - ExecuteAndVerify(Query(forcedSortOffsetNs) - .Sort(kFieldNameColumnHash, desc, forcedSortOrder) - .Sort(kFieldNameColumnTree, desc2) - .Offset(offset) - .Limit(limit), - kFieldNameColumnHash, expectedResultsMult.first, kFieldNameColumnTree, expectedResultsMult.second); - expectedResultsMult = ForcedSortOffsetTestExpectedResults(offset, limit, desc, desc2, forcedSortOrder, Second); - ExecuteAndVerify(Query(forcedSortOffsetNs) - .Sort(kFieldNameColumnTree, desc2, forcedSortOrder) - .Sort(kFieldNameColumnHash, desc) - .Offset(offset) - .Limit(limit), - kFieldNameColumnHash, expectedResultsMult.first, kFieldNameColumnTree, expectedResultsMult.second); - } -} - -TEST_F(QueriesApi, StrictModeTest) { - FillTestSimpleNamespace(); - - const std::string kNotExistingField = "some_random_name123"; - QueryResults qr; - { - Query query = Query(testSimpleNs).Where(kNotExistingField, CondEmpty, VariantArray{}); - Error err = rt.reindexer->Select(query.Strict(StrictModeNames), qr); - EXPECT_EQ(err.code(), errStrictMode); - qr.Clear(); - err = rt.reindexer->Select(query.Strict(StrictModeIndexes), qr); - EXPECT_EQ(err.code(), errStrictMode); - qr.Clear(); - err = rt.reindexer->Select(query.Strict(StrictModeNone), qr); - ASSERT_TRUE(err.ok()) << err.what(); - Verify(qr, Query(testSimpleNs), *rt.reindexer); - qr.Clear(); - } - - { - Query query = Query(testSimpleNs).Where(kNotExistingField, CondEq, 0); - Error err = rt.reindexer->Select(query.Strict(StrictModeNames), qr); - EXPECT_EQ(err.code(), errStrictMode); - qr.Clear(); - err = rt.reindexer->Select(query.Strict(StrictModeIndexes), qr); - EXPECT_EQ(err.code(), errStrictMode); - qr.Clear(); - err = rt.reindexer->Select(query.Strict(StrictModeNone), qr); - ASSERT_TRUE(err.ok()) << err.what(); - 
EXPECT_EQ(qr.Count(), 0); - } -} - -TEST_F(QueriesApi, SQLLeftJoinSerialize) { - const char* condNames[] = {"IS NOT NULL", "=", "<", "<=", ">", ">=", "RANGE", "IN", "ALLSET", "IS NULL", "LIKE"}; - const std::string sqlTemplate = "SELECT * FROM tleft LEFT JOIN tright ON %s.%s %s %s.%s"; - - const std::string tLeft = "tleft"; - const std::string tRight = "tright"; - const std::string iLeft = "ileft"; - const std::string iRight = "iright"; - - auto createQuery = [&sqlTemplate, &condNames](const std::string& leftTable, const std::string& rightTable, const std::string& leftIndex, - const std::string& rightIndex, CondType t) -> std::string { - return fmt::sprintf(sqlTemplate, leftTable, leftIndex, condNames[t], rightTable, rightIndex); - }; - - std::vector> conditions = {{CondLe, CondGe}, {CondGe, CondLe}, {CondLt, CondGt}, {CondGt, CondLt}}; - - for (auto& c : conditions) { - try { - reindexer::Query q(tLeft); - reindexer::Query qr(tRight); - q.LeftJoin(iLeft, iRight, c.first, qr); - - { - std::string sqlQCmp = createQuery(tLeft, tRight, iLeft, iRight, c.first); - reindexer::WrSerializer wrSer; - q.GetSQL(wrSer); - ASSERT_EQ(sqlQCmp, std::string(wrSer.c_str())); - } - - { - std::string sqlQ = createQuery(tLeft, tRight, iLeft, iRight, c.first); - Query qSql = Query::FromSQL(sqlQ); - - reindexer::WrSerializer wrSer; - qSql.GetSQL(wrSer); - ASSERT_EQ(sqlQ, std::string(wrSer.c_str())); - } - { - std::string sqlQ = createQuery(tRight, tLeft, iRight, iLeft, c.second); - Query qSql = Query::FromSQL(sqlQ); - ASSERT_EQ(q.GetJSON(), qSql.GetJSON()); - reindexer::WrSerializer wrSer; - qSql.GetSQL(wrSer); - ASSERT_EQ(sqlQ, std::string(wrSer.c_str())); - } - } catch (const Error& e) { - ASSERT_TRUE(e.ok()) << e.what(); - } - } -} - -TEST_F(QueriesApi, JoinByNotIndexField) { - static constexpr int kItemsCount = 10; - const std::string leftNs = "join_by_not_index_field_left_ns"; - const std::string rightNs = "join_by_not_index_field_right_ns"; - - reindexer::WrSerializer ser; - for (const auto& nsName : {leftNs, rightNs}) { - Error err = rt.reindexer->OpenNamespace(nsName); - ASSERT_TRUE(err.ok()) << err.what(); - err = rt.reindexer->AddIndex(nsName, reindexer::IndexDef{"id", {"id"}, "tree", "int", IndexOpts{}.PK()}); - ASSERT_TRUE(err.ok()) << err.what(); - for (int i = 0; i < kItemsCount; ++i) { - ser.Reset(); - reindexer::JsonBuilder json{ser}; - json.Put("id", i); - if (i % 2 == 1) json.Put("f", i); - json.End(); - Item item = rt.reindexer->NewItem(nsName); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - err = item.FromJSON(ser.Slice()); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - Upsert(nsName, item); - } - } - reindexer::QueryResults qr; - Error err = rt.reindexer->Select( - Query(leftNs).Strict(StrictModeNames).Join(InnerJoin, Query(rightNs).Where("id", CondGe, 5)).On("f", CondEq, "f"), qr); - ASSERT_TRUE(err.ok()) << err.what(); - const int expectedIds[] = {5, 7, 9}; - ASSERT_EQ(qr.Count(), sizeof(expectedIds) / sizeof(int)); - unsigned i = 0; - for (auto& it : qr) { - Item item = it.GetItem(false); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - VariantArray values = item["id"]; - ASSERT_EQ(values.size(), 1); - EXPECT_EQ(values[0].As(), expectedIds[i++]); - } -} - -TEST_F(QueriesApi, AllSet) { - const std::string nsName = "allset_ns"; - Error err = rt.reindexer->OpenNamespace(nsName); - ASSERT_TRUE(err.ok()) << err.what(); - err = rt.reindexer->AddIndex(nsName, reindexer::IndexDef{"id", {"id"}, "hash", "int", 
IndexOpts{}.PK()}); - ASSERT_TRUE(err.ok()) << err.what(); - reindexer::WrSerializer ser; - reindexer::JsonBuilder json{ser}; - json.Put("id", 0); - json.Array("array", {0, 1, 2}); - json.End(); - Item item = rt.reindexer->NewItem(nsName); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - err = item.FromJSON(ser.Slice()); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - Upsert(nsName, item); - Query q{nsName}; - q.Where("array", CondAllSet, {0, 1, 2}); - reindexer::QueryResults qr; - err = rt.reindexer->Select(q, qr); - ASSERT_TRUE(err.ok()) << err.what(); - EXPECT_EQ(qr.Count(), 1); -} - -TEST_F(QueriesApi, SetByTreeIndex) { - // Execute query with sort and set condition by btree index - const std::string nsName = "set_by_tree_ns"; - constexpr int kMaxID = 20; - Error err = rt.reindexer->OpenNamespace(nsName); - ASSERT_TRUE(err.ok()) << err.what(); - err = rt.reindexer->AddIndex(nsName, reindexer::IndexDef{"id", {"id"}, "tree", "int", IndexOpts{}.PK()}); - ASSERT_TRUE(err.ok()) << err.what(); - setPkFields(nsName, {"id"}); - for (int id = kMaxID; id != 0; --id) { - Item item = rt.NewItem(nsName); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - item["id"] = id; - Upsert(nsName, item); - saveItem(std::move(item), nsName); - } - - Query q{nsName}; - q.Where("id", CondSet, {rand() % kMaxID, rand() % kMaxID, rand() % kMaxID, rand() % kMaxID}).Sort("id", false); - { - QueryResults qr; - ExecuteAndVerifyWithSql(q, qr); - // Expecting no sort index and filtering by index - EXPECT_NE(qr.GetExplainResults().find(",\"sort_index\":\"-\","), std::string::npos); - EXPECT_NE(qr.GetExplainResults().find(",\"method\":\"index\","), std::string::npos); - EXPECT_EQ(qr.GetExplainResults().find("\"scan\""), std::string::npos); - } - - { - // Execute the same query after indexes optimization - AwaitIndexOptimization(nsName); - QueryResults qr; - ExecuteAndVerifyWithSql(q, qr); - // Expecting 'id' as a sort index and filtering by index - EXPECT_NE(qr.GetExplainResults().find(",\"sort_index\":\"id\","), std::string::npos); - EXPECT_NE(qr.GetExplainResults().find(",\"method\":\"index\","), std::string::npos); - EXPECT_EQ(qr.GetExplainResults().find("\"scan\""), std::string::npos); - } -} - -TEST_F(QueriesApi, TestCsvParsing) { - std::vector> fieldsArr{ - {"field0", "", "\"field1\"", "field2", "", "", "", "field3", "field4", "", "", "field5", ""}, - {"", "", "\"field6\"", "field7", "field8", "field9", "", "", "", "", "", "field10", "field11"}, - {"field12", "field13", "\"field14\"", "", "", "", "", "field15", "field16", "", "", "field17", "field18"}, - {"", "", "\"field19\"", "field20", "", "", "", "field21", "", "", "field22", "", ""}, - {"", "", "\"\"", "", "", "", "", "", "", "", "", "", ""}, - {"", "field23", "\"field24\"", "field25", "", "", "", "", "field26", "", "", "", ""}}; - - std::string_view dblQuote = "\"\""; - - for (const auto& fields : fieldsArr) { - std::stringstream ss; - for (size_t i = 0; i < fields.size(); ++i) { - if (i == 2) { - ss << dblQuote << fields[i] << dblQuote; - } else { - ss << fields[i]; - } - if (i < fields.size() - 1) { - ss << ','; - } - } - - auto resFields = reindexer::parseCSVRow(ss.str()); - ASSERT_EQ(resFields.size(), fields.size()); - - for (size_t i = 0; i < fields.size(); ++i) { - ASSERT_EQ(resFields[i], fields[i]); - } - } -} - -TEST_F(QueriesApi, TestCsvProcessingWithSchema) { - using namespace std::string_literals; - std::array nsNames = {"csv_test1", "csv_test2", "csv_test3"}; - - auto openNs 
= [this](std::string_view nsName) { - Error err = rt.reindexer->OpenNamespace(nsName); - ASSERT_TRUE(err.ok()) << err.what(); - err = rt.reindexer->AddIndex(nsName, reindexer::IndexDef{"id", {"id"}, "hash", "int", IndexOpts{}.PK()}); - ASSERT_TRUE(err.ok()) << err.what(); - }; - - for (auto& nsName : nsNames) { - openNs(nsName); - } - - const std::string jsonschema = R"!( - { - "required": - [ - "id", - "Field0", - "Field1", - "Field2", - "Field3", - "Field4", - "Field5", - "Field6", - "Field7", - "quoted_field", - "join_field", - "Array_level0_id_0", - "Array_level0_id_1", - "Array_level0_id_2", - "Array_level0_id_3", - "Array_level0_id_4", - "Object_level0_id_0", - "Object_level0_id_1", - "Object_level0_id_2", - "Object_level0_id_3", - "Object_level0_id_4" - ], - "properties": - { - "id": { "type": "int" }, - "Field0": { "type": "string" }, - "Field1": { "type": "string" }, - "Field2": { "type": "string" }, - "Field3": { "type": "string" }, - "Field4": { "type": "string" }, - "Field5": { "type": "string" }, - "Field6": { "type": "string" }, - "Field7": { "type": "string" }, - - "quoted_field":{ "type": "string" }, - "join_field": { "type": "int" }, - - "Array_level0_id_0":{"items":{"type": "string"},"type": "array"}, - "Array_level0_id_1":{"items":{"type": "string"},"type": "array"}, - "Array_level0_id_2":{"items":{"type": "string"},"type": "array"}, - "Array_level0_id_3":{"items":{"type": "string"},"type": "array"}, - "Array_level0_id_4":{"items":{"type": "string"},"type": "array"} - - "Object_level0_id_0":{"additionalProperties": false,"type": "object"}, - "Object_level0_id_1":{"additionalProperties": false,"type": "object"}, - "Object_level0_id_2":{"additionalProperties": false,"type": "object"}, - "Object_level0_id_3":{"additionalProperties": false,"type": "object"}, - "Object_level0_id_4":{"additionalProperties": false,"type": "object"} - }, - "additionalProperties": false, - "type": "object" - })!"; - - auto err = rt.reindexer->SetSchema(nsNames[0], jsonschema); - ASSERT_TRUE(err.ok()) << err.what(); - - int fieldNum = 0; - const auto addItem = [&fieldNum, this](int id, std::string_view nsName, bool needJoinField = true) { - reindexer::WrSerializer ser; - { - reindexer::JsonBuilder json{ser}; - json.Put("id", id); - json.Put(fmt::sprintf("Field%d", fieldNum), fmt::sprintf("field_%d_data", fieldNum)); - ++fieldNum; - json.Put(fmt::sprintf("Field%d", fieldNum), fmt::sprintf("field_%d_data", fieldNum)); - ++fieldNum; - json.Put(fmt::sprintf("Field%d", fieldNum), fmt::sprintf("field_%d_data", fieldNum)); - ++fieldNum; - json.Put(fmt::sprintf("Field%d", fieldNum), fmt::sprintf("field_%d_data", fieldNum)); - json.Put("quoted_field", "\"field_with_\"quoted\""); - if (needJoinField) { - json.Put("join_field", id % 3); - } - { - auto data0 = json.Array(fmt::sprintf("Array_level0_id_%d", id)); - for (int i = 0; i < 5; ++i) data0.Put(nullptr, fmt::sprintf("array_data_0_%d", i)); - data0.Put(nullptr, std::string("\"arr_quoted_field(\"this is quoted too\")\"")); - } - { - auto data0 = json.Object(fmt::sprintf("Object_level0_id_%d", id)); - for (int i = 0; i < 5; ++i) data0.Put(fmt::sprintf("Object_%d", i), fmt::sprintf("object_data_0_%d", i)); - data0.Put("Quoted Field lvl0", std::string("\"obj_quoted_field(\"this is quoted too\")\"")); - { - auto data1 = data0.Object(fmt::sprintf("Object_level1_id_%d", id)); - for (int j = 0; j < 5; ++j) data1.Put(fmt::sprintf("objectData1 %d", j), fmt::sprintf("objectData1 %d", j)); - data1.Put("Quoted Field lvl1", std::string("\"obj_quoted_field(\"this is 
quoted too\")\"")); - } - } - } - Item item = rt.reindexer->NewItem(nsName); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - auto err = item.FromJSON(ser.Slice()); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - Upsert(nsName, item); - }; - - for (auto& nsName : nsNames) { - for (int i = 0; i < 5; i++) { - addItem(i, nsName, !(i == 4 && nsName == "csv_test1")); // one item for check when item without joined nss - fieldNum -= 2; - } - } - - Query q = Query{nsNames[0]}; - q.Join(LeftJoin, "join_field", "join_field", CondEq, OpAnd, Query(nsNames[1])); - q.Join(LeftJoin, "id", "join_field", CondEq, OpAnd, Query(nsNames[2])); - reindexer::QueryResults qr; - err = rt.reindexer->Select(q, qr); - ASSERT_TRUE(err.ok()) << err.what(); - - for (auto& ordering : std::array{qr.GetSchema(0)->MakeCsvTagOrdering(qr.GetTagsMatcher(0)), - qr.ToLocalQr().MakeCSVTagOrdering(std::numeric_limits::max(), 0)}) { - auto csv2jsonSchema = [&ordering, &qr] { - std::vector res; - for (auto tag : ordering) { - res.emplace_back(qr.GetTagsMatcher(0).tag2name(tag)); - } - res.emplace_back("joined_nss_map"); - return res; - }(); - - reindexer::WrSerializer serCsv, serJson; - for (auto& q : qr) { - err = q.GetCSV(serCsv, ordering); - ASSERT_TRUE(err.ok()) << err.what(); - - err = q.GetJSON(serJson, false); - ASSERT_TRUE(err.ok()) << err.what(); - - gason::JsonParser parserCsv, parserJson; - auto converted = parserCsv.Parse(std::string_view(reindexer::csv2json(serCsv.Slice(), csv2jsonSchema))); - auto orig = parserJson.Parse(serJson.Slice()); - - // for check that all tags related to joined nss from json-result are present in csv-result - std::set checkJoinedNssTags; - for (const auto& node : orig) { - if (std::string_view(node.key).substr(0, 7) == "joined_") { - checkJoinedNssTags.insert(node.key); - } - } - - for (const auto& fieldName : csv2jsonSchema) { - if (fieldName == "joined_nss_map" && !converted[fieldName].empty()) { - EXPECT_EQ(converted[fieldName].value.getTag(), gason::JSON_OBJECT); - for (auto& node : converted[fieldName]) { - EXPECT_TRUE(!orig[node.key].empty()) << "not found joined data: " << node.key; - auto origStr = reindexer::stringifyJson(orig[node.key]); - auto convertedStr = reindexer::stringifyJson(node); - EXPECT_EQ(origStr, convertedStr); - checkJoinedNssTags.erase(node.key); - } - continue; - } - if (converted[fieldName].empty() || orig[fieldName].empty()) { - EXPECT_TRUE(converted[fieldName].empty() && orig[fieldName].empty()) << "fieldName: " << fieldName; - continue; - } - if (orig[fieldName].value.getTag() == gason::JSON_NUMBER) { - EXPECT_EQ(orig[fieldName].As(), converted[fieldName].As()); - } else if (orig[fieldName].value.getTag() == gason::JSON_STRING) { - EXPECT_EQ(orig[fieldName].As(), converted[fieldName].As()); - } else if (orig[fieldName].value.getTag() == gason::JSON_OBJECT || orig[fieldName].value.getTag() == gason::JSON_ARRAY) { - auto origStr = reindexer::stringifyJson(orig[fieldName]); - auto convertedStr = reindexer::stringifyJson(converted[fieldName]); - EXPECT_EQ(origStr, convertedStr); - } - } - - EXPECT_TRUE(checkJoinedNssTags.empty()); - serCsv.Reset(); - serJson.Reset(); - } - } -} - -TEST_F(QueriesApi, ConvertationStringToDoubleDuringSorting) { - using namespace std::string_literals; - const std::string nsName = "ns_convertation_string_to_double_during_sorting"; - Error err = rt.reindexer->OpenNamespace(nsName); - ASSERT_TRUE(err.ok()) << err.what(); - err = rt.reindexer->AddIndex(nsName, 
reindexer::IndexDef{"id", {"id"}, "hash", "int", IndexOpts{}.PK()}); - ASSERT_TRUE(err.ok()) << err.what(); - err = rt.reindexer->AddIndex(nsName, reindexer::IndexDef{"str_idx", {"str_idx"}, "hash", "string", IndexOpts{}}); - ASSERT_TRUE(err.ok()) << err.what(); - - const auto addItem = [&](int id, std::string_view strIdx, std::string_view strFld) { - reindexer::WrSerializer ser; - { - reindexer::JsonBuilder json{ser}; - json.Put("id", id); - json.Put("str_idx", strIdx); - json.Put("str_fld", strFld); - } - Item item = rt.reindexer->NewItem(nsName); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - err = item.FromJSON(ser.Slice()); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - Upsert(nsName, item); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - }; - addItem(0, "123.5", "123.5"); - addItem(1, " 23.5", " 23.5"); - addItem(2, "3.5 ", "3.5 "); - addItem(3, " .5", " .5"); - addItem(4, " .15 ", " .15 "); - addItem(10, "123.5 and something", "123.5 and something"); - addItem(11, " 23.5 and something", " 23.5 and something"); - addItem(12, "3.5 and something", "3.5 and something"); - addItem(13, " .5 and something", " .5 and something"); - - for (const auto& f : {"str_idx"s, "str_fld"s}) { - Query q = Query{nsName}.Where("id", CondLt, 5).Sort("2 * "s + f, false).Strict(StrictModeNames); - reindexer::QueryResults qr; - err = rt.reindexer->Select(q, qr); - ASSERT_TRUE(err.ok()) << err.what(); - int prevId = 10; - for (auto& it : qr) { - ASSERT_TRUE(it.Status().ok()) << it.Status().what(); - const auto item = it.GetItem(); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - const auto currId = item["id"].As(); - EXPECT_LT(currId, prevId); - prevId = currId; - } - } - - for (const auto& f : {"str_idx"s, "str_fld"s}) { - Query q = Query{nsName}.Where("id", CondGt, 5).Sort("2 * "s + f, false).Strict(StrictModeNames); - reindexer::QueryResults qr; - err = rt.reindexer->Select(q, qr); - EXPECT_FALSE(err.ok()); - EXPECT_THAT(err.what(), testing::MatchesRegex("Can't convert '.*' to number")); - } -} - -std::string print(const reindexer::Query& q, reindexer::QueryResults::Iterator& currIt, reindexer::QueryResults::Iterator& prevIt, - const reindexer::QueryResults& qr) { - assertrx(currIt.Status().ok()); - std::string res = '\n' + q.GetSQL() + "\ncurr: "; - reindexer::WrSerializer ser; - const auto err = currIt.GetJSON(ser, false); - assertrx(err.ok()); - res += ser.Slice(); - if (prevIt != qr.end()) { - assertrx(prevIt.Status().ok()); - res += "\nprev: "; - ser.Reset(); - const auto err = prevIt.GetJSON(ser, false); - assertrx(err.ok()); - res += ser.Slice(); - } - return res; -} - -void QueriesApi::sortByNsDifferentTypesImpl(std::string_view fillingNs, const reindexer::Query& qTemplate, const std::string& sortPrefix) { - const auto addItem = [&](int id, auto v) { - reindexer::WrSerializer ser; - { - reindexer::JsonBuilder json{ser}; - json.Put("id", id); - json.Put("value", v); - { - auto obj = json.Object("object"); - obj.Put("nested_value", v); - } - } - Item item = rt.reindexer->NewItem(fillingNs); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - const auto err = item.FromJSON(ser.Slice()); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - Upsert(fillingNs, item); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - }; - for (int id = 0; id < 100; ++id) { - addItem(id, id); - } - for (int id = 100; id < 200; ++id) { - addItem(id, int64_t(id)); - } - 
for (int id = 200; id < 300; ++id) { - addItem(id, double(id) + 0.5); - } - for (int id = 500; id < 600; ++id) { - addItem(id, std::to_string(id)); - } - for (int id = 600; id < 700; ++id) { - addItem(id, std::to_string(id) + RandString()); - } - for (int id = 700; id < 800; ++id) { - addItem(id, char('a' + (id % 100) / 10) + std::string{char('a' + id % 10)} + RandString()); - } - - const auto check = [&](CondType cond, std::vector values, const char* expectedErr = nullptr) { - for (bool desc : {true, false}) { - for (const char* sortField : {"value", "object.nested_value"}) { - auto q = qTemplate; - q.Where("id", cond, values).Sort(sortPrefix + sortField, desc); - reindexer::QueryResults qr; - const auto err = rt.reindexer->Select(q, qr); - if (expectedErr) { - EXPECT_FALSE(err.ok()) << q.GetSQL(); - EXPECT_EQ(err.what(), expectedErr) << q.GetSQL(); - } else { - ASSERT_TRUE(err.ok()) << err.what() << '\n' << q.GetSQL(); - switch (cond) { - case CondRange: - EXPECT_EQ(qr.Count(), values.at(1) - values.at(0) + 1) << q.GetSQL(); - break; - case CondSet: - case CondEq: - EXPECT_EQ(qr.Count(), values.size()) << q.GetSQL(); - break; - case CondAny: - case CondEmpty: - case CondLike: - case CondDWithin: - case CondLt: - case CondLe: - case CondGt: - case CondGe: - case CondAllSet: - assert(0); - } - int prevId = 10000 * (desc ? 1 : -1); - auto prevIt = qr.end(); - for (auto& it : qr) { - ASSERT_TRUE(it.Status().ok()) << it.Status().what() << print(q, it, prevIt, qr); - const auto item = it.GetItem(); - ASSERT_TRUE(item.Status().ok()) << item.Status().what() << print(q, it, prevIt, qr); - const auto currId = item["id"].As(); - if (desc) { - EXPECT_LT(currId, prevId) << print(q, it, prevIt, qr); - } else { - EXPECT_GT(currId, prevId) << print(q, it, prevIt, qr); - } - prevId = currId; - prevIt = it; - } - } - } - } - }; - // same types - for (int id : {0, 100, 200, 500, 600, 700}) { - check(CondRange, {id, id + 99}); - } - // numeric types - check(CondRange, {0, 299}); - // string - check(CondRange, {500, 799}); - // different types - for (int i = 0; i < 10; ++i) { - check(CondSet, {rand() % 100 + 100, 500 + rand() % 300}, "Not comparable types: string and int64"); - check(CondSet, {rand() % 100 + 200, 500 + rand() % 300}, "Not comparable types: string and double"); - } -} - -TEST_F(QueriesApi, SortByJoinedNsDifferentTypes) { - const std::string nsMain{"sort_by_joined_ns_different_types_main"}; - const std::string nsRight{"sort_by_joined_ns_different_types_right"}; - Error err = rt.reindexer->OpenNamespace(nsMain); - ASSERT_TRUE(err.ok()) << err.what(); - err = rt.reindexer->AddIndex(nsMain, reindexer::IndexDef{"id", {"id"}, "hash", "int", IndexOpts{}.PK()}); - ASSERT_TRUE(err.ok()) << err.what(); - err = rt.reindexer->OpenNamespace(nsRight); - ASSERT_TRUE(err.ok()) << err.what(); - err = rt.reindexer->AddIndex(nsRight, reindexer::IndexDef{"id", {"id"}, "hash", "int", IndexOpts{}.PK()}); - ASSERT_TRUE(err.ok()) << err.what(); - for (int id = 0; id < 1000; ++id) { - Item item = rt.reindexer->NewItem(nsMain); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - item["id"] = id; - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - Upsert(nsMain, item); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - } - - sortByNsDifferentTypesImpl(nsRight, Query{nsMain}.InnerJoin("id", "id", CondEq, Query{nsRight}), nsRight + '.'); -} - -TEST_F(QueriesApi, SortByFieldWithDifferentTypes) { - const std::string nsName{"sort_by_field_different_types"}; - Error err = 
rt.reindexer->OpenNamespace(nsName); - ASSERT_TRUE(err.ok()) << err.what(); - err = rt.reindexer->AddIndex(nsName, reindexer::IndexDef{"id", {"id"}, "hash", "int", IndexOpts{}.PK()}); - ASSERT_TRUE(err.ok()) << err.what(); - - sortByNsDifferentTypesImpl(nsName, Query{nsName}, ""); -} - -TEST_F(QueriesApi, SerializeDeserialize) { - Query q = Query(default_namespace).Where(Query(default_namespace), CondAny, {}); - (void)q; - - Query queries[]{ - Query(default_namespace).Where(Query(default_namespace), CondAny, {}), - Query(default_namespace).Where(kFieldNameUuidArr, CondRange, randHeterogeneousUuidArray(2, 2)), - Query(default_namespace) - .WhereComposite(kCompositeFieldUuidName, CondRange, - {VariantArray::Create(nilUuid(), RandString()), VariantArray::Create(randUuid(), RandString())}), - Query(default_namespace).Where(Query(default_namespace).Where(kFieldNameId, CondEq, 10), CondAny, {}), - Query(default_namespace).Not().Where(Query(default_namespace), CondEmpty, {}), - Query(default_namespace).Where(kFieldNameId, CondLt, Query(default_namespace).Aggregate(AggAvg, {kFieldNameId})), - Query(default_namespace) - .Where(kFieldNameGenre, CondSet, Query(joinNs).Select({kFieldNameGenre}).Where(kFieldNameId, CondSet, {10, 20, 30, 40})), - - Query(default_namespace).Where(Query(joinNs).Select({kFieldNameGenre}).Where(kFieldNameId, CondGt, 10), CondSet, {10, 20, 30, 40}), - Query(default_namespace) - .Where(Query(joinNs).Select({kFieldNameGenre}).Where(kFieldNameId, CondGt, 10).Offset(1), CondSet, {10, 20, 30, 40}), - Query(default_namespace) - .Where(Query(joinNs).Where(kFieldNameId, CondGt, 10).Aggregate(AggMax, {kFieldNameGenre}), CondRange, {48, 50}), - Query(default_namespace).Where(Query(joinNs).Where(kFieldNameId, CondGt, 10).ReqTotal(), CondGt, {50}), - Query(default_namespace) - .Debug(LogTrace) - .Where(kFieldNameGenre, CondEq, 5) - .Not() - .Where(Query(default_namespace).Where(kFieldNameGenre, CondEq, 5), CondAny, {}) - .Or() - .Where(kFieldNameGenre, CondSet, Query(joinNs).Select({kFieldNameGenre}).Where(kFieldNameId, CondSet, {10, 20, 30, 40})) - .Not() - .OpenBracket() - .Where(kFieldNameYear, CondRange, {2001, 2020}) - .Or() - .Where(kFieldNameName, CondLike, RandLikePattern()) - .Or() - .Where(Query(joinNs).Where(kFieldNameYear, CondEq, 2000 + rand() % 210), CondEmpty, {}) - .CloseBracket() - .Or() - .Where(kFieldNamePackages, CondSet, RandIntVector(5, 10000, 50)) - .OpenBracket() - .Where(kFieldNameNumeric, CondLt, std::to_string(600)) - .Not() - .OpenBracket() - .Where(kFieldNamePackages, CondSet, RandIntVector(5, 10000, 50)) - .Where(kFieldNameGenre, CondLt, 6) - .Or() - .Where(kFieldNameId, CondLt, Query(default_namespace).Aggregate(AggAvg, {kFieldNameId})) - .CloseBracket() - .Not() - .Where(Query(joinNs).Where(kFieldNameId, CondGt, 10).Aggregate(AggMax, {kFieldNameGenre}), CondRange, {48, 50}) - .Or() - .Where(kFieldNameYear, CondEq, 10) - .CloseBracket(), - - Query(default_namespace) - .Where(kCompositeFieldIdTemp, CondEq, Query(default_namespace).Select({kCompositeFieldIdTemp}).Where(kFieldNameId, CondGt, 10)), - Query(default_namespace) - .Where(Query(default_namespace).Select({kCompositeFieldUuidName}).Where(kFieldNameId, CondGt, 10), CondRange, - {VariantArray::Create(nilUuid(), RandString()), VariantArray::Create(randUuid(), RandString())}), - Query(default_namespace) - .Where(Query(default_namespace).Select({kCompositeFieldAgeGenre}).Where(kFieldNameId, CondGt, 10).Limit(10), CondLe, - {Variant(VariantArray::Create(rand() % 50, rand() % 50))}), - }; - for (Query& q : 
queries) { - reindexer::WrSerializer wser; - q.Serialize(wser); - reindexer::Serializer rser(wser.Slice()); - const auto deserializedQuery = Query::Deserialize(rser); - EXPECT_EQ(q, deserializedQuery) << "Origin query:\n" << q.GetSQL() << "\nDeserialized query:\n" << deserializedQuery.GetSQL(); - } -} diff --git a/cpp_src/gtests/tests/unit/replication_config_tests.cc b/cpp_src/gtests/tests/unit/replication_config_tests.cc deleted file mode 100644 index acd8704b1..000000000 --- a/cpp_src/gtests/tests/unit/replication_config_tests.cc +++ /dev/null @@ -1,716 +0,0 @@ -#include -#include -#include -#include - -#include "core/cjson/jsonbuilder.h" -#include "core/dbconfig.h" -#include "core/item.h" -#include "core/keyvalue/key_string.h" -#include "core/keyvalue/variant.h" -#include "core/namespace/namespacestat.h" -#include "core/queryresults/joinresults.h" -#include "core/reindexer.h" -#include "core/type_consts.h" - -#include "tools/errors.h" -#include "tools/fsops.h" -#include "tools/logger.h" -#include "tools/serializer.h" - -#include "gtest/gtest.h" - -#include - -#define __FILENAME__ (strrchr("/" __FILE__, '/') + 1) -#define GTEST_TRACE_SCOPE(SCOPE_DESCRIPTION) testing::ScopedTrace trace(__FILE__, __LINE__, SCOPE_DESCRIPTION) -#define GTEST_TRACE_FUNCTION() GTEST_TRACE_SCOPE(__FUNCTION__) - -using namespace reindexer; - -class ReplicationConfigTests : public ::testing::Test { -public: - using ItemType = typename reindexer::Reindexer::ItemT; - using QueryResultsType = typename reindexer::Reindexer::QueryResultsT; - enum class ConfigType { File, Namespace }; - - const std::chrono::milliseconds kReplicationConfLoadDelay = std::chrono::milliseconds(1200); - const std::string kSimpleReplConfigStoragePath = fs::JoinPath(fs::GetTempDir(), "reindex/simple_replicationConf_tests/"); - const std::string kStoragePath = kSimpleReplConfigStoragePath; - const std::string kBuiltin = "builtin://" + kStoragePath; - const std::string kConfigNs = "#config"; - const std::string kReplicationConfigFilename = "replication.conf"; - const std::string kReplFilePath = reindexer::fs::JoinPath(kStoragePath, kReplicationConfigFilename); - - // defining test data - const reindexer::ReplicationConfigData initialReplConf{0, 2}; - const reindexer::ReplicationConfigData correctReplConf{10, 2}; - const reindexer::ReplicationConfigData updatedReplConf{100, 3}; - - const reindexer::ReplicationConfigData invalidReplConf{-10, 2}; - const reindexer::ReplicationConfigData invalidReplConf1000{1000, 2}; - const reindexer::ReplicationConfigData fallbackReplConf{0, 2}; - - void SetUp() override { fs::RmDirAll(kStoragePath); } - void TearDown() override {} - - void WriteConfigFile(const std::string& path, const std::string& configYaml) { - std::ofstream file(path, std::ios_base::trunc); - file << configYaml; - file.flush(); - } - - template - void WriteConfigToFile(const ConfigT& newConf, const std::string& path) { - WrSerializer ser; - newConf.GetYAML(ser); - - WriteConfigFile(path, std::string(ser.Slice())); - } - - bool CheckReplicationConfigData(const Error& errParse, const ReplicationConfigData& actualConfig, - const ReplicationConfigData& expectedConfig) { - GTEST_TRACE_FUNCTION(); - - EXPECT_EQ(expectedConfig.Validate(), actualConfig.Validate()) << "parse result: " << errParse.what(); - EXPECT_EQ(expectedConfig, actualConfig); - return expectedConfig == actualConfig; - } - - bool CheckReplicationConfigFile(const std::string& storagePath, const ReplicationConfigData& expectedConf, - bool expectErrorParseYAML = false) { - 
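// Reads replication.conf from the given storage path and parses it as YAML; it then either
// compares the parsed data against the expected config or, when expectErrorParseYAML is set,
// verifies that parsing fails with errParseYAML.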
GTEST_TRACE_FUNCTION(); - - std::string replConfYaml; - auto read = fs::ReadFile(fs::JoinPath(storagePath, kReplicationConfigFilename), replConfYaml); - EXPECT_GT(read, 0) << "Repl config file read error"; - if (read < 0) return false; - ReplicationConfigData replConf; - auto errParse = replConf.FromYAML(replConfYaml); - - if (expectErrorParseYAML) { - EXPECT_EQ(errParse.code(), errParseYAML) << errParse.what(); - return errParse.code() == errParseYAML; - } else - return CheckReplicationConfigData(errParse, replConf, expectedConf); - } - - bool CheckReplicationConfigNS(reindexer::Reindexer& rx, const ReplicationConfigData& expectedConf, bool expectErrorParseJSON = false) { - GTEST_TRACE_FUNCTION(); - QueryResultsType results; - auto err = rx.Select(Query(kConfigNs).Where("type", CondEq, "replication"), results); - EXPECT_TRUE(err.ok()) << err.what(); - - ReplicationConfigData replConf; - Error errParse{errNotFound, "Not found"}; - for (auto it : results) { - WrSerializer ser; - - err = it.GetJSON(ser, false); - EXPECT_TRUE(err.ok()) << err.what(); - try { - gason::JsonParser parser; - gason::JsonNode configJson = parser.Parse(ser.Slice()); - auto confType = configJson["type"].As(); - if (confType == "replication") { - auto& replConfigJson = configJson["replication"]; - errParse = replConf.FromJSON(replConfigJson); - break; - } - } catch (const Error&) { - assert(false); - } - } - - if (expectErrorParseJSON) { - EXPECT_EQ(errParse.code(), errParseJson) << errParse.what(); - return errParse.code() == errParseJson; - } else { - return CheckReplicationConfigData(errParse, replConf, expectedConf); - } - } - - bool CheckNamespacesReplicationConfig(reindexer::Reindexer& rx, const ReplicationConfigData& expected) { - GTEST_TRACE_FUNCTION(); - { - GTEST_TRACE_SCOPE("Checking #memstats.server_id for non-system namespaces."); - QueryResultsType results; - auto query = "select name, replication.server_id from #memstats"; - auto err = rx.Select(query, results); - EXPECT_TRUE(err.ok()) << err.what(); - for (auto it : results) { - WrSerializer ser; - err = it.GetJSON(ser, false); - EXPECT_TRUE(err.ok()) << err.what(); - try { - gason::JsonParser parser; - gason::JsonNode memstatJson = parser.Parse(ser.Slice()); - auto nsName = memstatJson["name"].As(); - auto& replJson = memstatJson["replication"]; - auto namespaceServerId = replJson["server_id"].As(-1); - EXPECT_EQ(namespaceServerId, expected.serverID) << "Failed for Namespace \"" << nsName << "\""; - } catch (const Error& error) { - EXPECT_TRUE(error.ok()) << error.what(); - } - } - } - - { - GTEST_TRACE_SCOPE("Checking ReplState.nsVersion.server_id for system namespaces"); - std::vector nsDefs; - auto err = rx.EnumNamespaces(nsDefs, reindexer::EnumNamespacesOpts().OnlyNames()); - EXPECT_TRUE(err.ok()) << err.what(); - for (auto& nsDef : nsDefs) { - if (nsDef.name.empty() || (nsDef.name[0] != '#')) { - // we will perform checks only for well defined system namespaces - continue; - } - reindexer::ReplicationStateV2 replState; - auto error = rx.GetReplState(nsDef.name, replState); - EXPECT_TRUE(error.ok()) << error.what(); - EXPECT_EQ(replState.nsVersion.Server(), expected.serverID) << "Check failed for NS \"" << nsDef.name << "\""; - } - } - return true; - } - - template - void SetReplicationConfigNS(reindexer::Reindexer& rx, const ReplicationConfigData& config) { - GTEST_TRACE_FUNCTION(); - upsertConfigItemFromObject(rx, "replication", config); - auto err = rx.Commit(kConfigNs); - EXPECT_TRUE(err.ok()) << err.what(); - } - - template - void 
SetJSONtoConfigNS(reindexer::Reindexer& rx, std::string_view stringJSON) { - GTEST_TRACE_FUNCTION(); - upsertConfigItemFromJSON(rx, stringJSON); - auto err = rx.Commit(kConfigNs); - EXPECT_TRUE(err.ok()) << err.what(); - } - -protected: - template - void upsertConfigItemFromObject(reindexer::Reindexer& rx, std::string_view type, const ValueT& object) { - GTEST_TRACE_FUNCTION(); - - WrSerializer ser; - { - JsonBuilder jb(ser); - jb.Put("type", type); - { - auto objBuilder = jb.Object(type); - object.GetJSON(objBuilder); - } - } - - upsertConfigItemFromJSON(rx, ser.Slice()); - } - - template - void upsertConfigItemFromJSON(reindexer::Reindexer& rx, const std::string_view stringJSON) { - GTEST_TRACE_FUNCTION(); - - auto item = rx.NewItem(kConfigNs); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - auto err = item.FromJSON(stringJSON); - ASSERT_TRUE(err.ok()) << err.what(); - err = rx.Upsert(kConfigNs, item); - if constexpr (ExpectErrorOnUpsert) { - ASSERT_FALSE(err.ok()) << err.what(); - } else { - ASSERT_TRUE(err.ok()) << err.what(); - } - } -}; - -TEST(DBConfigTests, ReadValidJsonConfiguration) { - DBConfigProvider db; - - constexpr std::string_view jsonStr = R"json({ - "profiling":{ - "queriesperfstats": false, - "queries_threshold_us": 10, - "perfstats": false, - "memstats": true, - "activitystats": false, - "long_queries_logging":{ - "select":{ - "threshold_us": -1, - "normalized": false - }, - "update_delete":{ - "threshold_us": -1, - "normalized": false - }, - "transaction":{ - "threshold_us": -1, - "avg_step_threshold_us": -1 - } - } - }, - "namespaces":[ - { - "namespace":"*", - "log_level":"none", - "lazyload":false, - "unload_idle_threshold":0, - "join_cache_mode":"off", - "start_copy_policy_tx_size":10000, - "copy_policy_multiplier":5, - "tx_size_to_always_copy":100000, - "optimization_timeout_ms":800, - "optimization_sort_workers":4, - "wal_size":4000000, - "min_preselect_size": 1000, - "max_preselect_size": 1000, - "max_preselect_part":0.1, - "index_updates_counting_mode":false, - "sync_storage_flush_limit":25000 - } - ], - "replication":{ - "server_id": 10, - "cluster_id": 11, - }, - "async_replication":{ - "role": "none", - "log_level":"none", - "sync_threads":4, - "syncs_per_thread":2, - "online_updates_timeout_sec":20, - "sync_timeout_sec":60, - "retry_sync_interval_msec":30000, - "enable_compression":true, - "batching_routines_count": 100, - "force_sync_on_logic_error": false, - "force_sync_on_wrong_data_hash": false, - "max_wal_depth_on_force_sync": 1000, - "namespaces":[] - "nodes": [] - } - })json"; - - gason::JsonParser parser; - gason::JsonNode configJson = parser.Parse(jsonStr); - - auto err = db.FromJSON(configJson); - EXPECT_TRUE(err.ok()) << err.what(); -} - -TEST(DBConfigTests, ReadInvalidJsonConfiguration) { - DBConfigProvider db; - - constexpr std::string_view jsonStr = R"json({ - "profiling":{ - "queriesperfstats":"false_", - "queries_threshold_us":"10_", - "perfstats": "false_", - "memstats": "true_", - "activitystats": "false_", - "long_queries_logging":{ - "select":{ - "threshold_us": "-1_", - "normalized": "_false" - }, - "update_delete":{ - "threshold_us": "-1_", - "normalized": "+false" - }, - "transaction":{ - "threshold_us": "-1_", - "avg_step_threshold_us": "test -1" - } - } - }, - "namespaces":[ - { - "namespace":"*", - "log_level":"none", - "lazyload":false, - "unload_idle_threshold":0, - "join_cache_mode":"off", - "start_copy_policy_tx_size":10000, - "copy_policy_multiplier":5, - "tx_size_to_always_copy":100000, - 
"optimization_timeout_ms":800, - "optimization_sort_workers":"4_", - "wal_size":4000000, - "min_preselect_size":1000, - "max_preselect_size":"1000_", - "max_preselect_part":0.1, - "index_updates_counting_mode":false, - "sync_storage_flush_limit":25000 - } - ], - "replication":{ - "server_id": "0test", - "cluster_id": "1test", - }, - "async_replication":{ - "role": "none", - "sync_threads":4, - "syncs_per_thread":2, - "online_updates_timeout_sec":20, - "sync_timeout_sec":60, - "retry_sync_interval_msec":30000, - "enable_compression":true, - "batching_routines_count":"100test", - "force_sync_on_logic_error": false, - "force_sync_on_wrong_data_hash": false, - "max_wal_depth_on_force_sync":1000, - "namespaces":[] - "nodes": [] - } - })json"; - - gason::JsonParser parser; - gason::JsonNode configJson = parser.Parse(jsonStr); - - auto err = db.FromJSON(configJson); - EXPECT_FALSE(err.ok()) << err.what(); -} - -/** @brief Tests replication.conf readings at db load or startup - * @details Test plan: - * Continuous tests set, after step 0 working with existing db folder - * 0. Warm up: create and initialize new DB in kStoragePath, close db - * 1. Write correct replication.conf, connect to DB, verify readings - shall success - * 2. Write invalid replication.conf, connect to DB - shall fail with error.code() = errParams - * 3. Write invalid1000 replication.conf, connect to DB - shall fail with error.code() = errParams - * 4. Write invalid replication.conf with non-numeric values, connect to DB - shall fail with error error.code() = errParseYAML - * 5. Write correct updated replication.conf, connect to DB, verify readings - shall success - */ -TEST_F(ReplicationConfigTests, ReadReplicationConfAtStartup) { - // 0. Warm up: - { - reindexer::Reindexer rt; - Error err = rt.Connect(kBuiltin); - ASSERT_TRUE(err.ok()) << err.what(); - } - - // 1. For existing DB: Start with correct replication.conf and connect. CHECK readings. - { - GTEST_TRACE_SCOPE("Starting with correct replication.conf server_id = " + std::to_string(correctReplConf.serverID)); - - WriteConfigToFile(correctReplConf, kReplFilePath); - CheckReplicationConfigFile(kStoragePath, correctReplConf); - - reindexer::Reindexer rt; - Error err = rt.Connect(kBuiltin); - ASSERT_TRUE(err.ok()) << err.what(); - CheckReplicationConfigNS(rt, correctReplConf); - CheckNamespacesReplicationConfig(rt, correctReplConf); - CheckReplicationConfigFile(kStoragePath, correctReplConf); - } - - // 2. Write invalid replication.conf, connect to DB - shall fail with error.code() = errParams - { - GTEST_TRACE_SCOPE("Starting with invalid replication.conf server_id = " + std::to_string(invalidReplConf.serverID)); - WriteConfigToFile(invalidReplConf, kReplFilePath); - CheckReplicationConfigFile(kStoragePath, invalidReplConf); - - reindexer::Reindexer rt; - Error err = rt.Connect(kBuiltin); - ASSERT_EQ(err.code(), errParams) << err.what(); - } - - // 3. Write invalid1000 replication.conf, connect to DB - shall fail with error.code() = errParams - { - GTEST_TRACE_SCOPE("Starting with invalid replication.conf server_id = " + std::to_string(invalidReplConf1000.serverID)); - WriteConfigToFile(invalidReplConf1000, kReplFilePath); - - reindexer::Reindexer rt; - Error err = rt.Connect(kBuiltin); - ASSERT_EQ(err.code(), errParams) << err.what(); - } - - // 4. 
Write invalid replication.conf with non-numeric values, connect to DB - shall fail with error error.code() = errParseYAML - { - GTEST_TRACE_SCOPE("Starting with invalid replication.server_id = \"invalid\""); - WriteConfigFile(kReplFilePath, - "server_id: invalid\n" - "cluster_id: invalid\n"); - - reindexer::Reindexer rt; - Error err = rt.Connect(kBuiltin); - ASSERT_EQ(err.code(), errParseYAML) << err.what(); - } - - // 5. Write correct updated replication.conf, connect to DB, verify readings - shall success - { - GTEST_TRACE_SCOPE("Starting with updated replication.conf server_id = " + std::to_string(updatedReplConf.serverID)); - WriteConfigToFile(updatedReplConf, kReplFilePath); - CheckReplicationConfigFile(kStoragePath, updatedReplConf); - - reindexer::Reindexer rt; - Error err = rt.Connect(kBuiltin); - ASSERT_TRUE(err.ok()) << err.what(); - CheckReplicationConfigNS(rt, updatedReplConf); - CheckNamespacesReplicationConfig(rt, updatedReplConf); - CheckReplicationConfigFile(kStoragePath, updatedReplConf); - } -} - -/** @brief Tests invalid server_id in #config.replication and replication.conf when replication.conf always present - * @details Test plan: - * Continuous tests set - * 0. Warm up: create and initialize new DB in kStoragePath, close - * 1. Write correct replication.conf from initialReplConf, CONNECT, verify - shall success - * All cases from 2 shall be done on existing db connection without restarts - * 2. Write correct replication.conf from correctReplConf, CHECK readings - internal state and replication.conf shall be changed - * 3. Write invalid replication.conf from invalidReplConf, CHECK readings - internal state and replication.conf shall NOT be changed - * 4. Write invalid replication.conf from invalidReplConf1000, CHECK readings - internal state and replication.conf shall NOT be changed - * 5. Write invalid replication.conf with non-numeric values, CHECK readings - internal state and replication.conf shall NOT be changed - */ -TEST_F(ReplicationConfigTests, ReplicationConfChangedAtRuntime) { - // 0. Warm up: create and initialize new DB in kStoragePath - { - reindexer::Reindexer rt; - Error err = rt.Connect(kBuiltin); - ASSERT_TRUE(err.ok()) << err.what(); - } - - // 1. Write correct replication.conf from initialReplConf, CONNECT, verify - shall success - reindexer::Reindexer rt; - { - GTEST_TRACE_SCOPE("Starting with replication.conf server_id = " + std::to_string(initialReplConf.serverID)); - WriteConfigToFile(initialReplConf, kReplFilePath); - CheckReplicationConfigFile(kStoragePath, initialReplConf); - Error err = rt.Connect(kBuiltin); - ASSERT_TRUE(err.ok()) << err.what(); - CheckReplicationConfigNS(rt, initialReplConf); - CheckNamespacesReplicationConfig(rt, initialReplConf); - } - - // All cases from 2 shall be done on existing db connection without restarts - // 2. Write correct replication.conf from correctReplConf, - { - GTEST_TRACE_SCOPE("Writing correct replication.conf server_id = " + std::to_string(correctReplConf.serverID)); - WriteConfigToFile(correctReplConf, kReplFilePath); - std::this_thread::sleep_for(kReplicationConfLoadDelay); - CheckReplicationConfigFile(kStoragePath, correctReplConf); - CheckReplicationConfigNS(rt, correctReplConf); - CheckNamespacesReplicationConfig(rt, correctReplConf); - } - - // 3. 
Write invalid replication.conf from invalidReplConf, - { - GTEST_TRACE_SCOPE("Writing invalid replication.conf server_id = " + std::to_string(invalidReplConf.serverID)); - WriteConfigToFile(invalidReplConf, kReplFilePath); - std::this_thread::sleep_for(kReplicationConfLoadDelay); - CheckReplicationConfigFile(kStoragePath, invalidReplConf); - CheckReplicationConfigNS(rt, correctReplConf); - CheckNamespacesReplicationConfig(rt, correctReplConf); - } - - // 4. Write invalid replication.conf from invalidReplConf1000, CHECK readings - internal state and replication.conf shall NOT be - // changed - { - GTEST_TRACE_SCOPE("Writing invalid replication.conf server_id = " + std::to_string(invalidReplConf1000.serverID)); - WriteConfigToFile(invalidReplConf1000, kReplFilePath); - std::this_thread::sleep_for(kReplicationConfLoadDelay); - CheckReplicationConfigFile(kStoragePath, invalidReplConf1000); - CheckReplicationConfigNS(rt, correctReplConf); - CheckNamespacesReplicationConfig(rt, correctReplConf); - } - - // 5. Write invalid replication.conf with non-numeric values, CHECK readings - internal state and replication.conf shall NOT be changed - { - GTEST_TRACE_SCOPE("Writing invalid replication.conf server_id = \"invalid\""); - const bool kExpectErrorParseYAML = true; - WriteConfigFile(kReplFilePath, - "server_id: invalid\n" - "cluster_id: invalid\n"); - std::this_thread::sleep_for(kReplicationConfLoadDelay); - CheckReplicationConfigFile(kStoragePath, invalidReplConf, kExpectErrorParseYAML); - CheckReplicationConfigNS(rt, correctReplConf); - CheckNamespacesReplicationConfig(rt, correctReplConf); - } -} - -/** @brief Tests updates to server_id in #config.replication when replication.conf always present - * @details Test plan: - * Continuous tests set - * 0. Warm up: create and initialize new DB in kStoragePath, close - * All cases from 1 shall be done on existing db connection without restarts - * 1. Write correct replication.conf from initialReplConf, CHECK readings - * - internal state and replication.conf shall be changed - * 2. Set correct replication data to #config from correctReplConf, CHECK readings - * - internal state and replication.conf shall be changed - * 3. Set invalid replication data to #config from invalidReplConf, CHECK readings - * - internal state and replication.conf shall NOT be changed - * 4. Set invalid replication data to #config from invalidReplConf1000, CHECK readings - * - internal state and replication.conf shall NOT be changed - * 5. Set invalid replication data to #config with non-numeric values, CHECK readings - * - internal state and replication.conf shall NOT be changed - */ -TEST_F(ReplicationConfigTests, SetServerIdToConfigWithReplicationConf) { - const bool kExpectErrorOnUpsert = true; - reindexer::Reindexer rt; - Error err = rt.Connect(kBuiltin); - ASSERT_TRUE(err.ok()) << err.what(); - - // 1. Write correct replication.conf from initialReplConf, CHECK readings - // - internal state and replication.conf shall be changed - { - GTEST_TRACE_SCOPE("Writing replication.conf server_id = " + std::to_string(initialReplConf.serverID)); - WriteConfigToFile(initialReplConf, kReplFilePath); - std::this_thread::sleep_for(kReplicationConfLoadDelay); - CheckReplicationConfigNS(rt, initialReplConf); - CheckReplicationConfigFile(kStoragePath, initialReplConf); - CheckNamespacesReplicationConfig(rt, initialReplConf); - } - - // 2. 
Set correct replication data to #config from correctReplConf, CHECK readings - // - internal state and replication.conf shall be changed - { - GTEST_TRACE_SCOPE("Setting replication.server_id = " + std::to_string(correctReplConf.serverID) + " to #config"); - SetReplicationConfigNS(rt, correctReplConf); - CheckReplicationConfigNS(rt, correctReplConf); - std::this_thread::sleep_for(kReplicationConfLoadDelay); - CheckReplicationConfigFile(kStoragePath, correctReplConf); - CheckNamespacesReplicationConfig(rt, correctReplConf); - } - - // 3. Set invalid replication data to #config from invalidReplConf, CHECK readings - // - internal state and replication.conf shall NOT be changed - { - GTEST_TRACE_SCOPE("Setting invalid replication.server_id = " + std::to_string(invalidReplConf.serverID) + " to #config"); - SetReplicationConfigNS(rt, invalidReplConf); - CheckReplicationConfigNS(rt, invalidReplConf); - std::this_thread::sleep_for(kReplicationConfLoadDelay); - CheckReplicationConfigFile(kStoragePath, correctReplConf); - CheckNamespacesReplicationConfig(rt, correctReplConf); - } - - // 4. Set invalid replication data to #config from invalidReplConf1000, CHECK readings - // - internal state and replication.conf shall NOT be changed - { - GTEST_TRACE_SCOPE("Setting invalid replication.server_id = " + std::to_string(invalidReplConf1000.serverID) + " to #config"); - SetReplicationConfigNS(rt, invalidReplConf1000); - CheckReplicationConfigNS(rt, invalidReplConf1000); - std::this_thread::sleep_for(kReplicationConfLoadDelay); - CheckReplicationConfigFile(kStoragePath, correctReplConf); - CheckNamespacesReplicationConfig(rt, correctReplConf); - } - - // 5. Set invalid replication data to #config with non-numeric values, CHECK readings - // - internal state and replication.conf shall NOT be changed - { - using namespace std::string_view_literals; - GTEST_TRACE_SCOPE("Setting invalid replication.server_id = \"invalid\" to #config"); - SetJSONtoConfigNS(rt, - R"json({ - "type":"replication", - "replication":{ - "server_id":"invalid", - "cluster_id":"invalid", - } - })json"sv); - const bool kExpectErrorParseJson = true; - CheckReplicationConfigNS(rt, invalidReplConf1000, kExpectErrorParseJson); - std::this_thread::sleep_for(kReplicationConfLoadDelay); - CheckReplicationConfigFile(kStoragePath, correctReplConf); - CheckNamespacesReplicationConfig(rt, correctReplConf); - } -} - -/** @brief Checks invalid server_id in #config when replication.conf does not exist - * @details Test plan: - * Continuous tests set - * 1. Start with default db, set correct server_id, set invalid server_id, shutdown - * All cases from 2 shall be done on existing db - * 2. Start with invalid server_id, check fallback server_id present, set another invalid server_id, shutdown - * 3. Start with invalid server_id, check fallback server_id present, restore correct server_id, shutdown - * 4. Start with correct sever_id, verify - */ -TEST_F(ReplicationConfigTests, SetServerIDToConfigRestartWithoutReplicationConf) { - const bool kExpectErrorOnUpsert = true; - - // 1. 
Start with default db, set correct server_id, set invalid server_id, shutdown - { - reindexer::Reindexer rt; - { - GTEST_TRACE_SCOPE("Setting correct replication.server_id = " + std::to_string(correctReplConf.serverID) + " to #config"); - Error err = rt.Connect(kBuiltin); - ASSERT_TRUE(err.ok()) << err.what(); - - SetReplicationConfigNS(rt, correctReplConf); - CheckReplicationConfigNS(rt, correctReplConf); - std::this_thread::sleep_for(kReplicationConfLoadDelay); - ASSERT_EQ(fs::Stat(kReplFilePath), fs::StatError) << "replication.conf shall not exist when present."; - CheckNamespacesReplicationConfig(rt, correctReplConf); - } - { - GTEST_TRACE_SCOPE("Setting invalid replication.server_id = " + std::to_string(invalidReplConf.serverID) + " to #config"); - - SetReplicationConfigNS(rt, invalidReplConf); - CheckReplicationConfigNS(rt, invalidReplConf); - std::this_thread::sleep_for(kReplicationConfLoadDelay); - ASSERT_EQ(fs::Stat(kReplFilePath), fs::StatError) << "replication.conf shall not exist when present."; - CheckNamespacesReplicationConfig(rt, correctReplConf); - } - } - - // 2. Start with invalid server_id, check fallback server_id present, set another invalid server_id, shutdown - { - reindexer::Reindexer rt; - { - GTEST_TRACE_SCOPE("Reloading with invalid replication.server_id = " + std::to_string(invalidReplConf.serverID) + " to #config"); - ASSERT_TRUE(fs::Stat(kReplFilePath) == fs::StatError) << "replication.conf shall not exist when present."; - - Error err = rt.Connect(kBuiltin); - ASSERT_TRUE(err.ok()) << err.what(); - CheckReplicationConfigNS(rt, invalidReplConf); - CheckNamespacesReplicationConfig(rt, fallbackReplConf); - } - - { - GTEST_TRACE_SCOPE("Setting invalid replication.server_id = " + std::to_string(invalidReplConf1000.serverID) + " to #config"); - SetReplicationConfigNS(rt, invalidReplConf1000); - std::this_thread::sleep_for(kReplicationConfLoadDelay); - CheckReplicationConfigNS(rt, invalidReplConf1000); - ASSERT_EQ(fs::Stat(kReplFilePath), fs::StatError) << "replication.conf shall not exist when present."; - CheckNamespacesReplicationConfig(rt, fallbackReplConf); - } - } - - // 3. Start with invalid server_id, check fallback server_id present, restore correct server_id, shutdown - { - reindexer::Reindexer rt; - { - GTEST_TRACE_SCOPE("Reloading with invalid replication.server_id = " + std::to_string(invalidReplConf1000.serverID) + - " in #config"); - ASSERT_TRUE(fs::Stat(kReplFilePath) == fs::StatError) << "replication.conf shall not exist when present."; - - Error err = rt.Connect(kBuiltin); - ASSERT_TRUE(err.ok()) << err.what(); - CheckReplicationConfigNS(rt, invalidReplConf1000); - ASSERT_EQ(fs::Stat(kReplFilePath), fs::StatError) << "replication.conf shall not exist when present."; - CheckNamespacesReplicationConfig(rt, fallbackReplConf); - } - - { - GTEST_TRACE_SCOPE("Setting correct replication.server_id = " + std::to_string(correctReplConf.serverID) + " to #config"); - SetReplicationConfigNS(rt, correctReplConf); - CheckReplicationConfigNS(rt, correctReplConf); - ASSERT_EQ(fs::Stat(kReplFilePath), fs::StatError) << "replication.conf shall not exist when present."; - CheckNamespacesReplicationConfig(rt, correctReplConf); - } - } - - // 4. 
Start with correct sever_id, verify - { - GTEST_TRACE_SCOPE("Reloading with correct replication.server_id = " + std::to_string(correctReplConf.serverID) + " in #config"); - ASSERT_EQ(fs::Stat(kReplFilePath), fs::StatError) << "replication.conf shall not exist when present."; - - reindexer::Reindexer rt; - Error err = rt.Connect(kBuiltin); - ASSERT_TRUE(err.ok()) << err.what(); - CheckReplicationConfigNS(rt, correctReplConf); - ASSERT_EQ(fs::Stat(kReplFilePath), fs::StatError) << "replication.conf shall not exist when present."; - CheckNamespacesReplicationConfig(rt, correctReplConf); - } -} diff --git a/cpp_src/gtests/tests/unit/replication_test.cc b/cpp_src/gtests/tests/unit/replication_test.cc deleted file mode 100644 index 63eb540b5..000000000 --- a/cpp_src/gtests/tests/unit/replication_test.cc +++ /dev/null @@ -1,600 +0,0 @@ -#include -#include -#include "cluster/stats/replicationstats.h" -#include "replication_load_api.h" -#include "wal/walrecord.h" - -TEST_F(ReplicationLoadApi, Base) { - // Check replication in multithread mode with data writes and server restarts - std::atomic leaderWasRestarted = false; - const std::string kNsSome = "some"; - const std::string kNsSome1 = "some1"; - InitNs(); - stop = false; - SetWALSize(masterId_, 100000, kNsSome); - WaitSync(kNsSome); - WaitSync(kNsSome1); - - FillData(1000); - - std::thread destroyer([this, &leaderWasRestarted]() { - int count = 0; - while (!stop) { - if (!(count % 30)) { - auto restartId = rand() % kDefaultServerCount; - RestartServer(restartId); - if (restartId == masterId_) { - leaderWasRestarted = true; - } - } - std::this_thread::sleep_for(std::chrono::milliseconds(100)); - } - }); - - std::thread statsReader([this]() { - while (!stop) { - GetReplicationStats(masterId_); - std::this_thread::sleep_for(std::chrono::milliseconds(100)); - } - }); - - SetWALSize(masterId_, 50000, kNsSome); - for (size_t i = 0; i < 2; ++i) { - if (i % 3 == 0) DeleteFromMaster(); - SetWALSize(masterId_, (int64_t(i) + 1) * 25000, kNsSome1); - FillData(1000); - GetReplicationStats(masterId_); - SetWALSize(masterId_, (int64_t(i) + 1) * 50000, kNsSome); - SimpleSelect(0); - } - - SetWALSize(masterId_, 50000, "some1"); - - stop = true; - destroyer.join(); - statsReader.join(); - - ForceSync(); - WaitSync(kNsSome); - WaitSync(kNsSome1); - - std::this_thread::sleep_for(std::chrono::seconds(1)); // Add some time for stats stabilization - - // Check final stats - auto stats = GetReplicationStats(masterId_); - EXPECT_EQ(stats.logLevel, LogTrace); - // Validate force/wal syncs - if (leaderWasRestarted) { - EXPECT_GE(stats.forceSyncs.count + stats.walSyncs.count, 2 * (kDefaultServerCount - 1)) - << "Force syncs: " << stats.forceSyncs.count << "; WAL syncs: " << stats.walSyncs.count; - } else { - EXPECT_GE(stats.walSyncs.count, kDefaultServerCount - 1); - EXPECT_GT(stats.walSyncs.avgTimeUs, 0); - EXPECT_GT(stats.walSyncs.maxTimeUs, 0); - } - if (stats.forceSyncs.count > 0) { - EXPECT_GT(stats.forceSyncs.avgTimeUs, 0); - EXPECT_GT(stats.forceSyncs.maxTimeUs, 0); - } else { - EXPECT_EQ(stats.forceSyncs.avgTimeUs, 0); - EXPECT_EQ(stats.forceSyncs.maxTimeUs, 0); - } - if (stats.walSyncs.count > 0) { - EXPECT_GT(stats.walSyncs.avgTimeUs, 0); - EXPECT_GT(stats.walSyncs.maxTimeUs, 0); - } else { - EXPECT_EQ(stats.walSyncs.avgTimeUs, 0); - EXPECT_EQ(stats.walSyncs.maxTimeUs, 0); - } - // Validate nodes/ns states - auto replConf = GetSrv(masterId_)->GetServerConfig(ServerControl::ConfigType::Namespace); - ASSERT_EQ(replConf.nodes.size(), stats.nodeStats.size()); - for 
(auto& nodeStat : stats.nodeStats) { - auto dsnIt = std::find_if(replConf.nodes.begin(), replConf.nodes.end(), - [&nodeStat](const AsyncReplicationConfigTest::Node& node) { return nodeStat.dsn == node.dsn; }); - ASSERT_NE(dsnIt, replConf.nodes.end()) << "Unexpected dsn value: " << nodeStat.dsn; - ASSERT_EQ(nodeStat.status, cluster::NodeStats::Status::Online); - ASSERT_EQ(nodeStat.syncState, cluster::NodeStats::SyncState::OnlineReplication); - ASSERT_EQ(nodeStat.role, cluster::RaftInfo::Role::Follower); - ASSERT_TRUE(nodeStat.namespaces.empty()); - } -} - -#if !defined(REINDEX_WITH_TSAN) -TEST_F(ReplicationLoadApi, SingleSlaveTest) { - // Check replication in multithread mode with data writes, delete queries and server restarts - InitNs(); - stop = false; - FillData(1000); - - std::thread writingThread([this]() { - while (!stop) { - FillData(1000); - } - }); - - std::thread removingThread([this]() { - size_t counter = 0; - while (!stop) { - std::this_thread::sleep_for(std::chrono::seconds(3)); - int i = rand() % 2; - counter++; - - RestartServer(i); - if (counter % 3 == 0) DeleteFromMaster(); - } - }); - - for (size_t i = 0; i < 2; ++i) { - SimpleSelect(0); - SetWALSize(masterId_, (int64_t(i) + 1) * 1000, "some1"); - SetWALSize(masterId_, (int64_t(i) + 1) * 1000, "some"); - std::this_thread::sleep_for(std::chrono::seconds(3)); - } - - stop = true; - writingThread.join(); - removingThread.join(); - ForceSync(); - WaitSync("some"); - WaitSync("some1"); -} -#endif - -TEST_F(ReplicationLoadApi, WALResizeStaticData) { - // Check WAL resizing with constant data part - InitNs(); - - const std::string nsName("some"); - auto master = GetSrv(masterId_)->api.reindexer; - // Check new wal size with empty namespace - ASSERT_NO_FATAL_FAILURE(SetWALSize(masterId_, 1000, nsName)); - { - BaseApi::QueryResultsType qr(kResultsWithPayloadTypes | kResultsCJson | kResultsWithItemID | kResultsWithRaw); - Error err = master->Select(Query(nsName).Where("#lsn", CondGt, int64_t(0)), qr); - EXPECT_TRUE(err.ok()) << err.what(); - EXPECT_EQ(qr.Count(), 4); - } - { - BaseApi::QueryResultsType qr(kResultsWithPayloadTypes | kResultsCJson | kResultsWithItemID | kResultsWithRaw); - Error err = master->Select(Query(nsName).Where("#lsn", CondGt, int64_t(2)), qr); - EXPECT_TRUE(err.ok()) << err.what(); - EXPECT_EQ(qr.Count(), 2); - } - - // Add data, which do not exceed current wal size - FillData(500); - - BaseApi::QueryResultsType qrLast100_1(kResultsWithPayloadTypes | kResultsCJson | kResultsWithItemID | kResultsWithRaw); - BaseApi::QueryResultsType qrLast100_2(kResultsWithPayloadTypes | kResultsCJson | kResultsWithItemID | kResultsWithRaw); - BaseApi::QueryResultsType qrLast100_3(kResultsWithPayloadTypes | kResultsCJson | kResultsWithItemID | kResultsWithRaw); - - { - BaseApi::QueryResultsType qr(kResultsWithPayloadTypes | kResultsCJson | kResultsWithItemID | kResultsWithRaw); - Error err = master->Select(Query(nsName).Where("#lsn", CondGt, int64_t(0)), qr); - EXPECT_TRUE(err.ok()) << err.what(); - EXPECT_EQ(qr.Count(), 504); - } - { - BaseApi::QueryResultsType qr(kResultsWithPayloadTypes | kResultsCJson | kResultsWithItemID | kResultsWithRaw); - Error err = master->Select(Query(nsName).Where("#lsn", CondGt, int64_t(503)), qr); - EXPECT_TRUE(err.ok()) << err.what(); - EXPECT_EQ(qr.Count(), 1); - } - { - BaseApi::QueryResultsType qr(kResultsWithPayloadTypes | kResultsCJson | kResultsWithItemID | kResultsWithRaw); - Error err = master->Select(Query(nsName).Where("#lsn", CondGt, int64_t(504)), qr); - EXPECT_EQ(err.code(), 
errOutdatedWAL) << err.what(); - EXPECT_EQ(qr.Count(), 0); - } - { - Error err = master->Select(Query(nsName).Where("#lsn", CondGt, int64_t(404)), qrLast100_1); - EXPECT_TRUE(err.ok()) << err.what(); - EXPECT_EQ(qrLast100_1.Count(), 100); - } - // Set wal size, which is less than current data count - ASSERT_NO_FATAL_FAILURE(SetWALSize(masterId_, 100, nsName)); - { - BaseApi::QueryResultsType qr(kResultsWithPayloadTypes | kResultsCJson | kResultsWithItemID | kResultsWithRaw); - Error err = master->Select(Query(nsName).Where("#lsn", CondGt, int64_t(403)), qr); - EXPECT_EQ(err.code(), errOutdatedWAL) << err.what(); - } - { - Error err = master->Select(Query(nsName).Where("#lsn", CondGt, int64_t(404)), qrLast100_2); - EXPECT_TRUE(err.ok()) << err.what(); - EXPECT_EQ(qrLast100_2.Count(), 100); - } - { - BaseApi::QueryResultsType qr(kResultsWithPayloadTypes | kResultsCJson | kResultsWithItemID | kResultsWithRaw); - Error err = master->Select(Query(nsName).Where("#lsn", CondGt, int64_t(503)), qr); - EXPECT_TRUE(err.ok()) << err.what(); - EXPECT_EQ(qr.Count(), 1); - } - { - BaseApi::QueryResultsType qr(kResultsWithPayloadTypes | kResultsCJson | kResultsWithItemID | kResultsWithRaw); - Error err = master->Select(Query(nsName).Where("#lsn", CondGt, int64_t(504)), qr); - EXPECT_EQ(err.code(), errOutdatedWAL) << err.what(); - EXPECT_EQ(qr.Count(), 0); - } - // Set wal size, which is larger than current data count - ASSERT_NO_FATAL_FAILURE(SetWALSize(masterId_, 2000, nsName)); - { - BaseApi::QueryResultsType qr(kResultsWithPayloadTypes | kResultsCJson | kResultsWithItemID | kResultsWithRaw); - Error err = master->Select(Query(nsName).Where("#lsn", CondGt, int64_t(403)), qr); - EXPECT_EQ(err.code(), errOutdatedWAL) << err.what(); - EXPECT_EQ(qr.Count(), 0); - } - { - Error err = master->Select(Query(nsName).Where("#lsn", CondGt, int64_t(404)), qrLast100_3); - EXPECT_TRUE(err.ok()) << err.what(); - EXPECT_EQ(qrLast100_3.Count(), 100); - } - { - BaseApi::QueryResultsType qr(kResultsWithPayloadTypes | kResultsCJson | kResultsWithItemID | kResultsWithRaw); - Error err = master->Select(Query(nsName).Where("#lsn", CondGt, int64_t(503)), qr); - EXPECT_TRUE(err.ok()) << err.what(); - EXPECT_EQ(qr.Count(), 1); - } - { - BaseApi::QueryResultsType qr(kResultsWithPayloadTypes | kResultsCJson | kResultsWithItemID | kResultsWithRaw); - Error err = master->Select(Query(nsName).Where("#lsn", CondGt, int64_t(504)), qr); - EXPECT_EQ(err.code(), errOutdatedWAL) << err.what(); - EXPECT_EQ(qr.Count(), 0); - } - - auto qrToSet = [](const BaseApi::QueryResultsType& qr) { - std::unordered_set items; - WrSerializer ser; - for (auto& item : qr) { - if (item.IsRaw()) { - reindexer::WALRecord rec(item.GetRaw()); - EXPECT_EQ(rec.type, WalReplState); - } else { - ser.Reset(); - auto err = item.GetCJSON(ser, false); - EXPECT_TRUE(err.ok()); - items.emplace(ser.Slice()); - } - } - return items; - }; - // Validate, that there are some records, which were not changed after all the wal resizings - auto items_1 = qrToSet(qrLast100_1); - auto items_2 = qrToSet(qrLast100_2); - auto items_3 = qrToSet(qrLast100_3); - ASSERT_EQ(items_1.size(), 99); - ASSERT_TRUE(items_1 == items_2); - ASSERT_TRUE(items_1 == items_3); -} - -TEST_F(ReplicationLoadApi, WALResizeDynamicData) { - // Check WAL resizing in combination with data refilling - InitNs(); - - // Check case, when new wal size is larger, than actual records count, and records count does not exceed wal size after setting - const std::string nsName("some"); - 
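// [Editor's note: illustrative sketch only -- not part of the original patch or of the deleted test file.]
// The WAL-resize tests around this point all rely on the same probe: selecting through the virtual
// "#lsn" index and checking whether the requested position is still inside the (possibly resized) WAL
// window. Judging by the assertions above, an empty namespace already carries 4 WAL records and each
// upserted item appears to add one more, which is why 500 items yield counts of 504 / 1 / errOutdatedWAL
// at offsets 0 / 503 / 504. A condensed, hedged form of that probe, reusing only handles visible in these
// tests (master, nsName, BaseApi::QueryResultsType); lastKnownLsn and probeQr are the editor's own names:
//
//   const int64_t lastKnownLsn = 404;
//   BaseApi::QueryResultsType probeQr(kResultsWithPayloadTypes | kResultsCJson | kResultsWithItemID | kResultsWithRaw);
//   Error probeErr = master->Select(Query(nsName).Where("#lsn", CondGt, lastKnownLsn), probeQr);
//   if (probeErr.ok()) {
//       // Online catch-up is possible: probeQr holds every WAL record newer than lastKnownLsn.
//   } else if (probeErr.code() == errOutdatedWAL) {
//       // The requested LSN has already been trimmed from the WAL, so a full (force) sync would be required.
//   }
// [End of editor's note.]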
ASSERT_NO_FATAL_FAILURE(SetWALSize(masterId_, 1000, nsName)); - FillData(500); - - // Check case, when new wal size is less, than actual records count - auto master = GetSrv(masterId_)->api.reindexer; - ASSERT_NO_FATAL_FAILURE(SetWALSize(masterId_, 100, nsName)); - FillData(50); - { - BaseApi::QueryResultsType qr(kResultsWithPayloadTypes | kResultsCJson | kResultsWithItemID | kResultsWithRaw); - Error err = master->Select(Query(nsName).Where("#lsn", CondGt, int64_t(453)), qr); - EXPECT_EQ(err.code(), errOutdatedWAL) << err.what(); - } - { - BaseApi::QueryResultsType qr(kResultsWithPayloadTypes | kResultsCJson | kResultsWithItemID | kResultsWithRaw); - Error err = master->Select(Query(nsName).Where("#lsn", CondGt, int64_t(454)), qr); - EXPECT_TRUE(err.ok()) << err.what(); - EXPECT_EQ(qr.Count(), 100); - } - // Check case, when new wal size is larger, than actual records count, and records count exceeds wal size after setting - ASSERT_NO_FATAL_FAILURE(SetWALSize(masterId_, 200, nsName)); - FillData(500); - { - BaseApi::QueryResultsType qr(kResultsWithPayloadTypes | kResultsCJson | kResultsWithItemID | kResultsWithRaw); - Error err = master->Select(Query(nsName).Where("#lsn", CondGt, int64_t(853)), qr); - EXPECT_EQ(err.code(), errOutdatedWAL) << err.what(); - EXPECT_EQ(qr.Count(), 0); - } - { - BaseApi::QueryResultsType qr(kResultsWithPayloadTypes | kResultsCJson | kResultsWithItemID | kResultsWithRaw); - Error err = master->Select(Query(nsName).Where("#lsn", CondGt, int64_t(854)), qr); - EXPECT_TRUE(err.ok()) << err.what(); - EXPECT_EQ(qr.Count(), 200); - } - { - BaseApi::QueryResultsType qr(kResultsWithPayloadTypes | kResultsCJson | kResultsWithItemID | kResultsWithRaw); - Error err = master->Select(Query(nsName).Where("#lsn", CondGt, int64_t(1053)), qr); - EXPECT_TRUE(err.ok()) << err.what(); - EXPECT_EQ(qr.Count(), 1); - } - { - BaseApi::QueryResultsType qr(kResultsWithPayloadTypes | kResultsCJson | kResultsWithItemID | kResultsWithRaw); - Error err = master->Select(Query(nsName).Where("#lsn", CondGt, int64_t(1054)), qr); - EXPECT_EQ(err.code(), errOutdatedWAL) << err.what(); - EXPECT_EQ(qr.Count(), 0); - } -} - -TEST_F(ReplicationLoadApi, ConfigReadingOnStartup) { - // Check if server reads config on startup - const size_t kTestServerID = 0; - - auto srv = GetSrv(kTestServerID); - const auto kReplFilePath = srv->GetReplicationConfigFilePath(); - const auto kAsyncReplFilePath = srv->GetAsyncReplicationConfigFilePath(); - srv.reset(); - StopServer(kTestServerID); - WriteConfigFile(kAsyncReplFilePath, - "role: none\n" - "mode: default\n" - "retry_sync_interval_msec: 3000\n" - "syncs_per_thread: 2\n" - "app_name: node_XXX\n" - "force_sync_on_logic_error: true\n" - "force_sync_on_wrong_data_hash: false\n" - "online_updates_delay_msec: 200\n" - "namespaces: []\n" - "nodes: []"); - WriteConfigFile(kReplFilePath, - "server_id: 4\n" - "cluster_id: 2\n"); - StartServer(kTestServerID); - AsyncReplicationConfigTest config("none", {}, true, false, 4, "node_XXX", {}, "default", 200); - CheckReplicationConfigNamespace(kTestServerID, config); -} - -TEST_F(ReplicationLoadApi, DuplicatePKFollowerTest) { - InitNs(); - const unsigned int kItemCount = 5; - auto srv = GetSrv(masterId_); - auto& api = srv->api; - - std::string changedIds; - const unsigned int kChangedCount = 2; - std::unordered_set ids; - for (unsigned i = 0; i < kChangedCount; ++i) { - ids.insert(std::rand() % kItemCount); - } - - bool isFirst = true; - for (const auto id : ids) { - if (!isFirst) changedIds += ", "; - changedIds += 
std::to_string(id); - isFirst = false; - } - - std::unordered_map> items; - Error err; - for (size_t i = 0; i < kItemCount; ++i) { - std::string jsonChange; - BaseApi::ItemType item = api.NewItem("some"); - auto json = fmt::sprintf(R"json({"id":%d,"int":%d,"string":"%s","uuid":"%s"})json", i, i + 100, std::to_string(1 + 1000), nilUUID); - err = item.FromJSON(json); - api.Upsert("some", item); - jsonChange = json; - int idNew = i; - if (ids.find(i) != ids.end()) { - jsonChange = fmt::sprintf(R"json({"id":%d,"int":%d,"string":"%s","uuid":"%s"})json", kItemCount * 2 + i, i + 100, - std::to_string(1 + 1000), nilUUID); - idNew = kItemCount * 2 + i; - } - items.emplace(idNew, std::make_pair(json, jsonChange)); - } - - WaitSync("some"); - { - BaseApi::QueryResultsType qr; - err = api.reindexer->Select("Update some set id=id+" + std::to_string(kItemCount * 2) + " where id in(" + changedIds + ")", qr); - ASSERT_TRUE(err.ok()) << err.what(); - WaitSync("some"); - } - - for (size_t k = 0; k < GetServersCount(); k++) { - auto server = GetSrv(k); - { - BaseApi::QueryResultsType qr; - err = server->api.reindexer->Select("select * from some order by id", qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), items.size()); - for (auto i : qr) { - WrSerializer ser; - err = i.GetJSON(ser, false); - gason::JsonParser parser; - auto root = parser.Parse(ser.Slice()); - int id = root["id"].As(); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(ser.Slice(), items[id].second); - } - } - { - for (auto id : ids) { - BaseApi::QueryResultsType qr; - err = server->api.reindexer->Select("select * from some where id=" + std::to_string(id), qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), 0); - } - } - { - for (auto id : ids) { - BaseApi::QueryResultsType qr; - err = server->api.reindexer->Select("select * from some where id=" + std::to_string(id + kItemCount * 2), qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), 1); - } - } - } -} - -TEST_F(ReplicationLoadApi, ConfigSync) { - // Check automatic replication config file and #config namespace sync - using ReplNode = AsyncReplicationConfigTest::Node; - const size_t kTestServerID = 0; - - SCOPED_TRACE("Set replication config via file"); - RestartWithReplicationConfigFiles(kTestServerID, - "role: none\n" - "retry_sync_interval_msec: 3000\n" - "syncs_per_thread: 2\n" - "app_name: node_1\n" - "force_sync_on_logic_error: true\n" - "force_sync_on_wrong_data_hash: false\n" - "namespaces: []\n" - "nodes: []", - "server_id: 3\n" - "cluster_id: 2\n"); - // Validate config file - AsyncReplicationConfigTest config("none", {}, true, false, 3, "node_1", {}, "default"); - CheckReplicationConfigNamespace(kTestServerID, config); - - config = AsyncReplicationConfigTest("leader", {ReplNode{"cproto://127.0.0.1:53019/db"}, ReplNode{"cproto://127.0.0.1:53020/db"}}, false, - true, 3, "node_1", {"ns1", "ns2"}, "default"); - SCOPED_TRACE("Set replication config(two nodes) via namespace"); - SetServerConfig(kTestServerID, config); - // Validate #config namespace - CheckReplicationConfigFile(kTestServerID, config); - - config = AsyncReplicationConfigTest("leader", {ReplNode{"cproto://127.0.0.1:45000/db"}}, false, true, 3, "node_xxx", {}, "default"); - SCOPED_TRACE("Set replication config(one node) via namespace"); - SetServerConfig(kTestServerID, config); - // Validate replication.conf file - CheckReplicationConfigFile(kTestServerID, config); - - config = AsyncReplicationConfigTest("leader", {ReplNode{"cproto://127.0.0.1:45000/db", {{"ns1", 
"ns2"}}}}, false, true, 3, "node_xxx", - {}, "default", 150); - SCOPED_TRACE("Set replication config with custom ns list for existing node via namespace"); - SetServerConfig(kTestServerID, config); - // Validate replication.conf file - CheckReplicationConfigFile(kTestServerID, config); - std::this_thread::sleep_for(std::chrono::seconds(2)); // In case if OS doesn't have nanosecods in stat result - - SCOPED_TRACE("Set replication config via file"); - GetSrv(kTestServerID) - ->WriteAsyncReplicationConfig( - "role: leader\n" - "retry_sync_interval_msec: 3000\n" - "syncs_per_thread: 2\n" - "app_name: node_1\n" - "force_sync_on_logic_error: false\n" - "force_sync_on_wrong_data_hash: true\n" - "online_updates_delay_msec: 50\n" - "namespaces:\n" - " - ns1\n" - " - ns3\n" - "nodes:\n" - " -\n" - " dsn: cproto://127.0.0.1:53001/db1\n" - " namespaces:\n" - " - ns4\n" - " -\n" - " dsn: cproto://127.0.0.1:53002/db2\n"); - config = AsyncReplicationConfigTest("leader", - {ReplNode{"cproto://127.0.0.1:53001/db1", {{"ns4"}}}, ReplNode{"cproto://127.0.0.1:53002/db2"}}, - false, true, 3, "node_1", {"ns1", "ns3"}, "default", 50); - // Validate #config namespace - CheckReplicationConfigNamespace(kTestServerID, config, std::chrono::seconds(3)); - - SCOPED_TRACE("Check server id switch"); - GetSrv(kTestServerID) - ->WriteReplicationConfig( - "server_id: 2\n" - "cluster_id: 2\n"); - config.serverId = 2; - // Validate #config namespace - CheckReplicationConfigNamespace(kTestServerID, config, std::chrono::seconds(3)); -} - -#if !defined(REINDEX_WITH_TSAN) -TEST_F(ReplicationLoadApi, DynamicRoleSwitch) { - // Validate replication behavior after node's role switch - InitNs(); - stop = false; - - // Create #config changing threads - std::vector configUpdateThreads(GetServersCount()); - for (size_t i = 0; i < configUpdateThreads.size(); ++i) { - configUpdateThreads[i] = std::thread( - [this](size_t id) { - while (!stop) { - std::this_thread::sleep_for(std::chrono::milliseconds(100)); - size_t cnt = rand() % 5; - SetOptmizationSortWorkers(id, cnt, "*"); - } - }, - i); - } - - // Switch master and await sync in each loop iteration - const size_t kPortionSize = 2000; - size_t expectedLsnCounter = 3; - for (size_t i = 1; i < 8; i++) { - FillData(kPortionSize); - expectedLsnCounter += kPortionSize; - WaitSync("some", lsn_t(expectedLsnCounter, masterId_)); - WaitSync("some1", lsn_t(expectedLsnCounter, masterId_)); - SwitchMaster(i % kDefaultServerCount, {"some", "some1"}, (i % 2 == 0) ? 
"default" : "from_sync_leader"); - } - - stop = true; - for (auto& th : configUpdateThreads) { - th.join(); - } -} -#endif - -TEST_F(ReplicationLoadApi, NodeOfflineLastError) { - InitNs(); - - ServerControl::Interface::Ptr leader = GetSrv(0); - StopServer(1); - for (std::size_t i = 0; i < 10; i++) { - reindexer::cluster::ReplicationStats stats = leader->GetReplicationStats(cluster::kAsyncReplStatsType); - if (!stats.nodeStats.empty() && stats.nodeStats[0].lastError.code() == errNetwork) { - break; - } - std::this_thread::sleep_for(std::chrono::seconds(1)); - } - - reindexer::cluster::ReplicationStats stats = leader->GetReplicationStats(cluster::kAsyncReplStatsType); - ASSERT_EQ(stats.nodeStats.size(), std::size_t(3)); - ASSERT_EQ(stats.nodeStats[0].lastError.code(), errNetwork); - ASSERT_FALSE(stats.nodeStats[0].lastError.what().empty()); -} - -TEST_F(ReplicationLoadApi, LogLevel) { - // Check async replication log level setup - InitNs(); - - std::atomic stop = {false}; - std::thread th([this, &stop] { - // Simple insertion thread for race detection - while (!stop) { - FillData(1); - std::this_thread::sleep_for(std::chrono::milliseconds(5)); - } - }); - - // Replication in tests must be started with 'Trace' log level - auto stats = GetReplicationStats(masterId_); - EXPECT_EQ(stats.logLevel, LogTrace); - - // Changing log level - const LogLevel levels[] = {LogInfo, LogTrace, LogWarning, LogError, LogNone, LogInfo}; - for (auto level : levels) { - SetReplicationLogLevel(masterId_, LogLevel(level)); - stats = GetReplicationStats(masterId_); - EXPECT_EQ(stats.logLevel, LogLevel(level)); - } - - // Checking log level after replication restart. It should be reset to 'Trace' - ForceSync(); - stats = GetReplicationStats(masterId_); - EXPECT_EQ(stats.logLevel, LogTrace); - - stop = true; - th.join(); -} diff --git a/cpp_src/gtests/tests/unit/rpcclient_test.cc b/cpp_src/gtests/tests/unit/rpcclient_test.cc deleted file mode 100644 index a53f11310..000000000 --- a/cpp_src/gtests/tests/unit/rpcclient_test.cc +++ /dev/null @@ -1,1075 +0,0 @@ -#include -#include -#include -#include "query_aggregate_strict_mode_test.h" -#include "rpcclient_api.h" -#include "rpcserver_fake.h" -#include "tools/fsops.h" - -#include "client/reindexer.h" -#include "client/snapshot.h" -#include "core/cjson/jsonbuilder.h" -#include "coroutine/waitgroup.h" -#include "gtests/tests/gtest_cout.h" -#include "net/ev/ev.h" -#include "reindexertestapi.h" -#include "tools/hardware_concurrency.h" - -using std::chrono::seconds; - -TEST_F(RPCClientTestApi, CoroRequestTimeout) { - // Should return error on request timeout - RPCServerConfig conf; - conf.loginDelay = std::chrono::seconds(0); - conf.openNsDelay = std::chrono::seconds(4); - AddFakeServer(kDefaultRPCServerAddr, conf); - StartServer(); - ev::dynamic_loop loop; - loop.spawn([&loop]() noexcept { - reindexer::client::ReindexerConfig config; - config.NetTimeout = seconds(1); - reindexer::client::CoroReindexer rx(config); - auto err = rx.Connect(std::string("cproto://") + kDefaultRPCServerAddr + "/test_db", loop); - ASSERT_TRUE(err.ok()) << err.what(); - const std::string kNamespaceName = "MyNamespace"; - err = rx.AddNamespace(reindexer::NamespaceDef(kNamespaceName)); - EXPECT_EQ(err.code(), errTimeout); - loop.sleep(std::chrono::seconds(4)); - err = rx.DropNamespace(kNamespaceName); - ASSERT_TRUE(err.ok()) << err.what(); - }); - loop.run(); - Error err = StopServer(); - ASSERT_TRUE(err.ok()) << err.what(); -} - -static std::chrono::seconds GetMaxTimeForCoroSelectTimeout(unsigned 
requests, std::chrono::seconds delay) { - const auto cpus = reindexer::hardware_concurrency(); - const auto kBase = std::max(requests * delay.count() / 16, delay.count()); - const std::chrono::seconds kDefaultMaxTime(kBase + 10); - if (cpus == 0) { - TestCout() << fmt::sprintf("Unable to get CPUs count. Using test max time %d seconds Test may flack in this case", - 4 * kDefaultMaxTime.count()) - << std::endl; - return 4 * kDefaultMaxTime; - } - auto resultMaxTime = kDefaultMaxTime; - if (cpus == 1) { - resultMaxTime = 16 * kDefaultMaxTime; - } else if (cpus > 1 && cpus < 4) { - resultMaxTime = 8 * kDefaultMaxTime; - } else if (cpus >= 4 && cpus < 8) { - resultMaxTime = 4 * kDefaultMaxTime; - } else if (cpus >= 8 && cpus < 16) { - resultMaxTime = 2 * kDefaultMaxTime; - } - TestCout() << fmt::sprintf("Test max time: %d seconds for %d total requests on %d CPUs with %d seconds of delay for each request", - resultMaxTime.count(), requests, cpus, delay.count()) - << std::endl; - return resultMaxTime; -} - -TEST_F(RPCClientTestApi, CoroSelectTimeout) { - const std::string kNamespaceName = "MyNamespace"; - constexpr size_t kCorCount = 16; - constexpr size_t kQueriesCount = 3; - constexpr std::chrono::seconds kSelectDelay(4); - RPCServerConfig conf; - conf.loginDelay = std::chrono::seconds(0); - conf.selectDelay = kSelectDelay; - conf.openNsDelay = std::chrono::seconds{0}; - auto& server = AddFakeServer(kDefaultRPCServerAddr, conf); - StartServer(); - ev::dynamic_loop loop; - std::vector finished(kCorCount, false); - ev::timer testTimer; - testTimer.set([&](ev::timer&, int) { - // Just to print output on CI - ASSERT_TRUE(false) << fmt::sprintf("Test deadline exceeded. Closed count: %d. Expected: %d. %d|", server.CloseQRRequestsCount(), - kCorCount * kQueriesCount, std::chrono::steady_clock::now().time_since_epoch().count()); - }); - testTimer.set(loop); - const auto kMaxTime = GetMaxTimeForCoroSelectTimeout(kCorCount * kQueriesCount, kSelectDelay); - testTimer.start(double(kMaxTime.count())); - for (size_t i = 0; i < kCorCount; ++i) { - loop.spawn([&, index = i] { - reindexer::client::ReindexerConfig config; - config.NetTimeout = seconds(1); - reindexer::client::CoroReindexer rx(config); - auto err = rx.Connect(std::string("cproto://") + kDefaultRPCServerAddr + "/test_db", loop); - ASSERT_TRUE(err.ok()) << err.what(); - coroutine::wait_group wg; - wg.add(kQueriesCount); - for (size_t j = 0; j < kQueriesCount; ++j) { - loop.spawn([&] { - coroutine::wait_group_guard wgg(wg); - reindexer::client::CoroQueryResults qr; - err = rx.Select(reindexer::Query(kNamespaceName), qr); - EXPECT_EQ(err.code(), errTimeout); - }); - } - wg.wait(); - loop.granular_sleep(kSelectDelay * kQueriesCount * kCorCount, std::chrono::milliseconds{300}, - [&] { return server.CloseQRRequestsCount() >= kCorCount * kQueriesCount; }); - EXPECT_EQ(server.CloseQRRequestsCount(), kCorCount * kQueriesCount); - err = rx.AddNamespace(reindexer::NamespaceDef(kNamespaceName + std::to_string(index))); - ASSERT_TRUE(err.ok()) << err.what(); - finished[index] = true; - }); - } - loop.run(); - for (size_t i = 0; i < kCorCount; ++i) { - ASSERT_TRUE(finished[i]); - } - Error const err = StopServer(); - ASSERT_TRUE(err.ok()) << err.what(); -} - -TEST_F(RPCClientTestApi, CoroRequestCancels) { - // Should return error on request cancel - AddFakeServer(); - StartServer(); - ev::dynamic_loop loop; - loop.spawn([&loop]() noexcept { - reindexer::client::CoroReindexer rx; - auto err = rx.Connect(std::string("cproto://") + kDefaultRPCServerAddr + 
"/test_db", loop); - ASSERT_TRUE(err.ok()) << err.what(); - - { - CancelRdxContext ctx; - ctx.Cancel(); - err = rx.WithContext(&ctx).AddNamespace(reindexer::NamespaceDef("MyNamespace")); - EXPECT_EQ(err.code(), errCanceled); - } - - { - CancelRdxContext ctx; - coroutine::wait_group wg; - loop.spawn(wg, [&ctx, &rx] { - auto err = rx.WithContext(&ctx).AddNamespace(reindexer::NamespaceDef("MyNamespace")); - EXPECT_EQ(err.code(), errCanceled); - }); - - loop.sleep(std::chrono::seconds(1)); - ctx.Cancel(); - wg.wait(); - } - }); - loop.run(); - Error err = StopServer(); - ASSERT_TRUE(err.ok()) << err.what(); -} - -TEST_F(RPCClientTestApi, CoroSuccessfullRequestWithTimeout) { - // Should be able to execute some basic requests with timeout - AddFakeServer(); - StartServer(); - ev::dynamic_loop loop; - loop.spawn([&loop]() noexcept { - reindexer::client::ReindexerConfig config; - config.NetTimeout = seconds(6); - reindexer::client::CoroReindexer rx(config); - auto err = rx.Connect(std::string("cproto://") + kDefaultRPCServerAddr + "/test_db", loop); - ASSERT_TRUE(err.ok()) << err.what(); - err = rx.AddNamespace(reindexer::NamespaceDef("MyNamespace")); - ASSERT_TRUE(err.ok()) << err.what(); - }); - loop.run(); - Error err = StopServer(); - ASSERT_TRUE(err.ok()) << err.what(); -} - -TEST_F(RPCClientTestApi, CoroErrorLoginResponse) { - // Should return error on failed Login - AddFakeServer(); - StartServer(kDefaultRPCServerAddr, errForbidden); - ev::dynamic_loop loop; - loop.spawn([&loop]() noexcept { - reindexer::client::CoroReindexer rx; - auto err = rx.Connect(std::string("cproto://") + kDefaultRPCServerAddr + "/test_db", loop); - ASSERT_TRUE(err.ok()) << err.what(); - err = rx.AddNamespace(reindexer::NamespaceDef("MyNamespace")); - EXPECT_EQ(err.code(), errForbidden); - }); - loop.run(); - Error err = StopServer(); - ASSERT_TRUE(err.ok()) << err.what(); -} - -TEST_F(RPCClientTestApi, CoroStatus) { - // Should return correct Status, based on server's state - std::string dbPath = std::string(kDbPrefix) + "/" + std::to_string(kDefaultRPCPort); - reindexer::fs::RmDirAll(dbPath); - AddRealServer(dbPath); - ev::dynamic_loop loop; - loop.spawn([this, &loop]() noexcept { - reindexer::client::CoroReindexer rx; - reindexer::client::ConnectOpts opts; - opts.CreateDBIfMissing(); - auto err = rx.Connect(std::string("cproto://") + kDefaultRPCServerAddr + "/db1", loop, opts); - ASSERT_TRUE(err.ok()) << err.what(); - for (size_t i = 0; i < 5; ++i) { - StartServer(); - err = rx.Status(); - ASSERT_TRUE(err.ok()) << err.what(); - err = StopServer(); - ASSERT_TRUE(err.ok()) << err.what(); - loop.sleep(std::chrono::milliseconds(20)); // Allow reading coroutine to handle disconnect - err = rx.Status(); - ASSERT_EQ(err.code(), errNetwork) << err.what(); - } - }); - loop.run(); -} - -TEST_F(RPCClientTestApi, CoroUpserts) { - // Should be able to execute some basic operations within multiple concurrent coroutines - using namespace reindexer::client; - using namespace reindexer::net::ev; - using reindexer::coroutine::wait_group; - - StartDefaultRealServer(); - dynamic_loop loop; - - loop.spawn([&loop]() noexcept { - const std::string nsName = "ns1"; - const std::string dsn = "cproto://" + kDefaultRPCServerAddr + "/db1"; - reindexer::client::ConnectOpts opts; - opts.CreateDBIfMissing(); - CoroReindexer rx; - auto err = rx.Connect(dsn, loop, opts); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rx.OpenNamespace(nsName); - ASSERT_TRUE(err.ok()) << err.what(); - err = rx.AddIndex(nsName, {"id", {"id"}, "hash", "int", 
IndexOpts().PK()}); - ASSERT_TRUE(err.ok()) << err.what(); - - auto upsertFn = [&rx, &nsName](size_t begin, size_t cnt) { - for (size_t i = begin; i < begin + cnt; ++i) { - auto item = rx.NewItem(nsName); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - - reindexer::WrSerializer wrser; - reindexer::JsonBuilder jsonBuilder(wrser, ObjType::TypeObject); - jsonBuilder.Put("id", i); - jsonBuilder.End(); - char* endp = nullptr; - auto err = item.Unsafe().FromJSON(wrser.Slice(), &endp); - ASSERT_TRUE(err.ok()) << err.what(); - err = rx.Upsert(nsName, item); - ASSERT_TRUE(err.ok()) << err.what(); - } - }; - - auto txFunc = [&rx, &nsName](size_t begin, size_t cnt) { - auto tx = rx.NewTransaction(nsName); - ASSERT_TRUE(tx.Status().ok()) << tx.Status().what(); - for (size_t i = begin; i < begin + cnt; ++i) { - auto item = tx.NewItem(); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - - reindexer::WrSerializer wrser; - reindexer::JsonBuilder jsonBuilder(wrser, ObjType::TypeObject); - jsonBuilder.Put("id", i); - jsonBuilder.End(); - char* endp = nullptr; - auto err = item.Unsafe().FromJSON(wrser.Slice(), &endp); - ASSERT_TRUE(err.ok()) << err.what(); - err = tx.Upsert(std::move(item)); - ASSERT_TRUE(err.ok()) << err.what(); - } - CoroQueryResults qrTx; - auto err = rx.CommitTransaction(tx, qrTx); - ASSERT_TRUE(err.ok()) << err.what(); - }; - - auto selectFn = [&loop, &rx, &nsName](size_t cnt) { - constexpr size_t kMultiplier = 9; - for (size_t j = 0; j < kMultiplier * cnt; ++j) { - if (j % kMultiplier == 0) { - reindexer::client::CoroQueryResults qr; - auto err = rx.Select(reindexer::Query(nsName), qr); - ASSERT_TRUE(err.ok()) << err.what(); - for (auto& it : qr) { - ASSERT_TRUE(it.Status().ok()) << it.Status().what(); - } - } else { - auto err = rx.Status(); - ASSERT_TRUE(err.ok()) << err.what(); - } - loop.sleep(std::chrono::milliseconds(1)); - } - }; - - wait_group wg; - constexpr size_t kCnt = 3000; - loop.spawn(wg, std::bind(upsertFn, 0, kCnt)); - loop.spawn(wg, std::bind(upsertFn, kCnt, kCnt)); - loop.spawn(wg, std::bind(upsertFn, 2 * kCnt, kCnt)); - loop.spawn(wg, std::bind(upsertFn, 3 * kCnt, kCnt)); - loop.spawn(wg, std::bind(selectFn, 300)); - loop.spawn(wg, std::bind(txFunc, 4 * kCnt, 2 * kCnt)); - loop.spawn(wg, std::bind(txFunc, 6 * kCnt, 2 * kCnt)); - loop.spawn(wg, std::bind(txFunc, 8 * kCnt, 2 * kCnt)); - loop.spawn(wg, std::bind(selectFn, 300)); - - wg.wait(); - - reindexer::client::CoroQueryResults qr; - err = rx.Select(reindexer::Query(nsName), qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), 10 * kCnt); - for (auto& it : qr) { - ASSERT_TRUE(it.Status().ok()) << it.Status().what(); - } - rx.Stop(); - }); - - loop.run(); - Error err = StopServer(); - EXPECT_TRUE(err.ok()) << err.what(); -} - -template -void ReconnectTest(RxT& rx, RPCClientTestApi& api, size_t dataCount, const std::string& nsName) { - typename RxT::QueryResultsT qr; - auto err = rx.Select(reindexer::Query(nsName), qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), dataCount); - - api.StopServer(); - api.StartServer(); - qr = typename RxT::QueryResultsT(); - err = rx.Select(reindexer::Query(nsName), qr); - if (err.ok()) { - ASSERT_EQ(qr.Count(), dataCount); - } else { - ASSERT_EQ(err.code(), errNetwork) << err.what(); - } - qr = typename RxT::QueryResultsT(); - err = rx.Select(reindexer::Query(nsName), qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), dataCount); - - rx.Stop(); -} - -TEST_F(RPCClientTestApi, Reconnect) { - // CoroReindexer 
should be able to handle reconnect properly - using namespace reindexer::client; - using namespace reindexer::net::ev; - - StartDefaultRealServer(); - dynamic_loop loop; - - loop.spawn([this, &loop]() noexcept { - constexpr auto kDataCount = 2; - const std::string kNsName = "ns1"; - const std::string dsn = "cproto://" + kDefaultRPCServerAddr + "/db1"; - reindexer::client::ConnectOpts opts; - opts.CreateDBIfMissing(); - CoroReindexer rx; - auto err = rx.Connect(dsn, loop, opts); - ASSERT_TRUE(err.ok()) << err.what(); - CreateNamespace(rx, kNsName); - FillData(rx, kNsName, 0, kDataCount); - - ReconnectTest(rx, *this, kDataCount, kNsName); - }); - - loop.run(); -} - -TEST_F(RPCClientTestApi, ReconnectSyncCoroRx) { - // Reindexer should be able to handle reconnect properly - using namespace reindexer::client; - using namespace reindexer::net::ev; - - StartDefaultRealServer(); - dynamic_loop loop; - - loop.spawn([this, &loop]() noexcept { - constexpr auto kDataCount = 2; - const std::string kNsName = "ns1"; - const std::string dsn = "cproto://" + kDefaultRPCServerAddr + "/db1"; - { - reindexer::client::ConnectOpts opts; - opts.CreateDBIfMissing(); - CoroReindexer crx; - auto err = crx.Connect(dsn, loop, opts); - ASSERT_TRUE(err.ok()) << err.what(); - CreateNamespace(crx, kNsName); - FillData(crx, kNsName, 0, kDataCount); - } - - client::Reindexer rx; - auto err = rx.Connect(dsn); - ASSERT_TRUE(err.ok()) << err.what(); - - ReconnectTest(rx, *this, kDataCount, kNsName); - }); - - loop.run(); -} - -TEST_F(RPCClientTestApi, ServerRestart) { - // Client should handle error on server's restart - using namespace reindexer::client; - using namespace reindexer::net::ev; - using reindexer::coroutine::wait_group; - using reindexer::coroutine::wait_group_guard; - - std::atomic terminate = false; - std::atomic ready = false; - - // Startup server - StartDefaultRealServer(); - enum class Step { Init, ShutdownInProgress, ShutdownDone, RestartInProgress, RestartDone }; - std::atomic step = Step::Init; - - // Create thread, performing upserts - std::thread upsertsTh([&terminate, &ready, &step] { - dynamic_loop loop; - - loop.spawn([&loop, &terminate, &ready, &step]() noexcept { - const std::string nsName = "ns1"; - const std::string dsn = "cproto://" + kDefaultRPCServerAddr + "/db1"; - reindexer::client::ConnectOpts opts; - opts.CreateDBIfMissing(); - CoroReindexer rx; - auto err = rx.Connect(dsn, loop, opts); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rx.OpenNamespace(nsName); - ASSERT_TRUE(err.ok()) << err.what(); - err = rx.AddIndex(nsName, {"id", {"id"}, "hash", "int", IndexOpts().PK()}); - ASSERT_TRUE(err.ok()) << err.what(); - - auto upsertFn = [&loop, &rx, &nsName, &terminate, &step](wait_group& wg, size_t begin, size_t cnt) { - wait_group_guard wgg(wg); - while (!terminate) { - for (size_t i = begin; i < begin + cnt; ++i) { - auto item = rx.NewItem(nsName); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - - reindexer::WrSerializer wrser; - reindexer::JsonBuilder jsonBuilder(wrser, ObjType::TypeObject); - jsonBuilder.Put("id", i); - jsonBuilder.End(); - char* endp = nullptr; - auto err = item.Unsafe().FromJSON(wrser.Slice(), &endp); - ASSERT_TRUE(err.ok()) << err.what(); - auto localStep = step.load(); - err = rx.Upsert(nsName, item); - if (localStep == step.load()) { - switch (localStep) { - case Step::Init: - // If server is running, updates have to return OK - ASSERT_TRUE(err.ok()) << err.what(); - break; - case Step::ShutdownDone: - // If server was shutdown, updates have to return 
error - ASSERT_TRUE(!err.ok()); - break; - case Step::RestartDone: - // If server was restarted, updates have to return OK - ASSERT_TRUE(err.ok()) << err.what(); - break; - case Step::ShutdownInProgress: - case Step::RestartInProgress:; // No additional checks in transition states - } - } - } - loop.sleep(std::chrono::milliseconds(50)); - } - }; - - wait_group wg; - constexpr size_t kCnt = 100; - wg.add(3); - loop.spawn(std::bind(upsertFn, std::ref(wg), 0, kCnt)); - loop.spawn(std::bind(upsertFn, std::ref(wg), kCnt, kCnt)); - loop.spawn(std::bind(upsertFn, std::ref(wg), 2 * kCnt, kCnt)); - - ready = true; - wg.wait(); - - rx.Stop(); - }); - - loop.run(); - }); - while (!ready) { // -V776 - std::this_thread::sleep_for(std::chrono::milliseconds(100)); - } - - // Shutdown server - step = Step::ShutdownInProgress; - Error err = StopServer(); - ASSERT_TRUE(err.ok()) << err.what(); - step = Step::ShutdownDone; - std::this_thread::sleep_for(std::chrono::milliseconds(300)); - - step = Step::RestartInProgress; - StartServer(); - step = Step::RestartDone; - std::this_thread::sleep_for(std::chrono::milliseconds(300)); - - terminate = true; - upsertsTh.join(); -} - -TEST_F(RPCClientTestApi, TemporaryNamespaceAutoremove) { - // Temporary namespace must be automaticly removed after disconnect - using namespace reindexer::client; - using namespace reindexer::net::ev; - - StartDefaultRealServer(); - dynamic_loop loop; - - loop.spawn([&loop]() noexcept { - const std::string dsn = "cproto://" + kDefaultRPCServerAddr + "/db1"; - reindexer::client::ConnectOpts opts; - opts.CreateDBIfMissing(); - CoroReindexer rx; - auto err = rx.Connect(dsn, loop, opts); - ASSERT_TRUE(err.ok()) << err.what(); - - std::string tmpNsName; - err = rx.CreateTemporaryNamespace("ns1", tmpNsName); - ASSERT_TRUE(err.ok()) << err.what(); - - // Check if temporary ns was created - std::vector nsList; - err = rx.EnumNamespaces(nsList, EnumNamespacesOpts().OnlyNames().HideSystem().HideTemporary()); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(nsList.size(), 0); - err = rx.EnumNamespaces(nsList, EnumNamespacesOpts().OnlyNames().HideSystem()); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(nsList.size(), 1); - ASSERT_EQ(nsList[0].name, tmpNsName); - - // Reconnect - rx.Stop(); - err = rx.Connect(dsn, loop, opts); - ASSERT_TRUE(err.ok()) << err.what(); - - // Allow server to handle disconnect - std::this_thread::sleep_for(std::chrono::seconds(2)); - - // Check if namespce was removed - nsList.clear(); - err = rx.EnumNamespaces(nsList, EnumNamespacesOpts().OnlyNames().HideSystem()); - ASSERT_TRUE(err.ok()) << err.what(); - if (nsList.size() > 0) { - for (auto& ns : nsList) std::cerr << ns.name << std::endl; - ASSERT_TRUE(false); - } - - rx.Stop(); - }); - - loop.run(); -} - -TEST_F(RPCClientTestApi, ItemJSONWithDouble) { - ev::dynamic_loop loop; - loop.spawn([&loop]() noexcept { - reindexer::client::CoroReindexer rx; - auto err = rx.Connect(std::string("cproto://") + kDefaultRPCServerAddr + "/test_db", loop); - ASSERT_TRUE(err.ok()) << err.what(); - auto item = rx.NewItem("ns"); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - - { - const std::string kJSON = R"_({"id":1234,"double":0.0})_"; - err = item.FromJSON(kJSON); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(item.GetJSON(), kJSON); - } - - { - const std::string kJSON = R"_({"id":1234,"double":0.1})_"; - err = item.FromJSON(kJSON); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(item.GetJSON(), kJSON); - } - }); - loop.run(); -} - -TEST_F(RPCClientTestApi, 
UnknowResultsFlag) { - // Check if server will not resturn unknown result flag - StartDefaultRealServer(); - ev::dynamic_loop loop; - bool finished = false; - loop.spawn([&loop, &finished] { - reindexer::client::CoroReindexer rx; - reindexer::client::ConnectOpts opts; - opts.CreateDBIfMissing(); - auto err = rx.Connect(std::string("cproto://") + kDefaultRPCServerAddr + "/db1", loop, opts); - ASSERT_TRUE(err.ok()) << err.what(); - const int kResultsUnknownFlag = 0x40000000; // Max available int flag - client::CoroQueryResults qr(kResultsCJson | kResultsWithItemID | kResultsUnknownFlag); - err = rx.Select(Query("#config").Where("type", CondEq, {"namespaces"}), qr); - ASSERT_TRUE(err.ok()) << err.what(); - // Check, that kResultsUnknownFlag was not sent back - ASSERT_EQ(qr.GetFlags(), kResultsCJson | kResultsWithItemID); - ASSERT_EQ(qr.Count(), 1); - finished = true; - }); - loop.run(); - ASSERT_TRUE(finished); -} - -TEST_F(RPCClientTestApi, FirstSelectWithFetch) { - StartDefaultRealServer(); - ev::dynamic_loop loop; - - loop.spawn([this, &loop]() noexcept { - constexpr auto kDataCount = 15000; - const std::string kNsName = "ns1"; - const std::string dsn = "cproto://" + kDefaultRPCServerAddr + "/db1"; - { - reindexer::client::ConnectOpts opts; - opts.CreateDBIfMissing(); - client::CoroReindexer crx; - auto err = crx.Connect(dsn, loop, opts); - ASSERT_TRUE(err.ok()) << err.what(); - CreateNamespace(crx, kNsName); - FillData(crx, kNsName, 0, kDataCount); - } - { - reindexer::client::ConnectOpts opts; - client::CoroReindexer rxs; - auto err = rxs.Connect(dsn, loop, opts); - ASSERT_TRUE(err.ok()) << err.what(); - client::CoroQueryResults res; - rxs.Select("Select * from " + kNsName + " order by id", res); - size_t idCounter = 0; - for (auto i : res) { - ASSERT_TRUE(i.Status().ok()); - WrSerializer ser; - err = i.GetJSON(ser, false); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(ser.Slice(), "{\"id\":" + std::to_string(idCounter) + "}"); - idCounter++; - } - } - { - client::ConnectOpts opts; - client::CoroReindexer rxs; - auto err = rxs.Connect(dsn, loop, opts); - ASSERT_TRUE(err.ok()) << err.what(); - client::Snapshot snapshot; - err = rxs.GetSnapshot(kNsName, SnapshotOpts(), snapshot); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_GT(snapshot.Size(), 0); - for (auto s : snapshot) { - const SnapshotChunk& chunk = s.Chunk(); - const std::vector& rec = chunk.Records(); - ASSERT_GT(rec.size(), 0); - } - } - { - client::ConnectOpts opts; - client::CoroReindexer rxs; - auto err = rxs.Connect(dsn, loop, opts); - ASSERT_TRUE(err.ok()) << err.what(); - client::CoroTransaction tr = rxs.NewTransaction(kNsName); - const int kTrItemCount = 10; - for (int ti = 0; ti < kTrItemCount; ti++) { - auto item = tr.NewItem(); - reindexer::WrSerializer wrser; - reindexer::JsonBuilder jb(wrser); - jb.Put("id", ti + 100000); - jb.End(); - err = item.FromJSON(wrser.Slice()); - ASSERT_TRUE(err.ok()) << err.what(); - err = tr.Insert(std::move(item)); - ASSERT_TRUE(err.ok()) << err.what(); - } - client::CoroQueryResults res; - err = rxs.CommitTransaction(tr, res); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(res.Count(), kTrItemCount); - } - }); - - loop.run(); -} - -TEST_F(RPCClientTestApi, FetchingWithJoin) { - // Check that particular results fetching does not break tagsmatchers - using namespace reindexer::client; - using namespace reindexer::net::ev; - using reindexer::coroutine::wait_group; - using reindexer::coroutine::wait_group_guard; - - StartDefaultRealServer(); - dynamic_loop loop; - - 
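// [Editor's note: illustrative sketch only -- not part of the original patch or of the deleted test file.]
// This test and its neighbours (FirstSelectWithFetch, QRWithMultipleIterationLoops, the aggregation
// tests further below) all exercise the client-side paging configured via client::ReindexerConfig::FetchAmount:
// judging by these tests, a Select response carries at most FetchAmount items and iterating a
// CoroQueryResults past that boundary fetches the next portion from the server. A condensed form of the
// pattern, assuming a connected event loop, dsn and opts as in the surrounding tests ("ns1" is the
// editor's placeholder namespace name):
//
//   client::ReindexerConfig cfg;
//   cfg.FetchAmount = 50;                       // page size for query-results streaming
//   CoroReindexer rx(cfg);
//   auto err = rx.Connect(dsn, loop, opts);     // dsn/loop/opts as in the surrounding tests
//   client::CoroQueryResults qr;
//   err = rx.Select(Query("ns1"), qr);          // only the first portion is materialized here
//   WrSerializer ser;
//   for (auto& it : qr) {                       // crossing a portion boundary triggers an extra fetch
//       ser.Reset();
//       err = it.GetJSON(ser, false);
//   }
// [End of editor's note.]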
loop.spawn([&loop]() noexcept { - const std::string kLeftNsName = "left_ns"; - const std::string kRightNsName = "right_ns"; - const std::string dsn = "cproto://" + kDefaultRPCServerAddr + "/db1"; - reindexer::client::ConnectOpts opts; - opts.CreateDBIfMissing(); - reindexer::client::ReindexerConfig cfg; - constexpr auto kFetchCount = 50; - constexpr auto kNsSize = kFetchCount * 3; - cfg.FetchAmount = kFetchCount; - CoroReindexer rx(cfg); - auto err = rx.Connect(dsn, loop, opts); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rx.OpenNamespace(kLeftNsName); - ASSERT_TRUE(err.ok()) << err.what(); - err = rx.AddIndex(kLeftNsName, {"id", {"id"}, "tree", "int", IndexOpts().PK()}); - ASSERT_TRUE(err.ok()) << err.what(); - - err = rx.OpenNamespace(kRightNsName); - ASSERT_TRUE(err.ok()) << err.what(); - err = rx.AddIndex(kRightNsName, {"id", {"id"}, "hash", "int", IndexOpts().PK()}); - ASSERT_TRUE(err.ok()) << err.what(); - - auto upsertFn = [&rx](const std::string& nsName, bool withValue) { - for (size_t i = 0; i < kNsSize; ++i) { - auto item = rx.NewItem(nsName); - ASSERT_TRUE(item.Status().ok()) << nsName << " " << item.Status().what(); - - WrSerializer wrser; - JsonBuilder jsonBuilder(wrser, ObjType::TypeObject); - jsonBuilder.Put("id", i); - if (withValue) { - jsonBuilder.Put("value", "value_" + std::to_string(i)); - } - jsonBuilder.End(); - char* endp = nullptr; - auto err = item.Unsafe().FromJSON(wrser.Slice(), &endp); - ASSERT_TRUE(err.ok()) << nsName << " " << err.what(); - err = rx.Upsert(nsName, item); - ASSERT_TRUE(err.ok()) << nsName << " " << err.what(); - } - }; - - upsertFn(kLeftNsName, false); - upsertFn(kRightNsName, true); - - client::CoroQueryResults qr; - err = rx.Select(Query(kLeftNsName).Join(InnerJoin, Query(kRightNsName)).On("id", CondEq, "id").Sort("id", false), qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), kNsSize); - WrSerializer ser; - unsigned i = 0; - for (auto& it : qr) { - ser.Reset(); - ASSERT_TRUE(it.Status().ok()) << it.Status().what(); - err = it.GetJSON(ser, false); - ASSERT_TRUE(err.ok()) << err.what(); - const auto expected = fmt::sprintf(R"json({"id":%d,"joined_%s":[{"id":%d,"value":"value_%d"}]})json", i, kRightNsName, i, i); - EXPECT_EQ(ser.Slice(), expected); - i++; - } - rx.Stop(); - }); - - loop.run(); -} - -TEST_F(RPCClientTestApi, QRWithMultipleIterationLoops) { - // Check if iterator has error status if user attempts to iterate over qrs, which were already fetched - using namespace reindexer::client; - using namespace reindexer::net::ev; - - StartDefaultRealServer(); - dynamic_loop loop; - - loop.spawn([&loop, this]() noexcept { - const std::string kNsName = "QRWithMultipleIterationLoops"; - const std::string dsn = "cproto://" + kDefaultRPCServerAddr + "/db1"; - client::ConnectOpts opts; - opts.CreateDBIfMissing(); - client::ReindexerConfig cfg; - constexpr auto kFetchCount = 50; - constexpr auto kNsSize = kFetchCount * 3; - cfg.FetchAmount = kFetchCount; - CoroReindexer rx(cfg); - auto err = rx.Connect(dsn, loop, opts); - ASSERT_TRUE(err.ok()) << err.what(); - - CreateNamespace(rx, kNsName); - FillData(rx, kNsName, 0, kNsSize); - - client::CoroQueryResults qr; - err = rx.Select(Query(kNsName), qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), kNsSize); - WrSerializer ser; - // First iteration loop (all of the items must be valid) - for (auto& it : qr) { - ser.Reset(); - ASSERT_TRUE(it.Status().ok()) << it.Status().what(); - err = it.GetJSON(ser, false); - ASSERT_TRUE(err.ok()) << err.what(); - auto item = 
it.GetItem(); - ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - } - // Second iteration loop (unavailable items must be invalid) - unsigned id = 0; - for (auto& it : qr) { - ser.Reset(); - if (id >= kNsSize - kFetchCount) { - ASSERT_TRUE(it.Status().ok()) << it.Status().what(); - err = it.GetJSON(ser, false); - ASSERT_TRUE(err.ok()) << err.what(); - EXPECT_EQ(fmt::sprintf("{\"id\":%d}", id), ser.Slice()); - } else { - EXPECT_FALSE(it.Status().ok()) << it.Status().what(); - err = it.GetJSON(ser, false); - EXPECT_FALSE(err.ok()) << err.what(); - auto item = it.GetItem(); - EXPECT_FALSE(item.Status().ok()) << item.Status().what(); - err = it.GetCJSON(ser, false); - EXPECT_FALSE(err.ok()) << err.what(); - err = it.GetMsgPack(ser, false); - EXPECT_FALSE(err.ok()) << err.what(); - } - ++id; - } - rx.Stop(); - }); - - loop.run(); -} - -TEST_F(RPCClientTestApi, AggregationsFetching) { - // Validate, that distinct results will remain valid after query results fetching. - // Actual aggregation values will be sent for initial 'select' only, but must be available at any point of iterator's lifetime. - using namespace reindexer::client; - using namespace reindexer::net::ev; - - StartDefaultRealServer(); - dynamic_loop loop; - constexpr unsigned kItemsCount = 100; - constexpr unsigned kFetchLimit = kItemsCount / 5; - - loop.spawn([&loop, this, kItemsCount]() noexcept { - const std::string nsName = "ns1"; - const std::string dsn = "cproto://" + kDefaultRPCServerAddr + "/db1"; - client::ConnectOpts opts; - opts.CreateDBIfMissing(); - client::ReindexerConfig cfg; - cfg.FetchAmount = kFetchLimit; - CoroReindexer rx(cfg); - auto err = rx.Connect(dsn, loop, opts); - ASSERT_TRUE(err.ok()) << err.what(); - - CreateNamespace(rx, nsName); - FillData(rx, nsName, 0, kItemsCount); - - { - CoroQueryResults qr; - const auto q = Query(nsName).Distinct("id").ReqTotal().Explain(); - err = rx.Select(q, qr); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(qr.Count(), kItemsCount); - const auto initialAggs = qr.GetAggregationResults(); - ASSERT_EQ(initialAggs.size(), 2); - ASSERT_EQ(initialAggs[0].type, AggDistinct); - ASSERT_EQ(initialAggs[1].type, AggCount); - const std::string explain = qr.GetExplainResults(); - ASSERT_GT(explain.size(), 0); - WrSerializer wser; - initialAggs[0].GetJSON(wser); - initialAggs[1].GetJSON(wser); - const std::string initialAggJSON(wser.Slice()); - for (auto& it : qr) { - ASSERT_TRUE(it.Status().ok()) << it.Status().what(); - auto& aggs = qr.GetAggregationResults(); - ASSERT_EQ(aggs.size(), 2); - wser.Reset(); - aggs[0].GetJSON(wser); - aggs[1].GetJSON(wser); - EXPECT_EQ(initialAggJSON, wser.Slice()) << q.GetSQL(); - EXPECT_EQ(qr.TotalCount(), kItemsCount); - EXPECT_EQ(explain, qr.GetExplainResults()); - } - } - - rx.Stop(); - }); - - loop.run(); -} - -TEST_F(RPCClientTestApi, AggregationsFetchingWithLazyMode) { - // Validate, that distinct results will remain valid after query results fetching in lazy mode - // Actual aggregation values will be sent for initial 'select' only, but must be available at any point of iterator's lifetime. 
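// [Editor's note: illustrative sketch only -- not part of the original patch or of the deleted test file.]
// Condensed form of the rule the lazy-mode test below asserts: with client::LazyQueryResultsMode the
// aggregation/explain payload must be read before iteration fetches a later portion of the results;
// after such a fetch the getters throw, while TotalCount() stays available. Names follow the test body
// that comes next:
//
//   CoroQueryResults qr(0, 0, client::LazyQueryResultsMode{});
//   auto err = rx.Select(Query(nsName).Distinct("id").ReqTotal().Explain(), qr);
//   auto aggs = qr.GetAggregationResults();     // safe: requested before any further fetching
//   for (auto& it : qr) { (void)it; }           // iterating past FetchAmount fetches later portions
//   // qr.GetAggregationResults() / qr.GetExplainResults() would now throw; qr.TotalCount() is still valid.
// [End of editor's note.]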
-	using namespace reindexer::client;
-	using namespace reindexer::net::ev;
-
-	StartDefaultRealServer();
-	dynamic_loop loop;
-	constexpr unsigned kItemsCount = 100;
-	constexpr unsigned kFetchLimit = kItemsCount / 5;
-
-	loop.spawn([&loop, this, kItemsCount]() noexcept {
-		const std::string nsName = "ns1";
-		const std::string dsn = "cproto://" + kDefaultRPCServerAddr + "/db1";
-		client::ConnectOpts opts;
-		opts.CreateDBIfMissing();
-		client::ReindexerConfig cfg;
-		cfg.FetchAmount = kFetchLimit;
-		CoroReindexer rx(cfg);
-		auto err = rx.Connect(dsn, loop, opts);
-		ASSERT_TRUE(err.ok()) << err.what();
-
-		CreateNamespace(rx, nsName);
-		FillData(rx, nsName, 0, kItemsCount);
-
-		{
-			// Aggregation and explain will be available, if first access was perfomed before fetching
-			CoroQueryResults qr(0, 0, client::LazyQueryResultsMode{});
-			const auto q = Query(nsName).Distinct("id").ReqTotal().Explain();
-			err = rx.Select(q, qr);
-			ASSERT_TRUE(err.ok()) << err.what();
-			ASSERT_EQ(qr.Count(), kItemsCount);
-			const auto initialAggs = qr.GetAggregationResults();
-			ASSERT_EQ(initialAggs.size(), 2);
-			ASSERT_EQ(initialAggs[0].type, AggDistinct);
-			ASSERT_EQ(initialAggs[1].type, AggCount);
-			const std::string explain = qr.GetExplainResults();
-			ASSERT_GT(explain.size(), 0);
-			WrSerializer wser;
-			initialAggs[0].GetJSON(wser);
-			initialAggs[1].GetJSON(wser);
-			const std::string initialAggJSON(wser.Slice());
-			for (auto& it : qr) {
-				ASSERT_TRUE(it.Status().ok()) << it.Status().what();
-				auto& aggs = qr.GetAggregationResults();
-				ASSERT_EQ(aggs.size(), 2);
-				wser.Reset();
-				aggs[0].GetJSON(wser);
-				aggs[1].GetJSON(wser);
-				EXPECT_EQ(initialAggJSON, wser.Slice()) << q.GetSQL();
-				EXPECT_EQ(qr.TotalCount(), kItemsCount);
-				EXPECT_EQ(explain, qr.GetExplainResults());
-			}
-		}
-		{
-			// Aggregation and explain will throw exception, if first access was perfomed after fetching
-			CoroQueryResults qr(0, 0, client::LazyQueryResultsMode{});
-			const auto q = Query(nsName).Distinct("id").ReqTotal().Explain();
-			err = rx.Select(q, qr);
-			ASSERT_TRUE(err.ok()) << err.what();
-			ASSERT_EQ(qr.Count(), kItemsCount);
-			unsigned i = 0;
-			for (auto& it : qr) {
-				ASSERT_TRUE(it.Status().ok()) << it.Status().what();
-				if (i++ > kFetchLimit) {
-					break;
-				}
-			}
-
-			EXPECT_THROW(qr.GetAggregationResults(), Error);
-			EXPECT_THROW(qr.GetExplainResults(), Error);
-			EXPECT_EQ(qr.TotalCount(), kItemsCount); // Total count is still available
-		}
-
-		rx.Stop();
-	});
-
-	loop.run();
-}
-TEST_F(RPCClientTestApi, AggregationsWithStrictModeTest) {
-	using namespace reindexer::client;
-	using namespace reindexer::net::ev;
-
-	StartDefaultRealServer();
-	dynamic_loop loop;
-
-	loop.spawn([&loop]() noexcept {
-		const std::string dsn = "cproto://" + kDefaultRPCServerAddr + "/db1";
-		reindexer::client::ConnectOpts opts;
-		opts.CreateDBIfMissing();
-		reindexer::client::ReindexerConfig cfg;
-		auto rx = std::make_unique(cfg);
-		auto err = rx->Connect(dsn, loop, opts);
-		ASSERT_TRUE(err.ok()) << err.what();
-
-		QueryAggStrictModeTest(rx);
-	});
-
-	loop.run();
-}
-
-TEST_F(RPCClientTestApi, SubQuery) {
-	using namespace reindexer::client;
-	using namespace reindexer::net::ev;
-	using reindexer::coroutine::wait_group;
-	using reindexer::coroutine::wait_group_guard;
-
-	StartDefaultRealServer();
-	dynamic_loop loop;
-
-	loop.spawn([&loop, this]() noexcept {
-		const std::string kLeftNsName = "left_ns";
-		const std::string kRightNsName = "right_ns";
-		const std::string dsn = "cproto://" + kDefaultRPCServerAddr + "/db1";
-		reindexer::client::ConnectOpts opts;
-		opts.CreateDBIfMissing();
-		reindexer::client::ReindexerConfig cfg;
-		constexpr auto kFetchCount = 50;
-		constexpr auto kNsSize = kFetchCount * 3;
-		cfg.FetchAmount = kFetchCount;
-		CoroReindexer rx(cfg);
-		auto err = rx.Connect(dsn, loop, opts);
-		ASSERT_TRUE(err.ok()) << err.what();
-
-		CreateNamespace(rx, kLeftNsName);
-		CreateNamespace(rx, kRightNsName);
-
-		auto upsertFn = [&rx](const std::string& nsName) {
-			for (size_t i = 0; i < kNsSize; ++i) {
-				auto item = rx.NewItem(nsName);
-				ASSERT_TRUE(item.Status().ok()) << nsName << " " << item.Status().what();
-
-				WrSerializer wrser;
-				JsonBuilder jsonBuilder(wrser, ObjType::TypeObject);
-				jsonBuilder.Put("id", i);
-				jsonBuilder.Put("value", "value_" + std::to_string(i));
-				jsonBuilder.End();
-				char* endp = nullptr;
-				auto err = item.Unsafe().FromJSON(wrser.Slice(), &endp);
-				ASSERT_TRUE(err.ok()) << nsName << " " << err.what();
-				err = rx.Upsert(nsName, item);
-				ASSERT_TRUE(err.ok()) << nsName << " " << err.what();
-			}
-		};
-
-		upsertFn(kLeftNsName);
-		upsertFn(kRightNsName);
-
-		const auto kHalfSize = kNsSize / 2;
-		{
-			client::CoroQueryResults qr;
-			err = rx.Select(Query(kLeftNsName).Where("id", CondSet, Query(kRightNsName).Select({"id"}).Where("id", CondLt, kHalfSize)), qr);
-			ASSERT_TRUE(err.ok()) << err.what();
-			ASSERT_EQ(qr.Count(), kHalfSize);
-		}
-		{
-			const int limit = 10;
-			client::CoroQueryResults qr;
-			err = rx.Select(
-				Query(kLeftNsName).Where(Query(kRightNsName).Where("id", CondLt, kHalfSize).ReqTotal(), CondEq, {kHalfSize}).Limit(limit),
-				qr);
-			ASSERT_TRUE(err.ok()) << err.what();
-			ASSERT_EQ(qr.Count(), limit);
-		}
-		rx.Stop();
-	});
-
-	loop.run();
-}
diff --git a/cpp_src/gtests/tests/unit/rtree_test.cc b/cpp_src/gtests/tests/unit/rtree_test.cc
deleted file mode 100644
index a6c0f59c4..000000000
--- a/cpp_src/gtests/tests/unit/rtree_test.cc
+++ /dev/null
@@ -1,334 +0,0 @@
-#include "core/index/rtree/rtree.h"
-#include
-#include "core/cjson/jsonbuilder.h"
-#include "core/index/rtree/greenesplitter.h"
-#include "core/index/rtree/linearsplitter.h"
-#include "core/index/rtree/quadraticsplitter.h"
-#include "core/index/rtree/rstarsplitter.h"
-#include "gtest/gtest.h"
-#include "reindexer_api.h"
-#include "tools/randompoint.h"
-
-namespace {
-
-static constexpr long long kRange = 1000ll;
-
-template
-struct Compare;
-
-template <>
-struct Compare {
-	bool operator()(reindexer::Point lhs, reindexer::Point rhs) const noexcept {
-		if (lhs.X() == rhs.X()) return lhs.Y() < rhs.Y();
-		return lhs.X() < rhs.X();
-	}
-};
-
-template
-struct Compare> {
-	bool operator()(const reindexer::RMapValue& lhs, const reindexer::RMapValue& rhs) const noexcept {
-		return lhs.second < rhs.second;
-	}
-};
-
-template
-class SearchVisitor : public RTree::Visitor {
-public:
-	bool operator()(const typename RTree::value_type& v) override {
-		const auto it = data_.find(v);
-		if (it == data_.end()) {
-			++wrong_;
-		} else {
-			data_.erase(it);
-		}
-		return false;
-	}
-	size_t Size() const noexcept { return data_.size(); }
-	void Add(const typename RTree::value_type& r) { data_.insert(r); }
-	size_t Wrong() const noexcept { return wrong_; }
-
-private:
-	size_t wrong_ = 0;
-	std::multiset> data_;
-};
-
-template
-class DeleteVisitor : public RTree::Visitor {
-public:
-	DeleteVisitor(const reindexer::Rectangle& r) : rect_{r} {}
-	bool operator()(const typename RTree::value_type& v) override { return rect_.Contain(RTree::traits::GetPoint(v)); }
-
-private:
-	const reindexer::Rectangle rect_;
-};
-
-} // namespace
-
-// Checks of inserting of points to RectangleTree and verifies of its structure after each insertion
-template